// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/incremental-marking.h"

#include "src/codegen/compilation-cache.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/safepoint.h"
#include "src/heap/sweeper.h"
#include "src/init/v8.h"
#include "src/numbers/conversions.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/slots-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/objects/visitors.h"
#include "src/tracing/trace-event.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

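// Observer::Step is invoked by the heap's allocation observers (installed in
// Start()): it advances marking on allocation and blackens the freshly
// allocated memory if black allocation is active.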
void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
                                        size_t size) {
  Heap* heap = incremental_marking_->heap();
  VMState<GC> state(heap->isolate());
  RuntimeCallTimerScope runtime_timer(
      heap->isolate(),
      RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
  incremental_marking_->AdvanceOnAllocation();
  // AdvanceOnAllocation can start incremental marking.
  incremental_marking_->EnsureBlackAllocated(addr, size);
}

IncrementalMarking::IncrementalMarking(Heap* heap,
                                       WeakObjects* weak_objects)
    : heap_(heap),
      collector_(heap->mark_compact_collector()),
      weak_objects_(weak_objects),
      new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
      old_generation_observer_(this, kOldGenerationAllocatedThreshold) {
  SetState(STOPPED);
}

void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
    HeapObject obj) {
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingLayoutChange");
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_LAYOUT_CHANGE);
  marking_state()->WhiteToGrey(obj);
  collector_->VisitObject(obj);
}

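// Marks |obj| black from a background thread and records its size in the
// per-chunk background live bytes counter, which is merged back into the
// main marking state when incremental marking stops (see Stop()).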
void IncrementalMarking::MarkBlackBackground(HeapObject obj, int object_size) {
  MarkBit mark_bit = atomic_marking_state()->MarkBitFrom(obj);
  Marking::MarkBlack<AccessMode::ATOMIC>(mark_bit);
  MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
  IncrementLiveBytesBackground(chunk, static_cast<intptr_t>(object_size));
}

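// Called when an object is left-trimmed while marking is active. The old
// object |from| is marked black and visited; the mark bits for the new start
// address |to| are then set so that the trimmed object is black as well.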
void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
  DCHECK(IsMarking());
  DCHECK(MemoryChunk::FromHeapObject(from)->SweepingDone());
  DCHECK_EQ(MemoryChunk::FromHeapObject(from), MemoryChunk::FromHeapObject(to));
  DCHECK_NE(from, to);

  MarkBit new_mark_bit = marking_state()->MarkBitFrom(to);

  if (black_allocation() && Marking::IsBlack<kAtomicity>(new_mark_bit)) {
    // Nothing to do if the object is in black area.
    return;
  }
  MarkBlackAndVisitObjectDueToLayoutChange(from);
  DCHECK(marking_state()->IsBlack(from));
  // Mark the new address as black.
  if (from.address() + kTaggedSize == to.address()) {
    // The old and the new markbits overlap. The |to| object has the
    // grey color. To make it black, we need to set the second bit.
    DCHECK(new_mark_bit.Get<kAtomicity>());
    new_mark_bit.Next().Set<kAtomicity>();
  } else {
    bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
    DCHECK(success);
    USE(success);
  }
  DCHECK(marking_state()->IsBlack(to));
}

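// Root visitor used by MarkRoots(): each strong root that points to a heap
// object is turned grey and pushed onto the marking worklist.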
class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitRootPointer(Root root, const char* description,
                        FullObjectSlot p) override {
    MarkObjectByPointer(p);
  }

  void VisitRootPointers(Root root, const char* description,
                         FullObjectSlot start, FullObjectSlot end) override {
    for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(FullObjectSlot p) {
    Object obj = *p;
    if (!obj.IsHeapObject()) return;

    heap_->incremental_marking()->WhiteToGreyAndPush(HeapObject::cast(obj));
  }

  Heap* heap_;
};


bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::CanBeActivated() {
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled();
}

bool IncrementalMarking::IsBelowActivationThresholds() const {
  return heap_->OldGenerationSizeOfObjects() <= kV8ActivationThreshold &&
         heap_->GlobalSizeOfObjects() <= kGlobalActivationThreshold;
}

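// Starts incremental marking: finishes array buffer sweeping and any pending
// old generation sweeping, resets the marking schedule counters, starts
// marking, and installs the allocation observers that drive further steps.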
void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
  if (FLAG_trace_incremental_marking) {
    const size_t old_generation_size_mb =
        heap()->OldGenerationSizeOfObjects() / MB;
    const size_t old_generation_limit_mb =
        heap()->old_generation_allocation_limit() / MB;
    const size_t global_size_mb = heap()->GlobalSizeOfObjects() / MB;
    const size_t global_limit_mb = heap()->global_allocation_limit() / MB;
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Start (%s): (size/limit/slack) v8: %zuMB / %zuMB "
        "/ %zuMB global: %zuMB / %zuMB / %zuMB\n",
        Heap::GarbageCollectionReasonToString(gc_reason),
        old_generation_size_mb, old_generation_limit_mb,
        old_generation_size_mb > old_generation_limit_mb
            ? 0
            : old_generation_limit_mb - old_generation_size_mb,
        global_size_mb, global_limit_mb,
        global_size_mb > global_limit_mb ? 0
                                         : global_limit_mb - global_size_mb);
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  Counters* counters = heap_->isolate()->counters();

  counters->incremental_marking_reason()->AddSample(
      static_cast<int>(gc_reason));
  HistogramTimerScope incremental_marking_scope(
      counters->gc_incremental_marking_start());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_START);
  heap_->tracer()->NotifyIncrementalMarkingStart();

  start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
  time_to_force_completion_ = 0.0;
  initial_old_generation_size_ = heap_->OldGenerationSizeOfObjects();
  old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
  bytes_marked_ = 0;
  scheduled_bytes_to_mark_ = 0;
  schedule_update_time_ms_ = start_time_ms_;
  bytes_marked_concurrently_ = 0;
  was_activated_ = true;

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_INCREMENTAL_SWEEP_ARRAY_BUFFERS);
    heap_->array_buffer_sweeper()->EnsureFinished();
  }

  collector_->EnsureSweepingCompleted();
  DCHECK(!collector_->sweeping_in_progress());
#ifdef DEBUG
  heap_->VerifyCountersAfterSweeping();
#endif
  StartMarking();

  heap_->AddAllocationObserversToAllSpaces(&old_generation_observer_,
                                           &new_generation_observer_);
  incremental_marking_job()->Start(heap_);
}


void IncrementalMarking::StartMarking() {
  if (heap_->isolate()->serializer_enabled()) {
    // Black allocation currently starts when we start incremental marking,
    // but we cannot enable black allocation while deserializing. Hence, we
    // have to delay the start of incremental marking in that case.
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Start delayed - serializer\n");
    }
    return;
  }
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Start marking\n");
  }

  heap_->InvokeIncrementalMarkingPrologueCallbacks();

  is_compacting_ = !FLAG_never_compact && collector_->StartCompaction();
  collector_->StartMarking();

  SetState(MARKING);

  MarkingBarrier::ActivateAll(heap(), is_compacting_);

  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  StartBlackAllocation();

  MarkRoots();

  if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
    heap_->concurrent_marking()->ScheduleJob();
  }

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
  }

  {
    // TracePrologue may call back into V8 in corner cases, requiring that
    // marking (including write barriers) is fully set up.
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
    heap_->local_embedder_heap_tracer()->TracePrologue(
        heap_->flags_for_embedder_tracer());
  }

  heap_->InvokeIncrementalMarkingEpilogueCallbacks();
}

void IncrementalMarking::StartBlackAllocation() {
  DCHECK(!black_allocation_);
  DCHECK(IsMarking());
  black_allocation_ = true;
  heap()->old_space()->MarkLinearAllocationAreaBlack();
  heap()->map_space()->MarkLinearAllocationAreaBlack();
  heap()->code_space()->MarkLinearAllocationAreaBlack();
  if (FLAG_local_heaps) {
    heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
      local_heap->MarkLinearAllocationAreaBlack();
    });
  }
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation started\n");
  }
}

void IncrementalMarking::PauseBlackAllocation() {
  DCHECK(IsMarking());
  heap()->old_space()->UnmarkLinearAllocationArea();
  heap()->map_space()->UnmarkLinearAllocationArea();
  heap()->code_space()->UnmarkLinearAllocationArea();
  if (FLAG_local_heaps) {
    heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
      local_heap->UnmarkLinearAllocationArea();
    });
  }
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation paused\n");
  }
  black_allocation_ = false;
}

void IncrementalMarking::FinishBlackAllocation() {
  if (black_allocation_) {
    black_allocation_ = false;
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Black allocation finished\n");
    }
  }
}

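// Ensures that an object allocated at |allocated| while black allocation is
// active is actually marked black: large objects are blackened directly,
// regular objects get a black area created on their page.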
void IncrementalMarking::EnsureBlackAllocated(Address allocated, size_t size) {
  if (black_allocation() && allocated != kNullAddress) {
    HeapObject object = HeapObject::FromAddress(allocated);
    if (marking_state()->IsWhite(object) && !Heap::InYoungGeneration(object)) {
      if (heap_->IsLargeObject(object)) {
        marking_state()->WhiteToBlack(object);
      } else {
        Page::FromAddress(allocated)->CreateBlackArea(allocated,
                                                      allocated + size);
      }
    }
  }
}

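// Marks all strong roots grey and pushes them onto the marking worklist.
// Stack and weak roots are skipped here.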
void IncrementalMarking::MarkRoots() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateRoots(
      &visitor, base::EnumSet<SkipRoot>{SkipRoot::kStack, SkipRoot::kWeak});
}

bool IncrementalMarking::ShouldRetainMap(Map map, int age) {
  if (age == 0) {
    // The map has aged. Do not retain this map.
    return false;
  }
  Object constructor = map.GetConstructor();
  if (!constructor.IsHeapObject() ||
      marking_state()->IsWhite(HeapObject::cast(constructor))) {
    // The constructor is dead, no new objects with this map can
    // be created. Do not retain this map.
    return false;
  }
  return true;
}


void IncrementalMarking::RetainMaps() {
  // Do not retain dead maps if flag disables it or there is
  // - memory pressure (reduce_memory_footprint_),
  // - GC is requested by tests or dev-tools (abort_incremental_marking_).
  bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
                                   FLAG_retain_maps_for_n_gc == 0;
  std::vector<WeakArrayList> retained_maps_list = heap()->FindAllRetainedMaps();

  for (WeakArrayList retained_maps : retained_maps_list) {
    int length = retained_maps.length();

    for (int i = 0; i < length; i += 2) {
      MaybeObject value = retained_maps.Get(i);
      HeapObject map_heap_object;
      if (!value->GetHeapObjectIfWeak(&map_heap_object)) {
        continue;
      }
      int age = retained_maps.Get(i + 1).ToSmi().value();
      int new_age;
      Map map = Map::cast(map_heap_object);
      if (!map_retaining_is_disabled && marking_state()->IsWhite(map)) {
        if (ShouldRetainMap(map, age)) {
          WhiteToGreyAndPush(map);
        }
        Object prototype = map.prototype();
        if (age > 0 && prototype.IsHeapObject() &&
            marking_state()->IsWhite(HeapObject::cast(prototype))) {
          // The prototype is not marked, age the map.
          new_age = age - 1;
        } else {
          // The prototype and the constructor are marked, this map keeps only
          // transition tree alive, not JSObjects. Do not age the map.
          new_age = age;
        }
      } else {
        new_age = FLAG_retain_maps_for_n_gc;
      }
      // Update the age.
      if (new_age != age) {
        retained_maps.Set(i + 1, MaybeObject::FromSmi(Smi::FromInt(new_age)));
390 391 392 393 394
      }
    }
  }
}

void IncrementalMarking::FinalizeIncrementally() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE_BODY);
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  double start = heap_->MonotonicallyIncreasingTimeInMs();

  // After finishing incremental marking, we try to discover all unmarked
  // objects to reduce the marking load in the final pause.
  // 1) We scan and mark the roots again to find all changes to the root set.
  // 2) Age and retain maps embedded in optimized code.
  MarkRoots();

  // Map retaining is needed for performance, not correctness,
  // so we can do it only once at the beginning of the finalization.
  RetainMaps();

  MarkingBarrier::PublishAll(heap());

  finalize_marking_completed_ = true;

  if (FLAG_trace_incremental_marking) {
    double end = heap_->MonotonicallyIncreasingTimeInMs();
    double delta = end - start;
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Finalize incrementally spent %.1f ms.\n", delta);
  }
}

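// After a scavenge, entries in the marking worklist may point to objects
// that were moved or died in new space. The callback below rewrites each
// entry to its forwarding address or drops it entirely.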
void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
  if (!IsMarking()) return;

  Map filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();

#ifdef ENABLE_MINOR_MC
  MinorMarkCompactCollector::MarkingState* minor_marking_state =
      heap()->minor_mark_compact_collector()->marking_state();
#endif  // ENABLE_MINOR_MC

  collector_->local_marking_worklists()->Publish();
  MarkingBarrier::PublishAll(heap());
  collector_->marking_worklists()->Update(
      [
#ifdef DEBUG
          // |this| is referred to inside the DCHECKs below.
          this,
#endif
#ifdef ENABLE_MINOR_MC
          minor_marking_state,
#endif
          filler_map](HeapObject obj, HeapObject* out) -> bool {
        DCHECK(obj.IsHeapObject());
        // Only pointers to from space have to be updated.
        if (Heap::InFromPage(obj)) {
          MapWord map_word = obj.map_word();
          if (!map_word.IsForwardingAddress()) {
            // There may be objects on the marking deque that do not exist
            // anymore, e.g. left trimmed objects or objects from the root set
            // (frames). If these object are dead at scavenging time, their
            // marking deque entries will not point to forwarding addresses.
            // Hence, we can discard them.
            return false;
          }
          HeapObject dest = map_word.ToForwardingAddress();
          DCHECK_IMPLIES(marking_state()->IsWhite(obj),
                         obj.IsFreeSpaceOrFiller());
          *out = dest;
          return true;
        } else if (Heap::InToPage(obj)) {
          // The object may be on a large page or on a page that was moved in
          // new space.
          DCHECK(Heap::IsLargeObject(obj) ||
                 Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE));
#ifdef ENABLE_MINOR_MC
          if (minor_marking_state->IsWhite(obj)) {
            return false;
          }
#endif  // ENABLE_MINOR_MC
          // Either a large object or an object marked by the minor
          // mark-compactor.
          *out = obj;
          return true;
        } else {
          // The object may be on a page that was moved from new to old space.
          // Only applicable during minor MC garbage collections.
          if (Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
#ifdef ENABLE_MINOR_MC
            if (minor_marking_state->IsWhite(obj)) {
              return false;
            }
#endif  // ENABLE_MINOR_MC
            *out = obj;
            return true;
          }
          DCHECK_IMPLIES(marking_state()->IsWhite(obj),
                         obj.IsFreeSpaceOrFiller());
          // Skip one word filler objects that appear on the
          // stack when we perform in place array shift.
          if (obj.map() != filler_map) {
            *out = obj;
            return true;
          }
          return false;
        }
      });

  weak_objects_->UpdateAfterScavenge();
}

void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
    size_t dead_bytes_in_new_space) {
  if (!IsMarking()) return;
  bytes_marked_ -= std::min(bytes_marked_, dead_bytes_in_new_space);
}

void IncrementalMarking::ProcessBlackAllocatedObject(HeapObject obj) {
  if (IsMarking() && marking_state()->IsBlack(obj)) {
    collector_->RevisitObject(obj);
  }
}

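// Lets the embedder make tracing progress for roughly |expected_duration_ms|:
// wrappers popped from the embedder worklist are handed to the embedder heap
// tracer, which then continues tracing on its side until the deadline.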
StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
                                            double* duration_ms) {
  if (!ShouldDoEmbedderStep()) {
    *duration_ms = 0.0;
    return StepResult::kNoImmediateWork;
  }

  constexpr size_t kObjectsToProcessBeforeDeadlineCheck = 500;

  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING);
  LocalEmbedderHeapTracer* local_tracer = heap_->local_embedder_heap_tracer();
  const double start = heap_->MonotonicallyIncreasingTimeInMs();
  const double deadline = start + expected_duration_ms;
  bool empty_worklist;
  {
    LocalEmbedderHeapTracer::ProcessingScope scope(local_tracer);
    HeapObject object;
    size_t cnt = 0;
    empty_worklist = true;
    while (local_marking_worklists()->PopEmbedder(&object)) {
      scope.TracePossibleWrapper(JSObject::cast(object));
      if (++cnt == kObjectsToProcessBeforeDeadlineCheck) {
        if (deadline <= heap_->MonotonicallyIncreasingTimeInMs()) {
          empty_worklist = false;
          break;
        }
        cnt = 0;
      }
    }
  }
  // |deadline - heap_->MonotonicallyIncreasingTimeInMs()| could be negative,
  // which means |local_tracer| won't do any actual tracing, so there is no
  // need to check for |deadline <= heap_->MonotonicallyIncreasingTimeInMs()|.
  bool remote_tracing_done =
      local_tracer->Trace(deadline - heap_->MonotonicallyIncreasingTimeInMs());
  double current = heap_->MonotonicallyIncreasingTimeInMs();
  local_tracer->SetEmbedderWorklistEmpty(empty_worklist);
  *duration_ms = current - start;
  return (empty_worklist && remote_tracing_done)
             ? StepResult::kNoImmediateWork
             : StepResult::kMoreWorkRemaining;
}

void IncrementalMarking::Hurry() {
  if (!local_marking_worklists()->IsEmpty()) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking) {
      start = heap_->MonotonicallyIncreasingTimeInMs();
      if (FLAG_trace_incremental_marking) {
        heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Hurry\n");
      }
    }
    collector_->ProcessMarkingWorklist(0);
    SetState(COMPLETE);
    if (FLAG_trace_incremental_marking) {
      double end = heap_->MonotonicallyIncreasingTimeInMs();
      double delta = end - start;
      if (FLAG_trace_incremental_marking) {
        heap()->isolate()->PrintWithTimestamp(
            "[IncrementalMarking] Complete (hurry), spent %d ms.\n",
            static_cast<int>(delta));
      }
    }
  }
}


void IncrementalMarking::Stop() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    int old_generation_size_mb =
        static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
    int old_generation_limit_mb =
        static_cast<int>(heap()->old_generation_allocation_limit() / MB);
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, "
        "overshoot %dMB\n",
        old_generation_size_mb, old_generation_limit_mb,
        std::max(0, old_generation_size_mb - old_generation_limit_mb));
  }

  SpaceIterator it(heap_);
  while (it.HasNext()) {
    Space* space = it.Next();
    if (space == heap_->new_space()) {
      space->RemoveAllocationObserver(&new_generation_observer_);
    } else {
      space->RemoveAllocationObserver(&old_generation_observer_);
    }
  }

  heap_->isolate()->stack_guard()->ClearGC();
  SetState(STOPPED);
  is_compacting_ = false;
  FinishBlackAllocation();

  if (FLAG_local_heaps) {
    // Merge live bytes counters of background threads
    for (auto pair : background_live_bytes_) {
      MemoryChunk* memory_chunk = pair.first;
      intptr_t live_bytes = pair.second;

      if (live_bytes) {
        marking_state()->IncrementLiveBytes(memory_chunk, live_bytes);
      }
    }

    background_live_bytes_.clear();
  }
}


void IncrementalMarking::Finalize() {
  Hurry();
  Stop();
}


void IncrementalMarking::FinalizeMarking(CompletionAction action) {
  DCHECK(!finalize_marking_completed_);
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] requesting finalization of incremental "
        "marking.\n");
  }
  request_type_ = FINALIZATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}

double IncrementalMarking::CurrentTimeToMarkingTask() const {
  const double recorded_time_to_marking_task =
      heap_->tracer()->AverageTimeToIncrementalMarkingTask();
  const double current_time_to_marking_task =
      incremental_marking_job_.CurrentTimeToTask(heap_);
  if (recorded_time_to_marking_task == 0.0) return 0.0;
  return std::max(recorded_time_to_marking_task, current_time_to_marking_task);
}

void IncrementalMarking::MarkingComplete(CompletionAction action) {
  // Allowed overshoot percentage of incremental marking walltime.
  constexpr double kAllowedOvershoot = 0.1;
  // Minimum overshoot in ms. This is used to allow moving away from stack when
  // marking was fast.
  constexpr double kMinOvershootMs = 50;

  if (action == GC_VIA_STACK_GUARD) {
    if (time_to_force_completion_ == 0.0) {
      const double now = heap_->MonotonicallyIncreasingTimeInMs();
      const double overshoot_ms =
667
          std::max(kMinOvershootMs, (now - start_time_ms_) * kAllowedOvershoot);
      const double time_to_marking_task = CurrentTimeToMarkingTask();
      if (time_to_marking_task == 0.0 || time_to_marking_task > overshoot_ms) {
        if (FLAG_trace_incremental_marking) {
          heap()->isolate()->PrintWithTimestamp(
              "[IncrementalMarking] Not delaying marking completion. time to "
              "task: %fms allowed overshoot: %fms\n",
              time_to_marking_task, overshoot_ms);
        }
      } else {
        time_to_force_completion_ = now + overshoot_ms;
        if (FLAG_trace_incremental_marking) {
          heap()->isolate()->PrintWithTimestamp(
              "[IncrementalMarking] Delaying GC via stack guard. time to task: "
              "%fms "
              "allowed overshoot: %fms\n",
              time_to_marking_task, overshoot_ms);
        }
        incremental_marking_job_.ScheduleTask(
            heap(), IncrementalMarkingJob::TaskType::kNormal);
        return;
      }
    }
    if (heap()->MonotonicallyIncreasingTimeInMs() < time_to_force_completion_) {
      if (FLAG_trace_incremental_marking) {
        heap()->isolate()->PrintWithTimestamp(
            "[IncrementalMarking] Delaying GC via stack guard. time left: "
            "%fms\n",
            time_to_force_completion_ -
                heap_->MonotonicallyIncreasingTimeInMs());
      }
      return;
    }
  }

  SetState(COMPLETE);
  // We will set the stack guard to request a GC now.  This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context).  If a few things get allocated between now and
  // then, that shouldn't make us do a scavenge and keep being incremental.
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}

void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  finalize_marking_completed_ = false;
}

bool IncrementalMarking::ShouldDoEmbedderStep() {
  return state_ == MARKING && FLAG_incremental_marking_wrappers &&
         heap_->local_embedder_heap_tracer()->InUse();
}

void IncrementalMarking::FastForwardSchedule() {
  if (scheduled_bytes_to_mark_ < bytes_marked_) {
    scheduled_bytes_to_mark_ = bytes_marked_;
    if (FLAG_trace_incremental_marking) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Fast-forwarded schedule\n");
    }
  }
}

void IncrementalMarking::FastForwardScheduleIfCloseToFinalization() {
  // Consider marking close to finalization if 75% of the initial old
  // generation was marked.
  if (bytes_marked_ > 3 * (initial_old_generation_size_ / 4)) {
    FastForwardSchedule();
  }
}

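// Schedules marking work proportional to the wall time elapsed since the
// last schedule update, so that the initial old generation size is fully
// scheduled after kTargetMarkingWallTimeInMs of marking.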
void IncrementalMarking::ScheduleBytesToMarkBasedOnTime(double time_ms) {
  // Time interval that should be sufficient to complete incremental marking.
  constexpr double kTargetMarkingWallTimeInMs = 500;
  constexpr double kMinTimeBetweenScheduleInMs = 10;
  if (schedule_update_time_ms_ + kMinTimeBetweenScheduleInMs > time_ms) return;
  double delta_ms =
      std::min(time_ms - schedule_update_time_ms_, kTargetMarkingWallTimeInMs);
  schedule_update_time_ms_ = time_ms;

  size_t bytes_to_mark =
      (delta_ms / kTargetMarkingWallTimeInMs) * initial_old_generation_size_;
  AddScheduledBytesToMark(bytes_to_mark);

  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Scheduled %zuKB to mark based on time delta "
        "%.1fms\n",
        bytes_to_mark / KB, delta_ms);
  }
}

namespace {
StepResult CombineStepResults(StepResult a, StepResult b) {
  DCHECK_NE(StepResult::kWaitingForFinalization, a);
  DCHECK_NE(StepResult::kWaitingForFinalization, b);
  if (a == StepResult::kMoreWorkRemaining ||
      b == StepResult::kMoreWorkRemaining)
    return StepResult::kMoreWorkRemaining;
  return StepResult::kNoImmediateWork;
}
}  // anonymous namespace

StepResult IncrementalMarking::AdvanceWithDeadline(
    double deadline_in_ms, CompletionAction completion_action,
    StepOrigin step_origin) {
  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
  DCHECK(!IsStopped());

  ScheduleBytesToMarkBasedOnTime(heap()->MonotonicallyIncreasingTimeInMs());
  FastForwardScheduleIfCloseToFinalization();
  return Step(kStepSizeInMs, completion_action, step_origin);
}

size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
  // Compute the bytes allocated in the old generation since the last step
  // based on the old generation allocation counter.
  size_t current_counter = heap_->OldGenerationAllocationCounter();
  size_t result = current_counter - old_generation_allocation_counter_;
  old_generation_allocation_counter_ = current_counter;
  return result;
}

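// Baseline step size: aim for roughly kTargetStepCount steps over the
// initial old generation size, clamped to [kMinStepSizeInBytes,
// kMaxStepSizeInByte]. Close to the old generation limit, larger steps are
// used so marking can finish before running out of memory.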
size_t IncrementalMarking::StepSizeToMakeProgress() {
  const size_t kTargetStepCount = 256;
  const size_t kTargetStepCountAtOOM = 32;
  const size_t kMaxStepSizeInByte = 256 * KB;
  size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;

  if (!heap()->CanExpandOldGeneration(oom_slack)) {
    return heap()->OldGenerationSizeOfObjects() / kTargetStepCountAtOOM;
  }

  return std::min(std::max({initial_old_generation_size_ / kTargetStepCount,
                            IncrementalMarking::kMinStepSizeInBytes}),
                  kMaxStepSizeInByte);
}

void IncrementalMarking::AddScheduledBytesToMark(size_t bytes_to_mark) {
  if (scheduled_bytes_to_mark_ + bytes_to_mark < scheduled_bytes_to_mark_) {
    // The overflow case.
    scheduled_bytes_to_mark_ = std::numeric_limits<std::size_t>::max();
  } else {
    scheduled_bytes_to_mark_ += bytes_to_mark;
  }
}

void IncrementalMarking::ScheduleBytesToMarkBasedOnAllocation() {
  size_t progress_bytes = StepSizeToMakeProgress();
  size_t allocation_bytes = StepSizeToKeepUpWithAllocations();
  size_t bytes_to_mark = progress_bytes + allocation_bytes;
  AddScheduledBytesToMark(bytes_to_mark);

  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Scheduled %zuKB to mark based on allocation "
        "(progress=%zuKB, allocation=%zuKB)\n",
        bytes_to_mark / KB, progress_bytes / KB, allocation_bytes / KB);
  }
}

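// Folds the bytes marked by concurrent marker threads since the last fetch
// into bytes_marked_, so that the scheduling logic sees the combined
// progress of the main thread and the background tasks.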
void IncrementalMarking::FetchBytesMarkedConcurrently() {
  if (FLAG_concurrent_marking) {
    size_t current_bytes_marked_concurrently =
        heap()->concurrent_marking()->TotalMarkedBytes();
    // The concurrent_marking()->TotalMarkedBytes() is not monotonic for a
    // short period of time when a concurrent marking task is finishing.
    if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
      bytes_marked_ +=
          current_bytes_marked_concurrently - bytes_marked_concurrently_;
      bytes_marked_concurrently_ = current_bytes_marked_concurrently;
    }
    if (FLAG_trace_incremental_marking) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marked %zuKB on background threads\n",
          heap_->concurrent_marking()->TotalMarkedBytes() / KB);
    }
  }
}

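// Returns how many bytes the next step should mark: the gap between the
// scheduled work and the work already done, minus a small margin for
// allocation-triggered steps.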
size_t IncrementalMarking::ComputeStepSizeInBytes(StepOrigin step_origin) {
  FetchBytesMarkedConcurrently();
  if (FLAG_trace_incremental_marking) {
    if (scheduled_bytes_to_mark_ > bytes_marked_) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marker is %zuKB behind schedule\n",
          (scheduled_bytes_to_mark_ - bytes_marked_) / KB);
    } else {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marker is %zuKB ahead of schedule\n",
          (bytes_marked_ - scheduled_bytes_to_mark_) / KB);
    }
  }
  // Allow steps on allocation to get behind the schedule by a small amount.
  // This gives higher priority to steps in tasks.
  size_t kScheduleMarginInBytes = step_origin == StepOrigin::kV8 ? 1 * MB : 0;
  if (bytes_marked_ + kScheduleMarginInBytes > scheduled_bytes_to_mark_)
    return 0;
  return scheduled_bytes_to_mark_ - bytes_marked_ - kScheduleMarginInBytes;
}

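// Called via the allocation observers: schedules additional marking work
// based on the allocation volume and performs a bounded marking step.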
void IncrementalMarking::AdvanceOnAllocation() {
  // Code using an AlwaysAllocateScope assumes that the GC state does not
  // change; that implies that no marking steps must be performed.
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      state_ != MARKING || heap_->always_allocate()) {
    return;
  }
  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
  ScheduleBytesToMarkBasedOnAllocation();
  Step(kMaxStepSizeInMs, GC_VIA_STACK_GUARD, StepOrigin::kV8);
}

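// Performs a single incremental marking step: merges objects on hold back
// into the shared worklist, processes up to the computed number of bytes of
// V8 objects, gives the embedder a proportional time slice, and requests
// finalization or completion once both worklists are empty.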
StepResult IncrementalMarking::Step(double max_step_size_in_ms,
                                    CompletionAction action,
                                    StepOrigin step_origin) {
  double start = heap_->MonotonicallyIncreasingTimeInMs();

  StepResult combined_result = StepResult::kMoreWorkRemaining;
  size_t bytes_to_process = 0;
  size_t v8_bytes_processed = 0;
  double embedder_duration = 0.0;
  double embedder_deadline = 0.0;
  if (state_ == MARKING) {
    if (FLAG_concurrent_marking) {
      // It is safe to merge back all objects that were on hold to the shared
      // work list at Step because we are at a safepoint where all objects
      // are properly initialized.
      local_marking_worklists()->MergeOnHold();
    }

// Only print marking worklist in debug mode to save ~40KB of code size.
#ifdef DEBUG
    if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
        FLAG_trace_gc_verbose) {
      collector_->marking_worklists()->Print();
    }
#endif
    if (FLAG_trace_incremental_marking) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marking speed %.fKB/ms\n",
          heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
    }
    // The first step after Scavenge will see many allocated bytes.
    // Cap the step size to distribute the marking work more uniformly.
    const double marking_speed =
        heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond();
    size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
        max_step_size_in_ms, marking_speed);
    bytes_to_process =
        std::min(ComputeStepSizeInBytes(step_origin), max_step_size);
    bytes_to_process = std::max({bytes_to_process, kMinStepSizeInBytes});

    // Perform a single V8 and a single embedder step. In case both have been
    // observed as empty back to back, we can finalize.
    //
    // This ignores that case where the embedder finds new V8-side objects. The
    // assumption is that large graphs are well connected and can mostly be
    // processed on their own. For small graphs, helping is not necessary.
    v8_bytes_processed = collector_->ProcessMarkingWorklist(bytes_to_process);
    StepResult v8_result = local_marking_worklists()->IsEmpty()
                               ? StepResult::kNoImmediateWork
                               : StepResult::kMoreWorkRemaining;
    StepResult embedder_result = StepResult::kNoImmediateWork;
    if (heap_->local_embedder_heap_tracer()->InUse()) {
      embedder_deadline =
          std::min(max_step_size_in_ms,
                   static_cast<double>(bytes_to_process) / marking_speed);
      // TODO(chromium:1056170): Replace embedder_deadline with bytes_to_process
      // after migrating blink to the cppgc library and after v8 can directly
      // push objects to Oilpan.
      embedder_result = EmbedderStep(embedder_deadline, &embedder_duration);
    }
    bytes_marked_ += v8_bytes_processed;
    combined_result = CombineStepResults(v8_result, embedder_result);

    if (combined_result == StepResult::kNoImmediateWork) {
      if (!finalize_marking_completed_) {
        FinalizeMarking(action);
        FastForwardSchedule();
        combined_result = StepResult::kWaitingForFinalization;
        incremental_marking_job()->Start(heap_);
      } else {
        MarkingComplete(action);
        combined_result = StepResult::kWaitingForFinalization;
      }
    }
    if (FLAG_concurrent_marking) {
      local_marking_worklists()->ShareWork();
      heap_->concurrent_marking()->RescheduleJobIfNeeded();
    }
  }
  if (state_ == MARKING) {
    // Note that we do not report any marked bytes in case of finishing
    // sweeping, as we did not process the marking worklist.
    const double v8_duration =
        heap_->MonotonicallyIncreasingTimeInMs() - start - embedder_duration;
    heap_->tracer()->AddIncrementalMarkingStep(v8_duration, v8_bytes_processed);
  }
  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Step %s V8: %zuKB (%zuKB), embedder: %fms (%fms) "
        "in %.1f\n",
        step_origin == StepOrigin::kV8 ? "in v8" : "in task",
        v8_bytes_processed / KB, bytes_to_process / KB, embedder_duration,
        embedder_deadline, heap_->MonotonicallyIncreasingTimeInMs() - start);
  }
  return combined_result;
}

}  // namespace internal
}  // namespace v8