// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      initial_old_generation_size_(0),
      bytes_marked_ahead_of_schedule_(0),
      unscanned_bytes_of_large_object_(0),
      idle_marking_delay_counter_(0),
      incremental_marking_finalization_rounds_(0),
      is_compacting_(false),
      should_hurry_(false),
      was_activated_(false),
      black_allocation_(false),
      finalize_marking_completed_(false),
      request_type_(NONE),
      new_generation_observer_(*this, kAllocatedThreshold),
      old_generation_observer_(*this, kAllocatedThreshold) {}

bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
  HeapObject* value_heap_obj = HeapObject::cast(value);
  MarkBit value_bit = ObjectMarking::MarkBitFrom(value_heap_obj);
  DCHECK(!Marking::IsImpossible(value_bit));

  MarkBit obj_bit = ObjectMarking::MarkBitFrom(obj);
  DCHECK(!Marking::IsImpossible(obj_bit));
  bool is_black = Marking::IsBlack(obj_bit);

  if (is_black && Marking::IsWhite(value_bit)) {
    WhiteToGreyAndPush(value_heap_obj, value_bit);
    RestartIfNotMarking();
  }
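  // The caller only needs to record the slot if we are compacting and the
  // source object is already black, i.e. it will not be rescanned.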
  return is_compacting_ && is_black;
}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, value) && slot != NULL) {
    // Object is not going to be rescanned; we need to record the slot.
    heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
  }
}


void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  isolate->heap()->incremental_marking()->RecordWrite(obj, slot, *slot);
}

// static
void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host,
                                                        Object** slot,
                                                        Isolate* isolate) {
  DCHECK(host->IsJSFunction());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  Code* value = Code::cast(
      Code::GetObjectFromEntryAddress(reinterpret_cast<Address>(slot)));
  marking->RecordWriteOfCodeEntry(host, slot, value);
}

void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()
                     ->inner_pointer_to_code_cache()
                     ->GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, value)) {
    DCHECK(slot != NULL);
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        host, reinterpret_cast<Address>(slot), value);
  }
}

void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
                                                 Object* value) {
  if (BaseRecordWrite(host, value)) {
    // Object is not going to be rescanned.  We need to record the slot.
    heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
  }
}


void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
  Marking::WhiteToGrey(mark_bit);
  heap_->mark_compact_collector()->marking_deque()->Push(obj);
}


static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = ObjectMarking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytes(heap_obj, -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}

void IncrementalMarking::TransferMark(Heap* heap, Address old_start,
                                      Address new_start) {
  // This is only used when resizing an object.
  DCHECK(MemoryChunk::FromAddress(old_start) ==
         MemoryChunk::FromAddress(new_start));

  if (!heap->incremental_marking()->IsMarking()) return;

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
  if (old_start == new_start) return;

  MarkBit new_mark_bit = ObjectMarking::MarkBitFrom(new_start);
  MarkBit old_mark_bit = ObjectMarking::MarkBitFrom(old_start);

#ifdef DEBUG
  Marking::ObjectColor old_color = Marking::Color(old_mark_bit);
#endif

  if (Marking::IsBlack(old_mark_bit)) {
    Marking::BlackToWhite(old_mark_bit);
    Marking::MarkBlack(new_mark_bit);
    return;
  } else if (Marking::IsGrey(old_mark_bit)) {
    Marking::GreyToWhite(old_mark_bit);
    heap->incremental_marking()->WhiteToGreyAndPush(
        HeapObject::FromAddress(new_start), new_mark_bit);
    heap->incremental_marking()->RestartIfNotMarking();
  }

#ifdef DEBUG
  Marking::ObjectColor new_color = Marking::Color(new_mark_bit);
  DCHECK(new_color == old_color);
#endif
}

class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      DCHECK(!FLAG_use_marking_progress_bar ||
             chunk->owner()->identity() == LO_SPACE);
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk of
      // the array and try to push it onto the marking deque again until it is
      // fully scanned. Fall back to scanning it through to the end in case this
      // fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointers(heap, object, HeapObject::RawField(object, start_offset),
                      HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end =
            heap->mark_compact_collector()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        if (Marking::IsGrey(ObjectMarking::MarkBitFrom(object))) {
          heap->mark_compact_collector()->marking_deque()->Unshift(object);
        } else {
          DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
          heap->mark_compact_collector()->UnshiftBlack(object);
        }
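        // Report how many bytes are still unscanned so that only the portion
        // visited in this step counts as marking progress.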
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark cache black with a separate pass when we finish marking.
    // Note that GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined(map->GetIsolate())) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
    Object* target = *p;
    if (target->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(object, p, target);
      MarkObject(heap, target);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
                                   Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* target = *p;
      if (target->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(object, p, target);
        MarkObject(heap, target);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    IncrementalMarking::MarkGrey(heap, HeapObject::cast(obj));
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = ObjectMarking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      Marking::MarkBlack(mark_bit);
      MemoryChunk::IncrementLiveBytes(heap_object, heap_object->Size());
      return true;
    }
    return false;
  }
};

void IncrementalMarking::IterateBlackObject(HeapObject* object) {
  if (IsMarking() && Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
    Page* page = Page::FromAddress(object->address());
    if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
      // IterateBlackObject requires us to visit the whole object.
      page->ResetProgressBar();
    }
    Map* map = object->map();
    MarkGrey(heap_, map);
    IncrementalMarkingMarkingVisitor::IterateBody(map, object);
  }
}

class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitPointer(Object** p) override { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    IncrementalMarking::MarkGrey(heap_, HeapObject::cast(obj));
  }

  Heap* heap_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
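    // Stores from old-space pages still need the generational write barrier
    // when marking is off, so POINTERS_FROM_HERE_ARE_INTERESTING stays set.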
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking) {
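  // New-space pages always keep POINTERS_TO_HERE_ARE_INTERESTING set so that
  // the write barrier records old-to-new pointers even when marking is off.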
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  for (Page* p : *space) {
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  for (Page* p : *space) {
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  for (LargePage* lop : *heap_->lo_space()) {
    SetOldSpacePageFlags(lop, false, false);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  for (Page* p : *space) {
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  for (Page* p : *space) {
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  for (LargePage* lop : *heap_->lo_space()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
  }
}


bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::CanBeActivated() {
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled();
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially the stub is generated in STORE_BUFFER_ONLY mode, so we don't
    // need to do anything if incremental marking is not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
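  // Walk the code-stub dictionary and repatch every RecordWrite stub to the
  // requested mode.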
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  Isolate* isolate = heap->isolate();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(isolate, k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}

void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
  if (FLAG_trace_incremental_marking) {
    int old_generation_size_mb =
        static_cast<int>(heap()->PromotedSpaceSizeOfObjects() / MB);
    int old_generation_limit_mb =
        static_cast<int>(heap()->old_generation_allocation_limit() / MB);
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Start (%s): old generation %dMB, limit %dMB, "
        "slack %dMB\n",
        Heap::GarbageCollectionReasonToString(gc_reason),
        old_generation_size_mb, old_generation_limit_mb,
        Max(0, old_generation_limit_mb - old_generation_size_mb));
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  Counters* counters = heap_->isolate()->counters();

  counters->incremental_marking_reason()->AddSample(
      static_cast<int>(gc_reason));
  HistogramTimerScope incremental_marking_scope(
      counters->gc_incremental_marking_start());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
  heap_->tracer()->NotifyIncrementalMarkingStart();

  start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
  initial_old_generation_size_ = heap_->PromotedSpaceSizeOfObjects();
  old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
  bytes_allocated_ = 0;
  bytes_marked_ahead_of_schedule_ = 0;
  should_hurry_ = false;
  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking();
  } else {
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  SpaceIterator it(heap_);
  while (it.has_next()) {
    Space* space = it.next();
    if (space == heap_->new_space()) {
      space->AddAllocationObserver(&new_generation_observer_);
    } else {
      space->AddAllocationObserver(&old_generation_observer_);
    }
  }

  incremental_marking_job()->Start(heap_);
}


void IncrementalMarking::StartMarking() {
  if (heap_->isolate()->serializer_enabled()) {
    // Black allocation currently starts when we start incremental marking,
    // but we cannot enable black allocation while deserializing. Hence, we
    // have to delay the start of incremental marking in that case.
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Start delayed - serializer\n");
    }
    return;
  }
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Start marking\n");
  }

  is_compacting_ =
      !FLAG_never_compact && heap_->mark_compact_collector()->StartCompaction();

  state_ = MARKING;

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE);
    heap_->local_embedder_heap_tracer()->TracePrologue();
  }

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  heap_->mark_compact_collector()->marking_deque()->StartUsing();

  ActivateIncrementalWriteBarrier();

// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
  }
}

void IncrementalMarking::StartBlackAllocation() {
  DCHECK(FLAG_black_allocation);
  DCHECK(IsMarking());
  black_allocation_ = true;
  heap()->old_space()->MarkAllocationInfoBlack();
  heap()->map_space()->MarkAllocationInfoBlack();
  heap()->code_space()->MarkAllocationInfoBlack();
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation started\n");
  }
}

void IncrementalMarking::FinishBlackAllocation() {
  if (black_allocation_) {
    black_allocation_ = false;
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Black allocation finished\n");
    }
  }
}

void IncrementalMarking::AbortBlackAllocation() {
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation aborted\n");
  }
}

void IncrementalMarking::MarkRoots() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


void IncrementalMarking::MarkObjectGroups() {
  TRACE_GC(heap_->tracer(),
           GCTracer::Scope::MC_INCREMENTAL_FINALIZE_OBJECT_GROUPING);

  DCHECK(!heap_->local_embedder_heap_tracer()->InUse());
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkGrey);
  heap_->isolate()->global_handles()->IterateObjectGroups(
      &visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);
  heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
  heap_->isolate()->global_handles()->RemoveObjectGroups();
}


void IncrementalMarking::ProcessWeakCells() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  Object* the_hole_value = heap()->the_hole_value();
  Object* weak_cell_obj = heap()->encountered_weak_cells();
  Object* weak_cell_head = Smi::kZero;
  WeakCell* prev_weak_cell_obj = NULL;
  while (weak_cell_obj != Smi::kZero) {
    WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
    // We do not insert cleared weak cells into the list, so the value
    // cannot be a Smi here.
    HeapObject* value = HeapObject::cast(weak_cell->value());
    // Remove weak cells with live objects from the list, they do not need
    // clearing.
    if (MarkCompactCollector::IsMarked(value)) {
      // Record slot, if value is pointing to an evacuation candidate.
      Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
      heap_->mark_compact_collector()->RecordSlot(weak_cell, slot, *slot);
      // Remove entry somewhere after top.
      if (prev_weak_cell_obj != NULL) {
        prev_weak_cell_obj->set_next(weak_cell->next());
      }
      weak_cell_obj = weak_cell->next();
      weak_cell->clear_next(the_hole_value);
    } else {
      if (weak_cell_head == Smi::kZero) {
        weak_cell_head = weak_cell;
      }
      prev_weak_cell_obj = weak_cell;
      weak_cell_obj = weak_cell->next();
    }
  }
  // Top may have changed.
  heap()->set_encountered_weak_cells(weak_cell_head);
}


bool ShouldRetainMap(Map* map, int age) {
  if (age == 0) {
    // The map has aged. Do not retain this map.
    return false;
  }
  Object* constructor = map->GetConstructor();
  if (!constructor->IsHeapObject() ||
      Marking::IsWhite(
          ObjectMarking::MarkBitFrom(HeapObject::cast(constructor)))) {
    // The constructor is dead, no new objects with this map can
    // be created. Do not retain this map.
    return false;
  }
  return true;
}


void IncrementalMarking::RetainMaps() {
  // Do not retain dead maps if flag disables it or there is
  // - memory pressure (reduce_memory_footprint_),
  // - GC is requested by tests or dev-tools (abort_incremental_marking_).
  bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
                                   heap()->ShouldAbortIncrementalMarking() ||
                                   FLAG_retain_maps_for_n_gc == 0;
  ArrayList* retained_maps = heap()->retained_maps();
  int length = retained_maps->Length();
  // The number_of_disposed_maps separates maps in the retained_maps
  // array that were created before and after context disposal.
  // We do not age and retain disposed maps to avoid memory leaks.
  int number_of_disposed_maps = heap()->number_of_disposed_maps_;
  for (int i = 0; i < length; i += 2) {
    DCHECK(retained_maps->Get(i)->IsWeakCell());
    WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
    if (cell->cleared()) continue;
    int age = Smi::cast(retained_maps->Get(i + 1))->value();
    int new_age;
    Map* map = Map::cast(cell->value());
    MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
    if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
        Marking::IsWhite(map_mark)) {
      if (ShouldRetainMap(map, age)) {
        MarkGrey(heap(), map);
      }
      Object* prototype = map->prototype();
      if (age > 0 && prototype->IsHeapObject() &&
          Marking::IsWhite(
              ObjectMarking::MarkBitFrom(HeapObject::cast(prototype)))) {
        // The prototype is not marked, age the map.
        new_age = age - 1;
      } else {
        // The prototype and the constructor are marked, this map keeps only
        // transition tree alive, not JSObjects. Do not age the map.
        new_age = age;
      }
    } else {
      new_age = FLAG_retain_maps_for_n_gc;
    }
    // Compact the array and update the age.
    if (new_age != age) {
      retained_maps->Set(i + 1, Smi::FromInt(new_age));
    }
  }
}

void IncrementalMarking::FinalizeIncrementally() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE_BODY);
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  double start = heap_->MonotonicallyIncreasingTimeInMs();

  int old_marking_deque_top =
      heap_->mark_compact_collector()->marking_deque()->top();

  // After finishing incremental marking, we try to discover all unmarked
  // objects to reduce the marking load in the final pause.
  // 1) We scan and mark the roots again to find all changes to the root set.
  // 2) We mark the object groups.
  // 3) Age and retain maps embedded in optimized code.
  // 4) Remove weak cells with live values from the list of weak cells; they
  // do not need processing during GC.
  MarkRoots();
  if (!heap_->local_embedder_heap_tracer()->InUse()) {
    MarkObjectGroups();
  }
  if (incremental_marking_finalization_rounds_ == 0) {
    // Map retaining is needed for performance, not correctness,
    // so we can do it only once at the beginning of the finalization.
    RetainMaps();
  }
  ProcessWeakCells();

  int marking_progress =
      abs(old_marking_deque_top -
          heap_->mark_compact_collector()->marking_deque()->top());

  marking_progress += static_cast<int>(
      heap_->local_embedder_heap_tracer()->NumberOfCachedWrappersToTrace());

  double end = heap_->MonotonicallyIncreasingTimeInMs();
  double delta = end - start;
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Finalize incrementally round %d, "
        "spent %d ms, marking progress %d.\n",
        incremental_marking_finalization_rounds_, static_cast<int>(delta),
        marking_progress);
  }

  ++incremental_marking_finalization_rounds_;
  if ((incremental_marking_finalization_rounds_ >=
       FLAG_max_incremental_marking_finalization_rounds) ||
      (marking_progress <
       FLAG_min_progress_during_incremental_marking_finalization)) {
    finalize_marking_completed_ = true;
  }

  if (FLAG_black_allocation && !heap()->ShouldReduceMemory() &&
      !black_allocation_) {
    // TODO(hpayer): Move to an earlier point as soon as we make faster marking
    // progress.
    StartBlackAllocation();
  }
}


void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  int current = marking_deque->bottom();
  int mask = marking_deque->mask();
  int limit = marking_deque->top();
  HeapObject** array = marking_deque->array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

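  // The deque is a ring buffer; indices wrap around its backing store via
  // |mask|.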
  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    // Only pointers to from space have to be updated.
    if (heap_->InFromSpace(obj)) {
      MapWord map_word = obj->map_word();
      // There may be objects on the marking deque that do not exist anymore,
      // e.g. left trimmed objects or objects from the root set (frames).
      // If these objects are dead at scavenging time, their marking deque
      // entries will not point to forwarding addresses. Hence, we can discard
      // them.
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        if (Marking::IsBlack(ObjectMarking::MarkBitFrom(dest->address())))
          continue;
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
        MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one word filler objects that appear on the
      // stack when we perform in place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
      MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque->set_top(new_top);
}


void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkGrey(heap_, map);

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

#if ENABLE_SLOW_DCHECKS
  MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlack(obj, size);
}

void IncrementalMarking::MarkGrey(Heap* heap, HeapObject* object) {
  MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
  if (Marking::IsWhite(mark_bit)) {
    heap->incremental_marking()->WhiteToGreyAndPush(object, mark_bit);
  }
}

void IncrementalMarking::MarkBlack(HeapObject* obj, int size) {
  MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
  if (Marking::IsBlack(mark_bit)) return;
  Marking::GreyToBlack(mark_bit);
  MemoryChunk::IncrementLiveBytes(obj, size);
}

intptr_t IncrementalMarking::ProcessMarkingDeque(
    intptr_t bytes_to_process, ForceCompletionAction completion) {
  intptr_t bytes_processed = 0;
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty() && (bytes_processed < bytes_to_process ||
                                       completion == FORCE_COMPLETION)) {
    HeapObject* obj = marking_deque->Pop();

    // Left trimming may result in white filler objects on the marking deque.
    // Ignore these objects.
    if (obj->IsFiller()) {
      DCHECK(Marking::IsImpossible(ObjectMarking::MarkBitFrom(obj)) ||
             Marking::IsWhite(ObjectMarking::MarkBitFrom(obj)));
      continue;
    }

    Map* map = obj->map();
    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
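    // For large objects scanned via the progress bar, only part of the object
    // may have been visited; do not count the unscanned remainder.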
    bytes_processed += size - unscanned_bytes_of_large_object_;
  }
  return bytes_processed;
}


void IncrementalMarking::Hurry() {
  // A scavenge may have pushed new objects on the marking deque (due to black
  // allocation) even in COMPLETE state. This may happen if scavenges are
  // forced e.g. in tests. It should not happen when COMPLETE was set when
  // incremental marking finished and a regular GC was triggered after that
  // because should_hurry_ will force a full GC.
  if (!heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking) {
      start = heap_->MonotonicallyIncreasingTimeInMs();
      if (FLAG_trace_incremental_marking) {
        heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingDeque(0, FORCE_COMPLETION);
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking) {
      double end = heap_->MonotonicallyIncreasingTimeInMs();
      double delta = end - start;
      if (FLAG_trace_incremental_marking) {
        heap()->isolate()->PrintWithTimestamp(
            "[IncrementalMarking] Complete (hurry), spent %d ms.\n",
            static_cast<int>(delta));
      }
    }
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined(heap_->isolate())) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined(heap_->isolate())) {
      MarkBit mark_bit = ObjectMarking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytes(cache, cache->Size());
      }
    }
    context = Context::cast(context)->next_context_link();
  }
}


void IncrementalMarking::Stop() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    int old_generation_size_mb =
        static_cast<int>(heap()->PromotedSpaceSizeOfObjects() / MB);
    int old_generation_limit_mb =
        static_cast<int>(heap()->old_generation_allocation_limit() / MB);
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, "
        "overshoot %dMB\n",
        old_generation_size_mb, old_generation_limit_mb,
        Max(0, old_generation_size_mb - old_generation_limit_mb));
  }

  SpaceIterator it(heap_);
  while (it.has_next()) {
    Space* space = it.next();
    if (space == heap_->new_space()) {
      space->RemoveAllocationObserver(&new_generation_observer_);
    } else {
      space->RemoveAllocationObserver(&old_generation_observer_);
    }
  }

  IncrementalMarking::set_should_hurry(false);
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
  FinishBlackAllocation();
}


void IncrementalMarking::Finalize() {
  Hurry();
  Stop();
}


void IncrementalMarking::FinalizeMarking(CompletionAction action) {
  DCHECK(!finalize_marking_completed_);
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] requesting finalization of incremental "
        "marking.\n");
  }
  request_type_ = FINALIZATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now.  This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context).  If a few things get allocated between now and then
  // that shouldn't make us do a scavenge and keep being incremental, so we set
  // the should-hurry flag to indicate that there can't be much work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  finalize_marking_completed_ = false;
  incremental_marking_finalization_rounds_ = 0;
}

double IncrementalMarking::AdvanceIncrementalMarking(
    double deadline_in_ms, CompletionAction completion_action,
    ForceCompletionAction force_completion, StepOrigin step_origin) {
  DCHECK(!IsStopped());

  double remaining_time_in_ms = 0.0;
  intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
      kStepSizeInMs,
      heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());

  do {
    Step(step_size_in_bytes, completion_action, force_completion, step_origin);
    remaining_time_in_ms =
        deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
  } while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() &&
           !heap()->mark_compact_collector()->marking_deque()->IsEmpty());
  return remaining_time_in_ms;
}


void IncrementalMarking::FinalizeSweeping() {
  DCHECK(state_ == SWEEPING);
  if (heap_->mark_compact_collector()->sweeping_in_progress() &&
      (!FLAG_concurrent_sweeping ||
       !heap_->mark_compact_collector()->sweeper().AreSweeperTasksRunning())) {
    heap_->mark_compact_collector()->EnsureSweepingCompleted();
  }
  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking();
  }
}

size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
  // Update bytes_allocated_ based on the allocation counter.
  size_t current_counter = heap_->OldGenerationAllocationCounter();
  bytes_allocated_ += current_counter - old_generation_allocation_counter_;
  old_generation_allocation_counter_ = current_counter;
  return bytes_allocated_;
}

size_t IncrementalMarking::StepSizeToMakeProgress() {
  // We increase step size gradually based on the time passed in order to
  // leave marking work to standalone tasks. The ramp up duration and the
  // target step count are chosen based on benchmarks.
  const int kRampUpIntervalMs = 300;
  const size_t kTargetStepCount = 128;
  const size_t kTargetStepCountAtOOM = 16;
  size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;

  if (heap()->IsCloseToOutOfMemory(oom_slack)) {
    return heap()->PromotedSpaceSizeOfObjects() / kTargetStepCountAtOOM;
  }

  size_t step_size = Max(initial_old_generation_size_ / kTargetStepCount,
                         IncrementalMarking::kAllocatedThreshold);
  double time_passed_ms =
      heap_->MonotonicallyIncreasingTimeInMs() - start_time_ms_;
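  // Ramp the step size up linearly from zero to its full value over
  // kRampUpIntervalMs.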
  double factor = Min(time_passed_ms / kRampUpIntervalMs, 1.0);
  return static_cast<size_t>(factor * step_size);
}

void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return;
  }

  size_t bytes_to_process =
      StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();

  if (bytes_to_process >= IncrementalMarking::kAllocatedThreshold) {
    // The first step after Scavenge will see many allocated bytes.
    // Cap the step size to distribute the marking work more uniformly.
    size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
        kMaxStepSizeInMs,
        heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
    bytes_to_process = Min(bytes_to_process, max_step_size);

    size_t bytes_processed = 0;
    if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
      // Steps performed in tasks have put us ahead of schedule.
      // We skip processing of marking dequeue here and thus
      // shift marking time from inside V8 to standalone tasks.
      bytes_marked_ahead_of_schedule_ -= bytes_to_process;
      bytes_processed = bytes_to_process;
    } else {
      bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
                             FORCE_COMPLETION, StepOrigin::kV8);
    }
    bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
  }
}

size_t IncrementalMarking::Step(size_t bytes_to_process,
                                CompletionAction action,
                                ForceCompletionAction completion,
                                StepOrigin step_origin) {
  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
  double start = heap_->MonotonicallyIncreasingTimeInMs();

  if (state_ == SWEEPING) {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
    FinalizeSweeping();
  }

  size_t bytes_processed = 0;
  if (state_ == MARKING) {
    const bool incremental_wrapper_tracing =
        FLAG_incremental_marking_wrappers &&
        heap_->local_embedder_heap_tracer()->InUse();
    const bool process_wrappers =
        incremental_wrapper_tracing &&
        (heap_->local_embedder_heap_tracer()
             ->RequiresImmediateWrapperProcessing() ||
         heap_->mark_compact_collector()->marking_deque()->IsEmpty());
    bool wrapper_work_left = incremental_wrapper_tracing;
    if (!process_wrappers) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
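      // Marking done in background tasks is credited as
      // bytes_marked_ahead_of_schedule_ so that later allocation-driven steps
      // can be skipped (see AdvanceIncrementalMarkingOnAllocation).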
      if (step_origin == StepOrigin::kTask) {
        bytes_marked_ahead_of_schedule_ += bytes_processed;
      }
    } else {
      const double wrapper_deadline =
          heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
      TRACE_GC(heap()->tracer(),
               GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
      wrapper_work_left = heap_->local_embedder_heap_tracer()->Trace(
          wrapper_deadline, EmbedderHeapTracer::AdvanceTracingActions(
                                EmbedderHeapTracer::ForceCompletionAction::
                                    DO_NOT_FORCE_COMPLETION));
    }

    if (heap_->mark_compact_collector()->marking_deque()->IsEmpty() &&
        !wrapper_work_left) {
      if (completion == FORCE_COMPLETION ||
          IsIdleMarkingDelayCounterLimitReached()) {
        if (!finalize_marking_completed_) {
          FinalizeMarking(action);
        } else {
          MarkingComplete(action);
        }
      } else {
        IncrementIdleMarkingDelayCounter();
      }
    }
  }

  double end = heap_->MonotonicallyIncreasingTimeInMs();
  double duration = (end - start);
  // Note that we report zero bytes here when sweeping was in progress or
  // when we just started incremental marking. In these cases we did not
  // process the marking deque.
  heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Step %s %zu bytes (%zu) in %.1f\n",
        step_origin == StepOrigin::kV8 ? "in v8" : "in task", bytes_processed,
        bytes_to_process, duration);
  }
  return bytes_processed;
}


bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
  return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}


void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
  idle_marking_delay_counter_++;
}


void IncrementalMarking::ClearIdleMarkingDelayCounter() {
  idle_marking_delay_counter_ = 0;
}

}  // namespace internal
}  // namespace v8