// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/incremental-marking.h"

#include "src/codegen/compilation-cache.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/safepoint.h"
#include "src/heap/sweeper.h"
#include "src/init/v8.h"
#include "src/numbers/conversions.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/slots-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/objects/visitors.h"
#include "src/tracing/trace-event.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
                                        size_t size) {
  Heap* heap = incremental_marking_->heap();
  VMState<GC> state(heap->isolate());
  RuntimeCallTimerScope runtime_timer(
      heap->isolate(),
      RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
  incremental_marking_->AdvanceOnAllocation();
  // AdvanceOnAllocation can start incremental marking.
  incremental_marking_->EnsureBlackAllocated(addr, size);
}

IncrementalMarking::IncrementalMarking(Heap* heap,
                                       WeakObjects* weak_objects)
    : heap_(heap),
      collector_(heap->mark_compact_collector()),
      weak_objects_(weak_objects),
      new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
      old_generation_observer_(this, kOldGenerationAllocatedThreshold) {
  SetState(STOPPED);
}

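// Greys |obj| if it is still white and visits it immediately, so the marker
// processes the object's current fields before an imminent layout change.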
void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
    HeapObject obj) {
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingLayoutChange");
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_LAYOUT_CHANGE);
  marking_state()->WhiteToGrey(obj);
  collector_->VisitObject(obj);
}

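// Marks |obj| black from a background thread and accounts its size in the
// background live-bytes counter of the owning chunk.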
void IncrementalMarking::MarkBlackBackground(HeapObject obj, int object_size) {
  MarkBit mark_bit = atomic_marking_state()->MarkBitFrom(obj);
  Marking::MarkBlack<AccessMode::ATOMIC>(mark_bit);
  MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
  IncrementLiveBytesBackground(chunk, static_cast<intptr_t>(object_size));
}

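// Called when an array is left-trimmed while marking is active: the old
// object start |from| is marked black and visited, and the new start |to| is
// marked black as well so the trimmed object stays live.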
void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
  DCHECK(IsMarking());
  DCHECK(MemoryChunk::FromHeapObject(from)->SweepingDone());
  DCHECK_EQ(MemoryChunk::FromHeapObject(from), MemoryChunk::FromHeapObject(to));
  DCHECK_NE(from, to);

  MarkBit new_mark_bit = marking_state()->MarkBitFrom(to);

  if (black_allocation() && Marking::IsBlack<kAtomicity>(new_mark_bit)) {
    // Nothing to do if the object is in black area.
    return;
  }
  MarkBlackAndVisitObjectDueToLayoutChange(from);
  DCHECK(marking_state()->IsBlack(from));
  // Mark the new address as black.
  if (from.address() + kTaggedSize == to.address()) {
    // The old and the new markbits overlap. The |to| object has the
    // grey color. To make it black, we need to set the second bit.
    DCHECK(new_mark_bit.Get<kAtomicity>());
    new_mark_bit.Next().Set<kAtomicity>();
  } else {
    bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
    DCHECK(success);
    USE(success);
  }
  DCHECK(marking_state()->IsBlack(to));
}

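// Root visitor that greys every heap object reachable from the strong roots
// and pushes it onto the incremental marking worklist.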
class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitRootPointer(Root root, const char* description,
                        FullObjectSlot p) override {
    MarkObjectByPointer(p);
  }

  void VisitRootPointers(Root root, const char* description,
                         FullObjectSlot start, FullObjectSlot end) override {
    for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(FullObjectSlot p) {
    Object obj = *p;
    if (!obj.IsHeapObject()) return;

    heap_->incremental_marking()->WhiteToGreyAndPush(HeapObject::cast(obj));
  }

  Heap* heap_;
};


bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::CanBeActivated() {
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled();
}

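// Returns true while both the V8 old generation and the global (V8 plus
// embedder) heap are still below their incremental marking activation
// thresholds.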
bool IncrementalMarking::IsBelowActivationThresholds() const {
  return heap_->OldGenerationSizeOfObjects() <= kV8ActivationThreshold &&
         heap_->GlobalSizeOfObjects() <= kGlobalActivationThreshold;
}

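// Starts an incremental marking cycle. If sweeping from the previous cycle is
// still in progress, the marker only enters the SWEEPING state here and the
// actual marking starts once sweeping has finished.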
void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
  if (FLAG_trace_incremental_marking) {
    const size_t old_generation_size_mb =
        heap()->OldGenerationSizeOfObjects() / MB;
    const size_t old_generation_limit_mb =
        heap()->old_generation_allocation_limit() / MB;
    const size_t global_size_mb = heap()->GlobalSizeOfObjects() / MB;
    const size_t global_limit_mb = heap()->global_allocation_limit() / MB;
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Start (%s): (size/limit/slack) v8: %zuMB / %zuMB "
        "/ %zuMB global: %zuMB / %zuMB / %zuMB\n",
        Heap::GarbageCollectionReasonToString(gc_reason),
        old_generation_size_mb, old_generation_limit_mb,
        old_generation_size_mb > old_generation_limit_mb
            ? 0
            : old_generation_limit_mb - old_generation_size_mb,
        global_size_mb, global_limit_mb,
        global_size_mb > global_limit_mb ? 0
                                         : global_limit_mb - global_size_mb);
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  Counters* counters = heap_->isolate()->counters();

  counters->incremental_marking_reason()->AddSample(
      static_cast<int>(gc_reason));
  HistogramTimerScope incremental_marking_scope(
      counters->gc_incremental_marking_start());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_START);
  heap_->tracer()->NotifyIncrementalMarkingStart();

  start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
  time_to_force_completion_ = 0.0;
  initial_old_generation_size_ = heap_->OldGenerationSizeOfObjects();
  old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
  bytes_marked_ = 0;
  scheduled_bytes_to_mark_ = 0;
  schedule_update_time_ms_ = start_time_ms_;
  bytes_marked_concurrently_ = 0;
  was_activated_ = true;

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_INCREMENTAL_SWEEP_ARRAY_BUFFERS);
    heap_->array_buffer_sweeper()->EnsureFinished();
  }

  if (!collector_->sweeping_in_progress()) {
    StartMarking();
  } else {
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Start sweeping.\n");
    }
    SetState(SWEEPING);
  }

  heap_->AddAllocationObserversToAllSpaces(&old_generation_observer_,
                                           &new_generation_observer_);
  incremental_marking_job()->Start(heap_);
}


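// Transitions into the MARKING state (unless the serializer is active, in
// which case the start is delayed): sets up compaction, activates the marking
// barrier and black allocation, marks the roots, and schedules concurrent
// marking and embedder tracing.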
void IncrementalMarking::StartMarking() {
  if (heap_->isolate()->serializer_enabled()) {
    // Black allocation currently starts when we start incremental marking,
    // but we cannot enable black allocation while deserializing. Hence, we
    // have to delay the start of incremental marking in that case.
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Start delayed - serializer\n");
    }
    return;
  }
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Start marking\n");
  }

  heap_->InvokeIncrementalMarkingPrologueCallbacks();

  is_compacting_ = !FLAG_never_compact && collector_->StartCompaction();
  collector_->StartMarking();

  SetState(MARKING);

  MarkingBarrier::ActivateAll(heap(), is_compacting_);

  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  StartBlackAllocation();

  MarkRoots();

  if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
    heap_->concurrent_marking()->ScheduleJob();
  }

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
  }

  {
    // TracePrologue may call back into V8 in corner cases, requiring that
    // marking (including write barriers) is fully set up.
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
    heap_->local_embedder_heap_tracer()->TracePrologue(
        heap_->flags_for_embedder_tracer());
  }

  heap_->InvokeIncrementalMarkingEpilogueCallbacks();
}

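// From here on, the linear allocation areas of the old, map and code spaces
// (and, with --local-heaps, of background threads) allocate black objects, so
// objects allocated during marking are not revisited.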
void IncrementalMarking::StartBlackAllocation() {
  DCHECK(!black_allocation_);
  DCHECK(IsMarking());
  black_allocation_ = true;
  heap()->old_space()->MarkLinearAllocationAreaBlack();
  heap()->map_space()->MarkLinearAllocationAreaBlack();
  heap()->code_space()->MarkLinearAllocationAreaBlack();
  if (FLAG_local_heaps) {
    heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
      local_heap->MarkLinearAllocationAreaBlack();
    });
  }
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation started\n");
  }
}

void IncrementalMarking::PauseBlackAllocation() {
  DCHECK(IsMarking());
  heap()->old_space()->UnmarkLinearAllocationArea();
  heap()->map_space()->UnmarkLinearAllocationArea();
  heap()->code_space()->UnmarkLinearAllocationArea();
  if (FLAG_local_heaps) {
    heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
      local_heap->UnmarkLinearAllocationArea();
    });
  }
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation paused\n");
  }
  black_allocation_ = false;
}

void IncrementalMarking::FinishBlackAllocation() {
  if (black_allocation_) {
    black_allocation_ = false;
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Black allocation finished\n");
    }
  }
}

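// If black allocation is active, makes sure the object just allocated at
// |allocated| is black: large objects get their mark bit set directly,
// regular objects are covered by a black area on their page.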
void IncrementalMarking::EnsureBlackAllocated(Address allocated, size_t size) {
  if (black_allocation() && allocated != kNullAddress) {
    HeapObject object = HeapObject::FromAddress(allocated);
    if (marking_state()->IsWhite(object) && !Heap::InYoungGeneration(object)) {
      if (heap_->IsLargeObject(object)) {
        marking_state()->WhiteToBlack(object);
      } else {
        Page::FromAddress(allocated)->CreateBlackArea(allocated,
                                                      allocated + size);
      }
    }
  }
}

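// Greys all strong roots and pushes them onto the marking worklist; the stack
// and weak roots are skipped here.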
void IncrementalMarking::MarkRoots() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateRoots(
      &visitor, base::EnumSet<SkipRoot>{SkipRoot::kStack, SkipRoot::kWeak});
}

bool IncrementalMarking::ShouldRetainMap(Map map, int age) {
  if (age == 0) {
    // The map has aged. Do not retain this map.
    return false;
  }
  Object constructor = map.GetConstructor();
  if (!constructor.IsHeapObject() ||
      marking_state()->IsWhite(HeapObject::cast(constructor))) {
    // The constructor is dead, no new objects with this map can
    // be created. Do not retain this map.
    return false;
  }
  return true;
}


void IncrementalMarking::RetainMaps() {
  // Do not retain dead maps if the flag disables it, or if there is
  // - memory pressure (reduce_memory_footprint_), or
  // - a GC requested by tests or dev-tools (abort_incremental_marking_).
  bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
                                   FLAG_retain_maps_for_n_gc == 0;
  std::vector<WeakArrayList> retained_maps_list = heap()->FindAllRetainedMaps();

  for (WeakArrayList retained_maps : retained_maps_list) {
    int length = retained_maps.length();

    for (int i = 0; i < length; i += 2) {
      MaybeObject value = retained_maps.Get(i);
      HeapObject map_heap_object;
      if (!value->GetHeapObjectIfWeak(&map_heap_object)) {
        continue;
      }
      int age = retained_maps.Get(i + 1).ToSmi().value();
      int new_age;
      Map map = Map::cast(map_heap_object);
      if (!map_retaining_is_disabled && marking_state()->IsWhite(map)) {
        if (ShouldRetainMap(map, age)) {
          WhiteToGreyAndPush(map);
        }
        Object prototype = map.prototype();
        if (age > 0 && prototype.IsHeapObject() &&
            marking_state()->IsWhite(HeapObject::cast(prototype))) {
          // The prototype is not marked, age the map.
          new_age = age - 1;
        } else {
          // The prototype and the constructor are marked, this map keeps only
          // the transition tree alive, not JSObjects. Do not age the map.
          new_age = age;
        }
      } else {
        new_age = FLAG_retain_maps_for_n_gc;
      }
      // Compact the array and update the age.
      if (new_age != age) {
        retained_maps.Set(i + 1, MaybeObject::FromSmi(Smi::FromInt(new_age)));
      }
    }
  }
}

void IncrementalMarking::FinalizeIncrementally() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE_BODY);
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  double start = heap_->MonotonicallyIncreasingTimeInMs();

  // After finishing incremental marking, we try to discover all unmarked
  // objects to reduce the marking load in the final pause.
  // 1) We scan and mark the roots again to find all changes to the root set.
  // 2) Age and retain maps embedded in optimized code.
  MarkRoots();

  // Map retaining is needed for performance, not correctness,
  // so we can do it only once at the beginning of the finalization.
  RetainMaps();

  MarkingBarrier::PublishAll(heap());

  finalize_marking_completed_ = true;

  if (FLAG_trace_incremental_marking) {
    double end = heap_->MonotonicallyIncreasingTimeInMs();
    double delta = end - start;
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Finalize incrementally spent %.1f ms.\n", delta);
  }
}

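// After a scavenge, entries on the marking worklist may point into from-space.
// Rewrites every entry to its forwarding address and drops entries for
// objects that did not survive.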
void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
  if (!IsMarking()) return;

  Map filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();

#ifdef ENABLE_MINOR_MC
  MinorMarkCompactCollector::MarkingState* minor_marking_state =
      heap()->minor_mark_compact_collector()->marking_state();
#endif  // ENABLE_MINOR_MC

  collector_->local_marking_worklists()->Publish();
  MarkingBarrier::PublishAll(heap());
  collector_->marking_worklists()->Update(
      [
#ifdef DEBUG
          // this is referred to inside the DCHECKs below.
          this,
#endif
#ifdef ENABLE_MINOR_MC
          minor_marking_state,
#endif
          filler_map](HeapObject obj, HeapObject* out) -> bool {
        DCHECK(obj.IsHeapObject());
        // Only pointers to from space have to be updated.
        if (Heap::InFromPage(obj)) {
          MapWord map_word = obj.map_word();
          if (!map_word.IsForwardingAddress()) {
            // There may be objects on the marking deque that do not exist
            // anymore, e.g. left trimmed objects or objects from the root set
            // (frames). If these objects are dead at scavenging time, their
            // marking deque entries will not point to forwarding addresses.
            // Hence, we can discard them.
            return false;
          }
          HeapObject dest = map_word.ToForwardingAddress();
          DCHECK_IMPLIES(marking_state()->IsWhite(obj),
                         obj.IsFreeSpaceOrFiller());
          *out = dest;
          return true;
        } else if (Heap::InToPage(obj)) {
          // The object may be on a large page or on a page that was moved in
          // new space.
          DCHECK(Heap::IsLargeObject(obj) ||
                 Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE));
#ifdef ENABLE_MINOR_MC
          if (minor_marking_state->IsWhite(obj)) {
            return false;
          }
#endif  // ENABLE_MINOR_MC
          // Either a large object or an object marked by the minor
          // mark-compactor.
          *out = obj;
          return true;
        } else {
          // The object may be on a page that was moved from new to old space.
          // Only applicable during minor MC garbage collections.
          if (Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
#ifdef ENABLE_MINOR_MC
            if (minor_marking_state->IsWhite(obj)) {
              return false;
            }
#endif  // ENABLE_MINOR_MC
            *out = obj;
            return true;
          }
          DCHECK_IMPLIES(marking_state()->IsWhite(obj),
                         obj.IsFreeSpaceOrFiller());
          // Skip one word filler objects that appear on the
          // stack when we perform in place array shift.
          if (obj.map() != filler_map) {
            *out = obj;
            return true;
          }
          return false;
        }
      });

  weak_objects_->UpdateAfterScavenge();
}

void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
    size_t dead_bytes_in_new_space) {
  if (!IsMarking()) return;
  bytes_marked_ -= Min(bytes_marked_, dead_bytes_in_new_space);
}

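// Revisits a black (already visited) object so that fields written after the
// initial visit are traced as well.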
void IncrementalMarking::ProcessBlackAllocatedObject(HeapObject obj) {
  if (IsMarking() && marking_state()->IsBlack(obj)) {
    collector_->RevisitObject(obj);
  }
}

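// Performs one bounded unit of embedder (wrapper) tracing: drains the
// embedder worklist in batches until the deadline and then lets the embedder
// trace with the remaining time budget.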
StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
                                            double* duration_ms) {
  if (!ShouldDoEmbedderStep()) {
    *duration_ms = 0.0;
    return StepResult::kNoImmediateWork;
  }

  constexpr size_t kObjectsToProcessBeforeDeadlineCheck = 500;

  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING);
  LocalEmbedderHeapTracer* local_tracer = heap_->local_embedder_heap_tracer();
  const double start = heap_->MonotonicallyIncreasingTimeInMs();
  const double deadline = start + expected_duration_ms;
  bool empty_worklist;
  {
    LocalEmbedderHeapTracer::ProcessingScope scope(local_tracer);
    HeapObject object;
    size_t cnt = 0;
    empty_worklist = true;
    while (local_marking_worklists()->PopEmbedder(&object)) {
      scope.TracePossibleWrapper(JSObject::cast(object));
      if (++cnt == kObjectsToProcessBeforeDeadlineCheck) {
        if (deadline <= heap_->MonotonicallyIncreasingTimeInMs()) {
          empty_worklist = false;
          break;
        }
        cnt = 0;
      }
    }
  }
  // |deadline - heap_->MonotonicallyIncreasingTimeInMs()| could be negative,
  // which means |local_tracer| won't do any actual tracing, so there is no
  // need to check for |deadline <= heap_->MonotonicallyIncreasingTimeInMs()|.
  bool remote_tracing_done =
      local_tracer->Trace(deadline - heap_->MonotonicallyIncreasingTimeInMs());
  double current = heap_->MonotonicallyIncreasingTimeInMs();
  local_tracer->SetEmbedderWorklistEmpty(empty_worklist);
  *duration_ms = current - start;
  return (empty_worklist && remote_tracing_done)
             ? StepResult::kNoImmediateWork
             : StepResult::kMoreWorkRemaining;
}

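// Drains the remaining marking worklist without a step limit and moves the
// marker into the COMPLETE state.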
void IncrementalMarking::Hurry() {
  if (!local_marking_worklists()->IsEmpty()) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking) {
      start = heap_->MonotonicallyIncreasingTimeInMs();
      heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Hurry\n");
    }
    collector_->ProcessMarkingWorklist(0);
    SetState(COMPLETE);
    if (FLAG_trace_incremental_marking) {
      double end = heap_->MonotonicallyIncreasingTimeInMs();
      double delta = end - start;
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Complete (hurry), spent %d ms.\n",
          static_cast<int>(delta));
    }
  }
}


void IncrementalMarking::Stop() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    int old_generation_size_mb =
        static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
    int old_generation_limit_mb =
        static_cast<int>(heap()->old_generation_allocation_limit() / MB);
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, "
        "overshoot %dMB\n",
        old_generation_size_mb, old_generation_limit_mb,
        Max(0, old_generation_size_mb - old_generation_limit_mb));
  }

  SpaceIterator it(heap_);
  while (it.HasNext()) {
    Space* space = it.Next();
    if (space == heap_->new_space()) {
      space->RemoveAllocationObserver(&new_generation_observer_);
    } else {
      space->RemoveAllocationObserver(&old_generation_observer_);
    }
  }

  heap_->isolate()->stack_guard()->ClearGC();
  SetState(STOPPED);
  is_compacting_ = false;
  FinishBlackAllocation();

  if (FLAG_local_heaps) {
    // Merge the live bytes counters of background threads.
    for (auto pair : background_live_bytes_) {
      MemoryChunk* memory_chunk = pair.first;
      intptr_t live_bytes = pair.second;

      if (live_bytes) {
        marking_state()->IncrementLiveBytes(memory_chunk, live_bytes);
      }
    }

    background_live_bytes_.clear();
  }
}


void IncrementalMarking::Finalize() {
  Hurry();
  Stop();
}


void IncrementalMarking::FinalizeMarking(CompletionAction action) {
  DCHECK(!finalize_marking_completed_);
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] requesting finalization of incremental "
        "marking.\n");
  }
  request_type_ = FINALIZATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}

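// Estimates how long it will take until the next incremental marking task
// runs, combining the recorded average with the currently scheduled task.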
double IncrementalMarking::CurrentTimeToMarkingTask() const {
  const double recorded_time_to_marking_task =
      heap_->tracer()->AverageTimeToIncrementalMarkingTask();
  const double current_time_to_marking_task =
      incremental_marking_job_.CurrentTimeToTask(heap_);
  if (recorded_time_to_marking_task == 0.0) return 0.0;
  return Max(recorded_time_to_marking_task, current_time_to_marking_task);
}

void IncrementalMarking::MarkingComplete(CompletionAction action) {
  // Allowed overshoot percentage of incremental marking walltime.
  constexpr double kAllowedOvershoot = 0.1;
  // Minimum overshoot in ms. This is used to allow moving away from the stack
  // when marking was fast.
  constexpr double kMinOvershootMs = 50;

  if (action == GC_VIA_STACK_GUARD) {
    if (time_to_force_completion_ == 0.0) {
      const double now = heap_->MonotonicallyIncreasingTimeInMs();
      const double overshoot_ms =
          Max(kMinOvershootMs, (now - start_time_ms_) * kAllowedOvershoot);
      const double time_to_marking_task = CurrentTimeToMarkingTask();
      if (time_to_marking_task == 0.0 || time_to_marking_task > overshoot_ms) {
        if (FLAG_trace_incremental_marking) {
          heap()->isolate()->PrintWithTimestamp(
              "[IncrementalMarking] Not delaying marking completion. time to "
              "task: %fms allowed overshoot: %fms\n",
              time_to_marking_task, overshoot_ms);
        }
      } else {
        time_to_force_completion_ = now + overshoot_ms;
        if (FLAG_trace_incremental_marking) {
          heap()->isolate()->PrintWithTimestamp(
              "[IncrementalMarking] Delaying GC via stack guard. time to task: "
              "%fms "
              "allowed overshoot: %fms\n",
              time_to_marking_task, overshoot_ms);
        }
        incremental_marking_job_.ScheduleTask(
            heap(), IncrementalMarkingJob::TaskType::kNormal);
        return;
      }
    }
    if (heap()->MonotonicallyIncreasingTimeInMs() < time_to_force_completion_) {
      if (FLAG_trace_incremental_marking) {
        heap()->isolate()->PrintWithTimestamp(
            "[IncrementalMarking] Delaying GC via stack guard. time left: "
            "%fms\n",
            time_to_force_completion_ -
                heap_->MonotonicallyIncreasingTimeInMs());
      }
      return;
    }
  }

  SetState(COMPLETE);
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and
  // then, that shouldn't make us do a scavenge and keep being incremental.
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}

void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  finalize_marking_completed_ = false;
}

bool IncrementalMarking::ShouldDoEmbedderStep() {
  return state_ == MARKING && FLAG_incremental_marking_wrappers &&
         heap_->local_embedder_heap_tracer()->InUse();
}

void IncrementalMarking::FastForwardSchedule() {
  if (scheduled_bytes_to_mark_ < bytes_marked_) {
    scheduled_bytes_to_mark_ = bytes_marked_;
    if (FLAG_trace_incremental_marking) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Fast-forwarded schedule\n");
    }
  }
}

void IncrementalMarking::FastForwardScheduleIfCloseToFinalization() {
  // Consider marking close to finalization if 75% of the initial old
  // generation was marked.
  if (bytes_marked_ > 3 * (initial_old_generation_size_ / 4)) {
    FastForwardSchedule();
  }
}

void IncrementalMarking::ScheduleBytesToMarkBasedOnTime(double time_ms) {
  // Time interval that should be sufficient to complete incremental marking.
  constexpr double kTargetMarkingWallTimeInMs = 500;
  constexpr double kMinTimeBetweenScheduleInMs = 10;
  if (schedule_update_time_ms_ + kMinTimeBetweenScheduleInMs > time_ms) return;
  double delta_ms =
      Min(time_ms - schedule_update_time_ms_, kTargetMarkingWallTimeInMs);
  schedule_update_time_ms_ = time_ms;

  size_t bytes_to_mark =
      (delta_ms / kTargetMarkingWallTimeInMs) * initial_old_generation_size_;
  AddScheduledBytesToMark(bytes_to_mark);

  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Scheduled %zuKB to mark based on time delta "
        "%.1fms\n",
        bytes_to_mark / KB, delta_ms);
  }
}

namespace {
StepResult CombineStepResults(StepResult a, StepResult b) {
  DCHECK_NE(StepResult::kWaitingForFinalization, a);
  DCHECK_NE(StepResult::kWaitingForFinalization, b);
  if (a == StepResult::kMoreWorkRemaining ||
      b == StepResult::kMoreWorkRemaining)
    return StepResult::kMoreWorkRemaining;
  return StepResult::kNoImmediateWork;
}
}  // anonymous namespace

StepResult IncrementalMarking::AdvanceWithDeadline(
    double deadline_in_ms, CompletionAction completion_action,
    StepOrigin step_origin) {
  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
  DCHECK(!IsStopped());

  ScheduleBytesToMarkBasedOnTime(heap()->MonotonicallyIncreasingTimeInMs());
  FastForwardScheduleIfCloseToFinalization();
  return Step(kStepSizeInMs, completion_action, step_origin);
}

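// If concurrent sweepers are still running, optionally helps them and returns;
// otherwise completes sweeping and transitions from SWEEPING to MARKING.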
void IncrementalMarking::FinalizeSweeping() {
  DCHECK(state_ == SWEEPING);
  if (ContinueConcurrentSweeping()) {
    if (FLAG_stress_incremental_marking) {
      // To start concurrent marking a bit earlier, support the concurrent
      // sweepers from the main thread by sweeping some pages.
      SupportConcurrentSweeping();
    }
    return;
  }

  SafepointScope scope(heap());
  collector_->EnsureSweepingCompleted();
  DCHECK(!collector_->sweeping_in_progress());
#ifdef DEBUG
  heap_->VerifyCountersAfterSweeping();
#endif
  StartMarking();
}

bool IncrementalMarking::ContinueConcurrentSweeping() {
  if (!collector_->sweeping_in_progress()) return false;
  return FLAG_concurrent_sweeping &&
         collector_->sweeper()->AreSweeperTasksRunning();
}

void IncrementalMarking::SupportConcurrentSweeping() {
  collector_->sweeper()->SupportConcurrentSweeping();
}

size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
  // Return the bytes allocated in the old generation since the last call and
  // update the allocation counter.
  size_t current_counter = heap_->OldGenerationAllocationCounter();
  size_t result = current_counter - old_generation_allocation_counter_;
  old_generation_allocation_counter_ = current_counter;
  return result;
}

size_t IncrementalMarking::StepSizeToMakeProgress() {
  const size_t kTargetStepCount = 256;
  const size_t kTargetStepCountAtOOM = 32;
  const size_t kMaxStepSizeInByte = 256 * KB;
  size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;

  if (!heap()->CanExpandOldGeneration(oom_slack)) {
    return heap()->OldGenerationSizeOfObjects() / kTargetStepCountAtOOM;
  }

  return Min(Max(initial_old_generation_size_ / kTargetStepCount,
                 IncrementalMarking::kMinStepSizeInBytes),
             kMaxStepSizeInByte);
}

void IncrementalMarking::AddScheduledBytesToMark(size_t bytes_to_mark) {
  if (scheduled_bytes_to_mark_ + bytes_to_mark < scheduled_bytes_to_mark_) {
    // The overflow case.
    scheduled_bytes_to_mark_ = std::numeric_limits<std::size_t>::max();
  } else {
    scheduled_bytes_to_mark_ += bytes_to_mark;
  }
}

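// Adds to the marking schedule both the bytes needed to make steady progress
// and the bytes needed to keep up with old-generation allocation.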
void IncrementalMarking::ScheduleBytesToMarkBasedOnAllocation() {
  size_t progress_bytes = StepSizeToMakeProgress();
  size_t allocation_bytes = StepSizeToKeepUpWithAllocations();
  size_t bytes_to_mark = progress_bytes + allocation_bytes;
  AddScheduledBytesToMark(bytes_to_mark);

  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Scheduled %zuKB to mark based on allocation "
        "(progress=%zuKB, allocation=%zuKB)\n",
        bytes_to_mark / KB, progress_bytes / KB, allocation_bytes / KB);
  }
}

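// Folds the bytes marked by concurrent marking tasks since the last fetch
// into bytes_marked_ so that scheduling sees concurrent progress.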
void IncrementalMarking::FetchBytesMarkedConcurrently() {
  if (FLAG_concurrent_marking) {
    size_t current_bytes_marked_concurrently =
        heap()->concurrent_marking()->TotalMarkedBytes();
    // concurrent_marking()->TotalMarkedBytes() is not monotonic for a
    // short period of time when a concurrent marking task is finishing.
    if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
      bytes_marked_ +=
          current_bytes_marked_concurrently - bytes_marked_concurrently_;
      bytes_marked_concurrently_ = current_bytes_marked_concurrently;
    }
    if (FLAG_trace_incremental_marking) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marked %zuKB on background threads\n",
          heap_->concurrent_marking()->TotalMarkedBytes() / KB);
    }
  }
}

size_t IncrementalMarking::ComputeStepSizeInBytes(StepOrigin step_origin) {
  FetchBytesMarkedConcurrently();
  if (FLAG_trace_incremental_marking) {
    if (scheduled_bytes_to_mark_ > bytes_marked_) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marker is %zuKB behind schedule\n",
          (scheduled_bytes_to_mark_ - bytes_marked_) / KB);
    } else {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marker is %zuKB ahead of schedule\n",
          (bytes_marked_ - scheduled_bytes_to_mark_) / KB);
    }
  }
  // Allow steps on allocation to get behind the schedule by a small amount.
  // This gives higher priority to steps in tasks.
  size_t kScheduleMarginInBytes = step_origin == StepOrigin::kV8 ? 1 * MB : 0;
  if (bytes_marked_ + kScheduleMarginInBytes > scheduled_bytes_to_mark_)
    return 0;
  return scheduled_bytes_to_mark_ - bytes_marked_ - kScheduleMarginInBytes;
}

void IncrementalMarking::AdvanceOnAllocation() {
  // Code using an AlwaysAllocateScope assumes that the GC state does not
  // change; that implies that no marking steps must be performed.
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      (state_ != SWEEPING && state_ != MARKING) || heap_->always_allocate()) {
    return;
  }
  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
  ScheduleBytesToMarkBasedOnAllocation();
  Step(kMaxStepSizeInMs, GC_VIA_STACK_GUARD, StepOrigin::kV8);
}

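// Performs one incremental marking step: finalizes sweeping if necessary,
// then processes up to the scheduled number of bytes on the V8 heap and
// gives the embedder a proportional time slice.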
StepResult IncrementalMarking::Step(double max_step_size_in_ms,
                                    CompletionAction action,
                                    StepOrigin step_origin) {
  double start = heap_->MonotonicallyIncreasingTimeInMs();

  if (state_ == SWEEPING) {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
    FinalizeSweeping();
  }

  StepResult combined_result = StepResult::kMoreWorkRemaining;
  size_t bytes_to_process = 0;
  size_t v8_bytes_processed = 0;
  double embedder_duration = 0.0;
  double embedder_deadline = 0.0;
  if (state_ == MARKING) {
    if (FLAG_concurrent_marking) {
      // It is safe to merge back all objects that were on hold into the shared
      // work list at Step because we are at a safepoint where all objects
      // are properly initialized.
      local_marking_worklists()->MergeOnHold();
    }

// Only print marking worklist in debug mode to save ~40KB of code size.
#ifdef DEBUG
    if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
        FLAG_trace_gc_verbose) {
      collector_->marking_worklists()->Print();
    }
#endif
    if (FLAG_trace_incremental_marking) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marking speed %.fKB/ms\n",
          heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
    }
    // The first step after Scavenge will see many allocated bytes.
    // Cap the step size to distribute the marking work more uniformly.
    const double marking_speed =
        heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond();
    size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
        max_step_size_in_ms, marking_speed);
    bytes_to_process = Min(ComputeStepSizeInBytes(step_origin), max_step_size);
    bytes_to_process = Max(bytes_to_process, kMinStepSizeInBytes);

    // Perform a single V8 and a single embedder step. In case both have been
    // observed as empty back to back, we can finalize.
    //
    // This ignores the case where the embedder finds new V8-side objects. The
    // assumption is that large graphs are well connected and can mostly be
    // processed on their own. For small graphs, helping is not necessary.
    v8_bytes_processed = collector_->ProcessMarkingWorklist(bytes_to_process);
    StepResult v8_result = local_marking_worklists()->IsEmpty()
                               ? StepResult::kNoImmediateWork
                               : StepResult::kMoreWorkRemaining;
    StepResult embedder_result = StepResult::kNoImmediateWork;
    if (heap_->local_embedder_heap_tracer()->InUse()) {
      embedder_deadline =
          Min(max_step_size_in_ms,
              static_cast<double>(bytes_to_process) / marking_speed);
      // TODO(chromium:1056170): Replace embedder_deadline with bytes_to_process
      // after migrating blink to the cppgc library and after v8 can directly
      // push objects to Oilpan.
      embedder_result = EmbedderStep(embedder_deadline, &embedder_duration);
    }
    bytes_marked_ += v8_bytes_processed;
    combined_result = CombineStepResults(v8_result, embedder_result);

    if (combined_result == StepResult::kNoImmediateWork) {
      if (!finalize_marking_completed_) {
        FinalizeMarking(action);
        FastForwardSchedule();
        combined_result = StepResult::kWaitingForFinalization;
        incremental_marking_job()->Start(heap_);
      } else {
        MarkingComplete(action);
        combined_result = StepResult::kWaitingForFinalization;
      }
    }
    if (FLAG_concurrent_marking) {
      local_marking_worklists()->ShareWork();
      heap_->concurrent_marking()->RescheduleJobIfNeeded();
    }
  }
  if (state_ == MARKING) {
    // Note that we do not report any marked bytes in the case of finishing
    // sweeping, as we did not process the marking worklist.
    const double v8_duration =
        heap_->MonotonicallyIncreasingTimeInMs() - start - embedder_duration;
    heap_->tracer()->AddIncrementalMarkingStep(v8_duration, v8_bytes_processed);
  }
  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Step %s V8: %zuKB (%zuKB), embedder: %fms (%fms) "
        "in %.1f\n",
        step_origin == StepOrigin::kV8 ? "in v8" : "in task",
        v8_bytes_processed / KB, bytes_to_process / KB, embedder_duration,
        embedder_deadline, heap_->MonotonicallyIncreasingTimeInMs() - start);
  }
  return combined_result;
}

}  // namespace internal
}  // namespace v8