// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/marker.h"

#include <cstdint>
#include <memory>

#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/platform.h"
#include "src/base/platform/time.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/liveness-broker.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/process-heap.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/write-barrier.h"

#if defined(CPPGC_CAGED_HEAP)
#include "include/cppgc/internal/caged-heap-local-data.h"
#endif

namespace cppgc {
namespace internal {

namespace {

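// Note on the two helpers below: they only toggle barrier-related state. The
// global write-barrier flag (and, under CPPGC_CAGED_HEAP, the per-heap
// is_incremental_marking_in_progress bit) is what makes mutator writes
// observable while incremental or concurrent marking is running; atomic-only
// marking needs no barrier and leaves the flags untouched.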
bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
                                     HeapBase& heap) {
  if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
      config.marking_type ==
          Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
    WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Enter();
#if defined(CPPGC_CAGED_HEAP)
    heap.caged_heap().local_data().is_incremental_marking_in_progress = true;
#endif
    return true;
  }
  return false;
}

bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
                                    HeapBase& heap) {
  if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
      config.marking_type ==
          Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
    WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Exit();
#if defined(CPPGC_CAGED_HEAP)
    heap.caged_heap().local_data().is_incremental_marking_in_progress = false;
#endif
    return true;
  }
  return false;
}

// Visit remembered set that was recorded in the generational barrier.
void VisitRememberedSlots(HeapBase& heap,
                          MutatorMarkingState& mutator_marking_state) {
#if defined(CPPGC_YOUNG_GENERATION)
  StatsCollector::EnabledScope stats_scope(
      heap.stats_collector(), StatsCollector::kMarkVisitRememberedSets);
  for (void* slot : heap.remembered_slots()) {
    auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
                            ->ObjectHeaderFromInnerAddress(slot);
    if (slot_header.IsYoung()) continue;
    // The design of young generation requires collections to be executed at
    // the top level (with the guarantee that no objects are currently in
    // construction). This can be ensured by running young GCs from safe
    // points or by reintroducing nested allocation scopes that avoid
    // finalization.
    DCHECK(!slot_header.template IsInConstruction<AccessMode::kNonAtomic>());

    void* value = *reinterpret_cast<void**>(slot);
    mutator_marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
  }
#endif
}

// Assumes that all spaces have their LABs reset.
void ResetRememberedSet(HeapBase& heap) {
#if defined(CPPGC_YOUNG_GENERATION)
  auto& local_data = heap.caged_heap().local_data();
  local_data.age_table.Reset(&heap.caged_heap().allocator());
  heap.remembered_slots().clear();
#endif
}

static constexpr size_t kDefaultDeadlineCheckInterval = 150u;

template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
          typename WorklistLocal, typename Callback>
bool DrainWorklistWithBytesAndTimeDeadline(MarkingStateBase& marking_state,
                                           size_t marked_bytes_deadline,
                                           v8::base::TimeTicks time_deadline,
                                           WorklistLocal& worklist_local,
                                           Callback callback) {
  return DrainWorklistWithPredicate<kDeadlineCheckInterval>(
      [&marking_state, marked_bytes_deadline, time_deadline]() {
        return (marked_bytes_deadline <= marking_state.marked_bytes()) ||
               (time_deadline <= v8::base::TimeTicks::Now());
      },
      worklist_local, callback);
}
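// The deadline predicate above is only evaluated roughly every
// kDeadlineCheckInterval processed items, which amortizes the cost of
// TimeTicks::Now() across objects. Illustrative use (mirroring the calls in
// ProcessWorklistsWithDeadline further down):
//
//   DrainWorklistWithBytesAndTimeDeadline(
//       mutator_marking_state_, marked_bytes_deadline, time_deadline,
//       mutator_marking_state_.marking_worklist(),
//       [this](const MarkingWorklists::MarkingItem& item) { /* trace item */ });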

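// Despite the name, the returned "duration" is a byte budget: the schedule
// translates the currently allocated object size into the number of bytes the
// next incremental step should mark, so that marking can keep pace with
// allocation (see the use in AdvanceMarkingWithLimits below).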
size_t GetNextIncrementalStepDuration(IncrementalMarkingSchedule& schedule,
                                      HeapBase& heap) {
  return schedule.GetNextIncrementalStepDuration(
      heap.stats_collector()->allocated_object_size());
}

}  // namespace

constexpr v8::base::TimeDelta MarkerBase::kMaximumIncrementalStepDuration;

MarkerBase::IncrementalMarkingTask::IncrementalMarkingTask(
    MarkerBase* marker, MarkingConfig::StackState stack_state)
    : marker_(marker),
      stack_state_(stack_state),
      handle_(Handle::NonEmptyTag{}) {}

// static
MarkerBase::IncrementalMarkingTask::Handle
MarkerBase::IncrementalMarkingTask::Post(cppgc::TaskRunner* runner,
                                         MarkerBase* marker) {
  // Incremental GC is possible only via the GCInvoker, so getting here
  // guarantees that either non-nestable tasks or conservative stack
  // scanning are supported. This is required so that the incremental
  // task can safely finalize GC if needed.
  DCHECK_IMPLIES(marker->heap().stack_support() !=
                     HeapBase::StackSupport::kSupportsConservativeStackScan,
                 runner->NonNestableTasksEnabled());
  MarkingConfig::StackState stack_state_for_task =
      runner->NonNestableTasksEnabled()
          ? MarkingConfig::StackState::kNoHeapPointers
          : MarkingConfig::StackState::kMayContainHeapPointers;
  auto task =
      std::make_unique<IncrementalMarkingTask>(marker, stack_state_for_task);
  auto handle = task->handle_;
  if (runner->NonNestableTasksEnabled()) {
    runner->PostNonNestableTask(std::move(task));
  } else {
    runner->PostTask(std::move(task));
  }
  return handle;
}

void MarkerBase::IncrementalMarkingTask::Run() {
  if (handle_.IsCanceled()) return;

  StatsCollector::EnabledScope stats_scope(marker_->heap().stats_collector(),
                                           StatsCollector::kIncrementalMark);

  if (marker_->IncrementalMarkingStep(stack_state_)) {
    // Incremental marking is done, so finalize the GC.
    marker_->heap().FinalizeIncrementalGarbageCollectionIfNeeded(stack_state_);
  }
}

MarkerBase::MarkerBase(Key, HeapBase& heap, cppgc::Platform* platform,
                       MarkingConfig config)
    : heap_(heap),
      config_(config),
      platform_(platform),
      foreground_task_runner_(platform_->GetForegroundTaskRunner()),
      mutator_marking_state_(heap, marking_worklists_,
                             heap.compactor().compaction_worklists()) {}

MarkerBase::~MarkerBase() {
  // The fixed point iteration may have found not-fully-constructed objects.
  // Such objects should have already been found through the stack scan though
  // and should thus already be marked.
  if (!marking_worklists_.not_fully_constructed_worklist()->IsEmpty()) {
#if DEBUG
    DCHECK_NE(MarkingConfig::StackState::kNoHeapPointers, config_.stack_state);
    std::unordered_set<HeapObjectHeader*> objects =
        mutator_marking_state_.not_fully_constructed_worklist().Extract();
    for (HeapObjectHeader* object : objects) DCHECK(object->IsMarked());
#else
    marking_worklists_.not_fully_constructed_worklist()->Clear();
#endif
  }

  // |discovered_ephemeron_pairs_worklist_| may still hold ephemeron pairs with
  // dead keys.
  if (!marking_worklists_.discovered_ephemeron_pairs_worklist()->IsEmpty()) {
#if DEBUG
    MarkingWorklists::EphemeronPairItem item;
    while (mutator_marking_state_.discovered_ephemeron_pairs_worklist().Pop(
        &item)) {
      DCHECK(!HeapObjectHeader::FromPayload(item.key).IsMarked());
    }
#else
    marking_worklists_.discovered_ephemeron_pairs_worklist()->Clear();
#endif
  }

  marking_worklists_.weak_containers_worklist()->Clear();
}

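// Overall driving sequence (sketch; the exact entry points live in HeapBase
// and the GCInvoker and are simplified here):
//
//   marker->StartMarking();              // Roots w/o stack, schedule tasks.
//   // ... mutator runs; the write barrier and AdvanceMarkingOnAllocation
//   // keep feeding the worklists; IncrementalMarkingTask runs steps ...
//   marker->FinishMarking(stack_state);  // Atomic pause, closure, weakness.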
void MarkerBase::StartMarking() {
  DCHECK(!is_marking_);
  StatsCollector::EnabledScope stats_scope(
      heap().stats_collector(),
      config_.marking_type == MarkingConfig::MarkingType::kAtomic
          ? StatsCollector::kAtomicMark
          : StatsCollector::kIncrementalMark);

  heap().stats_collector()->NotifyMarkingStarted(config_.collection_type,
                                                 config_.is_forced_gc);

  is_marking_ = true;
  if (EnterIncrementalMarkingIfNeeded(config_, heap())) {
    StatsCollector::EnabledScope stats_scope(
        heap().stats_collector(), StatsCollector::kMarkIncrementalStart);

    // Performing incremental or concurrent marking.
    schedule_.NotifyIncrementalMarkingStart();
    // Scanning the stack is expensive so we only do it at the atomic pause.
    VisitRoots(MarkingConfig::StackState::kNoHeapPointers);
    ScheduleIncrementalMarkingTask();
    if (config_.marking_type ==
        MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
      mutator_marking_state_.Publish();
      concurrent_marker_->Start();
    }
  }
}

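// EnterAtomicPause stops incremental/concurrent marking, takes the process
// lock guarding cross-thread persistents, and visits roots with the final
// stack state; with kMayContainHeapPointers this is where the stack is
// scanned and not-fully-constructed objects are traced conservatively.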
void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
  StatsCollector::EnabledScope top_stats_scope(heap().stats_collector(),
                                               StatsCollector::kAtomicMark);
  StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
                                           StatsCollector::kMarkAtomicPrologue);

  if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
    // Cancel remaining concurrent/incremental tasks.
    concurrent_marker_->Cancel();
    incremental_marking_handle_.Cancel();
  }
  config_.stack_state = stack_state;
  config_.marking_type = MarkingConfig::MarkingType::kAtomic;

  // Lock guards against changes to {Weak}CrossThreadPersistent handles that
  // may conflict with marking. E.g., a WeakCrossThreadPersistent may be
  // converted into a CrossThreadPersistent which requires that the handle
  // is either cleared or the object is retained.
  g_process_mutex.Pointer()->Lock();

  {
    // VisitRoots also resets the LABs.
    VisitRoots(config_.stack_state);
    if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
      mutator_marking_state_.FlushNotFullyConstructedObjects();
      DCHECK(marking_worklists_.not_fully_constructed_worklist()->IsEmpty());
    } else {
      MarkNotFullyConstructedObjects();
    }
  }
}

void MarkerBase::LeaveAtomicPause() {
  {
    StatsCollector::EnabledScope top_stats_scope(heap().stats_collector(),
                                                 StatsCollector::kAtomicMark);
    StatsCollector::EnabledScope stats_scope(
        heap().stats_collector(), StatsCollector::kMarkAtomicEpilogue);
    DCHECK(!incremental_marking_handle_);
    ResetRememberedSet(heap());
    heap().stats_collector()->NotifyMarkingCompleted(
        // GetOverallMarkedBytes also includes concurrently marked bytes.
        schedule_.GetOverallMarkedBytes());
    is_marking_ = false;
  }
  {
    // Weakness callbacks are forbidden from allocating objects.
    cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(heap_);
    ProcessWeakness();
  }
  // TODO(chromium:1056170): It would be better if the call to Unlock was
  // covered by some cppgc scope.
  g_process_mutex.Pointer()->Unlock();
  heap().SetStackStateOfPrevGC(config_.stack_state);
}

void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
  DCHECK(is_marking_);
  EnterAtomicPause(stack_state);
  {
    StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
                                             StatsCollector::kAtomicMark);
    CHECK(AdvanceMarkingWithLimits(v8::base::TimeDelta::Max(), SIZE_MAX));
    mutator_marking_state_.Publish();
  }
  LeaveAtomicPause();
}

void MarkerBase::ProcessWeakness() {
  DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);

  StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
                                           StatsCollector::kAtomicWeak);

  heap().GetWeakPersistentRegion().Trace(&visitor());
  // Processing cross-thread handles requires taking the process lock.
  g_process_mutex.Get().AssertHeld();
  heap().GetWeakCrossThreadPersistentRegion().Trace(&visitor());

  // Call weak callbacks on objects that may now be pointing to dead objects.
  MarkingWorklists::WeakCallbackItem item;
  LivenessBroker broker = LivenessBrokerFactory::Create();
  MarkingWorklists::WeakCallbackWorklist::Local& local =
      mutator_marking_state_.weak_callback_worklist();
  while (local.Pop(&item)) {
    item.callback(broker, item.parameter);
  }

  // Weak callbacks should not add any new objects for marking.
  DCHECK(marking_worklists_.marking_worklist()->IsEmpty());
}

void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
  StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
                                           StatsCollector::kMarkVisitRoots);

  // Reset LABs before scanning roots. LABs are cleared to allow
  // ObjectStartBitmap handling without considering LABs.
  heap().object_allocator().ResetLinearAllocationBuffers();

  {
    {
      StatsCollector::DisabledScope inner_stats_scope(
          heap().stats_collector(), StatsCollector::kMarkVisitPersistents);
      heap().GetStrongPersistentRegion().Trace(&visitor());
    }
    if (config_.marking_type == MarkingConfig::MarkingType::kAtomic) {
      StatsCollector::DisabledScope inner_stats_scope(
          heap().stats_collector(),
          StatsCollector::kMarkVisitCrossThreadPersistents);
      g_process_mutex.Get().AssertHeld();
      heap().GetStrongCrossThreadPersistentRegion().Trace(&visitor());
    }
  }

  if (stack_state != MarkingConfig::StackState::kNoHeapPointers) {
    StatsCollector::DisabledScope stack_stats_scope(
        heap().stats_collector(), StatsCollector::kMarkVisitStack);
    heap().stack()->IteratePointers(&stack_visitor());
  }
  if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
    VisitRememberedSlots(heap(), mutator_marking_state_);
  }
}

void MarkerBase::ScheduleIncrementalMarkingTask() {
  DCHECK(platform_);
  if (!foreground_task_runner_ || incremental_marking_handle_) return;
  incremental_marking_handle_ =
      IncrementalMarkingTask::Post(foreground_task_runner_.get(), this);
}

bool MarkerBase::IncrementalMarkingStepForTesting(
    MarkingConfig::StackState stack_state) {
  return IncrementalMarkingStep(stack_state);
}

bool MarkerBase::IncrementalMarkingStep(MarkingConfig::StackState stack_state) {
  if (stack_state == MarkingConfig::StackState::kNoHeapPointers) {
    mutator_marking_state_.FlushNotFullyConstructedObjects();
  }
  config_.stack_state = stack_state;

  return AdvanceMarkingWithLimits();
}

void MarkerBase::AdvanceMarkingOnAllocation() {
  StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
                                           StatsCollector::kIncrementalMark);
  StatsCollector::EnabledScope nested_scope(heap().stats_collector(),
                                            StatsCollector::kMarkOnAllocation);
  if (AdvanceMarkingWithLimits()) {
    // Schedule another incremental task for finalizing without a stack.
    ScheduleIncrementalMarkingTask();
  }
}

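// Returns true when all worklists were drained within the given budget; the
// atomic pause calls this with TimeDelta::Max()/SIZE_MAX and CHECKs the
// result, so it must finish there. A marked_bytes_limit of 0 means the byte
// budget is derived from the incremental marking schedule.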
bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
                                          size_t marked_bytes_limit) {
  bool is_done = false;
  if (!main_marking_disabled_for_testing_) {
    if (marked_bytes_limit == 0) {
      marked_bytes_limit = mutator_marking_state_.marked_bytes() +
                           GetNextIncrementalStepDuration(schedule_, heap_);
    }
    StatsCollector::EnabledScope deadline_scope(
        heap().stats_collector(),
        StatsCollector::kMarkTransitiveClosureWithDeadline, "deadline_ms",
        max_duration.InMillisecondsF());
    is_done = ProcessWorklistsWithDeadline(
        marked_bytes_limit, v8::base::TimeTicks::Now() + max_duration);
    schedule_.UpdateMutatorThreadMarkedBytes(
        mutator_marking_state_.marked_bytes());
  }
  mutator_marking_state_.Publish();
  if (!is_done) {
    // If marking is atomic, |is_done| should always be true.
    DCHECK_NE(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
    ScheduleIncrementalMarkingTask();
    if (config_.marking_type ==
        MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
      concurrent_marker_->NotifyIncrementalMutatorStepCompleted();
    }
  }
  return is_done;
}

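// Drains the worklists in a fixed order (concurrent-marking bailout objects,
// previously-not-fully-constructed objects, the regular marking worklist, the
// write-barrier worklist, and finally ephemeron pairs) and repeats until the
// marking worklist is empty, or returns early when a deadline is hit.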
bool MarkerBase::ProcessWorklistsWithDeadline(
    size_t marked_bytes_deadline, v8::base::TimeTicks time_deadline) {
  StatsCollector::EnabledScope stats_scope(
      heap().stats_collector(), StatsCollector::kMarkTransitiveClosure);
  do {
    if ((config_.marking_type == MarkingConfig::MarkingType::kAtomic) ||
        schedule_.ShouldFlushEphemeronPairs()) {
      mutator_marking_state_.FlushDiscoveredEphemeronPairs();
    }

    // Bailout objects may be complicated to trace and thus might take longer
    // than other objects. Therefore we reduce the interval between deadline
    // checks to guarantee the deadline is not exceeded.
    {
      StatsCollector::EnabledScope inner_scope(
          heap().stats_collector(), StatsCollector::kMarkProcessBailOutObjects);
      if (!DrainWorklistWithBytesAndTimeDeadline<kDefaultDeadlineCheckInterval /
                                                 5>(
              mutator_marking_state_, marked_bytes_deadline, time_deadline,
              mutator_marking_state_.concurrent_marking_bailout_worklist(),
              [this](
                  const MarkingWorklists::ConcurrentMarkingBailoutItem& item) {
                mutator_marking_state_.AccountMarkedBytes(item.bailedout_size);
                item.callback(&visitor(), item.parameter);
              })) {
        return false;
      }
    }

    {
      StatsCollector::EnabledScope inner_scope(
          heap().stats_collector(),
          StatsCollector::kMarkProcessNotFullyconstructedWorklist);
      if (!DrainWorklistWithBytesAndTimeDeadline(
              mutator_marking_state_, marked_bytes_deadline, time_deadline,
              mutator_marking_state_
                  .previously_not_fully_constructed_worklist(),
              [this](HeapObjectHeader* header) {
                mutator_marking_state_.AccountMarkedBytes(*header);
                DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
                                                                     *header);
              })) {
        return false;
      }
    }

    {
      StatsCollector::EnabledScope inner_scope(
          heap().stats_collector(),
          StatsCollector::kMarkProcessMarkingWorklist);
      if (!DrainWorklistWithBytesAndTimeDeadline(
              mutator_marking_state_, marked_bytes_deadline, time_deadline,
              mutator_marking_state_.marking_worklist(),
              [this](const MarkingWorklists::MarkingItem& item) {
                const HeapObjectHeader& header =
                    HeapObjectHeader::FromPayload(item.base_object_payload);
                DCHECK(!header.IsInConstruction<AccessMode::kNonAtomic>());
                DCHECK(header.IsMarked<AccessMode::kNonAtomic>());
                mutator_marking_state_.AccountMarkedBytes(header);
                item.callback(&visitor(), item.base_object_payload);
              })) {
        return false;
      }
    }

    {
      StatsCollector::EnabledScope inner_scope(
          heap().stats_collector(),
          StatsCollector::kMarkProcessWriteBarrierWorklist);
      if (!DrainWorklistWithBytesAndTimeDeadline(
              mutator_marking_state_, marked_bytes_deadline, time_deadline,
              mutator_marking_state_.write_barrier_worklist(),
              [this](HeapObjectHeader* header) {
                mutator_marking_state_.AccountMarkedBytes(*header);
                DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
                                                                     *header);
              })) {
        return false;
      }
    }

    {
      StatsCollector::EnabledScope stats_scope(
          heap().stats_collector(), StatsCollector::kMarkProcessEphemerons);
      if (!DrainWorklistWithBytesAndTimeDeadline(
              mutator_marking_state_, marked_bytes_deadline, time_deadline,
              mutator_marking_state_.ephemeron_pairs_for_processing_worklist(),
              [this](const MarkingWorklists::EphemeronPairItem& item) {
                mutator_marking_state_.ProcessEphemeron(
                    item.key, item.value, item.value_desc, visitor());
              })) {
        return false;
      }
    }
  } while (!mutator_marking_state_.marking_worklist().IsLocalAndGlobalEmpty());
  return true;
}

void MarkerBase::MarkNotFullyConstructedObjects() {
  StatsCollector::DisabledScope stats_scope(
      heap().stats_collector(),
      StatsCollector::kMarkVisitNotFullyConstructedObjects);
  std::unordered_set<HeapObjectHeader*> objects =
      mutator_marking_state_.not_fully_constructed_worklist().Extract();
  for (HeapObjectHeader* object : objects) {
    DCHECK(object);
    // TraceConservativelyIfNeeded delegates to either in-construction or
    // fully constructed handling. Both handlers have their own marked bytes
    // accounting and markbit handling (bailout).
    conservative_visitor().TraceConservativelyIfNeeded(*object);
  }
}

void MarkerBase::ClearAllWorklistsForTesting() {
  marking_worklists_.ClearForTesting();
  auto* compaction_worklists = heap_.compactor().compaction_worklists();
  if (compaction_worklists) compaction_worklists->ClearForTesting();
}

void MarkerBase::SetMainThreadMarkingDisabledForTesting(bool value) {
  main_marking_disabled_for_testing_ = value;
}

void MarkerBase::WaitForConcurrentMarkingForTesting() {
  concurrent_marker_->JoinForTesting();
}

void MarkerBase::NotifyCompactionCancelled() {
  // Compaction cannot be cancelled while concurrent marking is active.
  DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
  DCHECK_IMPLIES(concurrent_marker_, !concurrent_marker_->IsActive());
  mutator_marking_state_.NotifyCompactionCancelled();
}

Marker::Marker(Key key, HeapBase& heap, cppgc::Platform* platform,
               MarkingConfig config)
    : MarkerBase(key, heap, platform, config),
      marking_visitor_(heap, mutator_marking_state_),
      conservative_marking_visitor_(heap, mutator_marking_state_,
                                    marking_visitor_) {
  concurrent_marker_ = std::make_unique<ConcurrentMarker>(
      heap_, marking_worklists_, schedule_, platform_);
}

}  // namespace internal
}  // namespace cppgc