Commit ac7af6bb authored by Omer Katz, committed by Commit Bot

cppgc: Concurrent marking

This CL introduces concurrent marking to the cppgc library.
The CL includes:
(*) Split MarkingState into mutator-thread and concurrent-thread variants.
(*) Split MarkingVisitor into mutator-thread and concurrent-thread variants.
(*) Introduce ConcurrentMarker for managing concurrent marking.
(*) Update the unified heap to support concurrent marking as well.

See slides 13 and 14 at the following link for the class hierarchies (a brief sketch also follows the commit header below):
https://docs.google.com/presentation/d/1uDiEjJ-f1VziBKmYcvpw2gglP47M53bwj1L-P__l9QY/

Bug: chromium:1056170
Change-Id: I6530c2b21613011a612773d36fbf37416c23c5e7
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2424348
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70352}
parent b83d0b80
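
For orientation, here is a minimal sketch of the class split the commit message describes. The class names match the declarations in the diff that follows; the bodies are intentionally empty and the trailing comments only summarize members, so treat this as an illustration rather than the real declarations.

// Illustrative skeleton only; real declarations (and their actual bases such
// as cppgc::internal::VisitorBase) are in the diff below.
namespace sketch {

// Marking state: one shared base plus per-thread variants.
class MarkingStateBase {};                                      // MarkAndPush, MarkNoPush, AccountMarkedBytes, ...
class MutatorMarkingState : public MarkingStateBase {};        // DynamicallyMarkAddress, FlushNotFullyConstructedObjects, weak roots
class ConcurrentMarkingState : public MarkingStateBase {};     // RecentlyMarkedBytes() for progress reporting

// Marking visitors: the same split; root visitation stays mutator-only.
class MarkingVisitorBase {};                                    // Visit, VisitWeak, RegisterWeakCallback
class MutatorMarkingVisitor : public MarkingVisitorBase {};     // VisitRoot, VisitWeakRoot
class ConcurrentMarkingVisitor : public MarkingVisitorBase {};  // VisitRoot/VisitWeakRoot are UNREACHABLE()

// Unified-heap (cppgc-js) visitors mirror the same shape.
class UnifiedHeapMarkingVisitorBase {};                         // adds Visit(const JSMemberBase&)
class MutatorUnifiedHeapMarkingVisitor : public UnifiedHeapMarkingVisitorBase {};
class ConcurrentUnifiedHeapMarkingVisitor : public UnifiedHeapMarkingVisitorBase {};

// Concurrent marker: owns the background job and the scheduling heuristics.
class ConcurrentMarkerBase {};                                  // Start, Cancel, NotifyIncrementalMutatorStepCompleted
class ConcurrentMarker : public ConcurrentMarkerBase {};        // cppgc-only CreateConcurrentMarkingVisitor
class UnifiedHeapConcurrentMarker : public ConcurrentMarkerBase {};  // unified-heap variant

}  // namespace sketch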
......@@ -4367,6 +4367,8 @@ v8_source_set("cppgc_base") {
"include/cppgc/visitor.h",
"include/v8config.h",
"src/heap/cppgc/allocation.cc",
"src/heap/cppgc/concurrent-marker.cc",
"src/heap/cppgc/concurrent-marker.h",
"src/heap/cppgc/default-job.h",
"src/heap/cppgc/default-platform.cc",
"src/heap/cppgc/free-list.cc",
......
......@@ -14,6 +14,7 @@
#include "src/heap/base/stack.h"
#include "src/heap/cppgc-js/unified-heap-marking-state.h"
#include "src/heap/cppgc-js/unified-heap-marking-visitor.h"
#include "src/heap/cppgc/concurrent-marker.h"
#include "src/heap/cppgc/gc-info-table.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-object-header.h"
......@@ -63,6 +64,33 @@ class CppgcPlatformAdapter final : public cppgc::Platform {
v8::Isolate* isolate_;
};
class UnifiedHeapConcurrentMarker
: public cppgc::internal::ConcurrentMarkerBase {
public:
UnifiedHeapConcurrentMarker(
cppgc::internal::HeapBase& heap,
cppgc::internal::MarkingWorklists& marking_worklists,
cppgc::internal::IncrementalMarkingSchedule& incremental_marking_schedule,
cppgc::Platform* platform,
UnifiedHeapMarkingState& unified_heap_marking_state)
: cppgc::internal::ConcurrentMarkerBase(
heap, marking_worklists, incremental_marking_schedule, platform),
unified_heap_marking_state_(unified_heap_marking_state) {}
std::unique_ptr<cppgc::Visitor> CreateConcurrentMarkingVisitor(
ConcurrentMarkingState&) const final;
private:
UnifiedHeapMarkingState& unified_heap_marking_state_;
};
std::unique_ptr<cppgc::Visitor>
UnifiedHeapConcurrentMarker::CreateConcurrentMarkingVisitor(
ConcurrentMarkingState& marking_state) const {
return std::make_unique<ConcurrentUnifiedHeapMarkingVisitor>(
heap(), marking_state, unified_heap_marking_state_);
}
class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
public:
UnifiedHeapMarker(Key, Heap& v8_heap, cppgc::internal::HeapBase& cpp_heap,
......@@ -82,8 +110,8 @@ class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
}
private:
UnifiedHeapMarkingState unified_heap_mutator_marking_state_;
UnifiedHeapMarkingVisitor marking_visitor_;
UnifiedHeapMarkingState unified_heap_marking_state_;
MutatorUnifiedHeapMarkingVisitor marking_visitor_;
cppgc::internal::ConservativeMarkingVisitor conservative_marking_visitor_;
};
......@@ -92,11 +120,15 @@ UnifiedHeapMarker::UnifiedHeapMarker(Key key, Heap& v8_heap,
cppgc::Platform* platform,
MarkingConfig config)
: cppgc::internal::MarkerBase(key, heap, platform, config),
unified_heap_mutator_marking_state_(v8_heap),
unified_heap_marking_state_(v8_heap),
marking_visitor_(heap, mutator_marking_state_,
unified_heap_mutator_marking_state_),
unified_heap_marking_state_),
conservative_marking_visitor_(heap, mutator_marking_state_,
marking_visitor_) {}
marking_visitor_) {
concurrent_marker_ = std::make_unique<UnifiedHeapConcurrentMarker>(
heap_, marking_worklists_, schedule_, platform_,
unified_heap_marking_state_);
}
void UnifiedHeapMarker::AddObject(void* object) {
mutator_marking_state_.MarkAndPush(
......
......@@ -35,6 +35,7 @@ class UnifiedHeapMarkingState {
};
void UnifiedHeapMarkingState::MarkAndPush(const JSMemberBase& ref) {
// TODO(chromium:1056170): Defer concurrent handling using the bailout.
heap_.RegisterExternallyReferencedObject(
JSMemberBaseExtractor::ObjectReference(ref));
}
......
......@@ -12,47 +12,59 @@
namespace v8 {
namespace internal {
UnifiedHeapMarkingVisitor::UnifiedHeapMarkingVisitor(
HeapBase& heap, MarkingState& marking_state,
UnifiedHeapMarkingVisitorBase::UnifiedHeapMarkingVisitorBase(
HeapBase& heap, MarkingStateBase& marking_state,
UnifiedHeapMarkingState& unified_heap_marking_state)
: JSVisitor(cppgc::internal::VisitorFactory::CreateKey()),
marking_state_(marking_state),
unified_heap_marking_state_(unified_heap_marking_state) {}
void UnifiedHeapMarkingVisitor::Visit(const void* object,
TraceDescriptor desc) {
void UnifiedHeapMarkingVisitorBase::Visit(const void* object,
TraceDescriptor desc) {
marking_state_.MarkAndPush(object, desc);
}
void UnifiedHeapMarkingVisitor::VisitWeak(const void* object,
TraceDescriptor desc,
WeakCallback weak_callback,
const void* weak_member) {
void UnifiedHeapMarkingVisitorBase::VisitWeak(const void* object,
TraceDescriptor desc,
WeakCallback weak_callback,
const void* weak_member) {
marking_state_.RegisterWeakReferenceIfNeeded(object, desc, weak_callback,
weak_member);
}
void UnifiedHeapMarkingVisitor::VisitRoot(const void* object,
TraceDescriptor desc) {
Visit(object, desc);
void UnifiedHeapMarkingVisitorBase::RegisterWeakCallback(WeakCallback callback,
const void* object) {
marking_state_.RegisterWeakCallback(callback, object);
}
void UnifiedHeapMarkingVisitor::VisitWeakRoot(const void* object,
TraceDescriptor desc,
WeakCallback weak_callback,
const void* weak_root) {
marking_state_.InvokeWeakRootsCallbackIfNeeded(object, desc, weak_callback,
weak_root);
void UnifiedHeapMarkingVisitorBase::Visit(const internal::JSMemberBase& ref) {
unified_heap_marking_state_.MarkAndPush(ref);
}
void UnifiedHeapMarkingVisitor::RegisterWeakCallback(WeakCallback callback,
const void* object) {
marking_state_.RegisterWeakCallback(callback, object);
MutatorUnifiedHeapMarkingVisitor::MutatorUnifiedHeapMarkingVisitor(
HeapBase& heap, MutatorMarkingState& marking_state,
UnifiedHeapMarkingState& unified_heap_marking_state)
: UnifiedHeapMarkingVisitorBase(heap, marking_state,
unified_heap_marking_state) {}
void MutatorUnifiedHeapMarkingVisitor::VisitRoot(const void* object,
TraceDescriptor desc) {
this->Visit(object, desc);
}
void UnifiedHeapMarkingVisitor::Visit(const internal::JSMemberBase& ref) {
unified_heap_marking_state_.MarkAndPush(ref);
void MutatorUnifiedHeapMarkingVisitor::VisitWeakRoot(const void* object,
TraceDescriptor desc,
WeakCallback weak_callback,
const void* weak_root) {
static_cast<MutatorMarkingState&>(marking_state_)
.InvokeWeakRootsCallbackIfNeeded(object, desc, weak_callback, weak_root);
}
ConcurrentUnifiedHeapMarkingVisitor::ConcurrentUnifiedHeapMarkingVisitor(
HeapBase& heap, ConcurrentMarkingState& marking_state,
UnifiedHeapMarkingState& unified_heap_marking_state)
: UnifiedHeapMarkingVisitorBase(heap, marking_state,
unified_heap_marking_state) {}
} // namespace internal
} // namespace v8
......@@ -13,7 +13,9 @@
namespace cppgc {
namespace internal {
class MarkingState;
class ConcurrentMarkingState;
class MarkingStateBase;
class MutatorMarkingState;
} // namespace internal
} // namespace cppgc
......@@ -22,30 +24,58 @@ namespace internal {
using cppgc::TraceDescriptor;
using cppgc::WeakCallback;
using cppgc::internal::ConcurrentMarkingState;
using cppgc::internal::HeapBase;
using cppgc::internal::MarkingState;
using cppgc::internal::MarkingStateBase;
using cppgc::internal::MutatorMarkingState;
class V8_EXPORT_PRIVATE UnifiedHeapMarkingVisitor : public JSVisitor {
class V8_EXPORT_PRIVATE UnifiedHeapMarkingVisitorBase : public JSVisitor {
public:
UnifiedHeapMarkingVisitor(HeapBase&, MarkingState&, UnifiedHeapMarkingState&);
~UnifiedHeapMarkingVisitor() override = default;
UnifiedHeapMarkingVisitorBase(HeapBase&, MarkingStateBase&,
UnifiedHeapMarkingState&);
~UnifiedHeapMarkingVisitorBase() override = default;
private:
protected:
// C++ handling.
void Visit(const void*, TraceDescriptor) final;
void VisitWeak(const void*, TraceDescriptor, WeakCallback, const void*) final;
void VisitRoot(const void*, TraceDescriptor) final;
void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
const void*) final;
void RegisterWeakCallback(WeakCallback, const void*) final;
// JS handling.
void Visit(const internal::JSMemberBase& ref) final;
MarkingState& marking_state_;
MarkingStateBase& marking_state_;
UnifiedHeapMarkingState& unified_heap_marking_state_;
};
class V8_EXPORT_PRIVATE MutatorUnifiedHeapMarkingVisitor
: public UnifiedHeapMarkingVisitorBase {
public:
MutatorUnifiedHeapMarkingVisitor(HeapBase&, MutatorMarkingState&,
UnifiedHeapMarkingState&);
~MutatorUnifiedHeapMarkingVisitor() override = default;
protected:
void VisitRoot(const void*, TraceDescriptor) final;
void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
const void*) final;
};
class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor
: public UnifiedHeapMarkingVisitorBase {
public:
ConcurrentUnifiedHeapMarkingVisitor(HeapBase&, ConcurrentMarkingState&,
UnifiedHeapMarkingState&);
~ConcurrentUnifiedHeapMarkingVisitor() override = default;
protected:
void VisitRoot(const void*, TraceDescriptor) final { UNREACHABLE(); }
void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
const void*) final {
UNREACHABLE();
}
};
} // namespace internal
} // namespace v8
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/cppgc/concurrent-marker.h"
#include "include/cppgc/platform.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/liveness-broker.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/marking-visitor.h"
namespace cppgc {
namespace internal {
namespace {
static constexpr double kMarkingScheduleRatioBeforeConcurrentPriorityIncrease =
0.5;
static constexpr size_t kDefaultDeadlineCheckInterval = 750u;
template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
typename WorklistLocal, typename Callback>
bool DrainWorklistWithYielding(
JobDelegate* job_delegate, ConcurrentMarkingState& marking_state,
IncrementalMarkingSchedule& incremental_marking_schedule,
WorklistLocal& worklist_local, Callback callback) {
return DrainWorklistWithPredicate<kDeadlineCheckInterval>(
[&incremental_marking_schedule, &marking_state, job_delegate]() {
incremental_marking_schedule.AddConcurrentlyMarkedBytes(
marking_state.RecentlyMarkedBytes());
return job_delegate->ShouldYield();
},
worklist_local, callback);
}
size_t WorkSizeForConcurrentMarking(MarkingWorklists& marking_worklists) {
return marking_worklists.marking_worklist()->Size() +
marking_worklists.write_barrier_worklist()->Size() +
marking_worklists.previously_not_fully_constructed_worklist()->Size();
}
// Checks whether worklists' global pools hold any segment a concurrent marker
// can steal. This is called before the concurrent marker holds any Locals, so
// no need to check local segments.
bool HasWorkForConcurrentMarking(MarkingWorklists& marking_worklists) {
return !marking_worklists.marking_worklist()->IsEmpty() ||
!marking_worklists.write_barrier_worklist()->IsEmpty() ||
!marking_worklists.previously_not_fully_constructed_worklist()
->IsEmpty();
}
class ConcurrentMarkingTask final : public v8::JobTask {
public:
explicit ConcurrentMarkingTask(ConcurrentMarkerBase&);
void Run(JobDelegate* delegate) final;
size_t GetMaxConcurrency(size_t) const final;
private:
void ProcessWorklists(JobDelegate*, ConcurrentMarkingState&, Visitor&);
const ConcurrentMarkerBase& concurrent_marker_;
};
ConcurrentMarkingTask::ConcurrentMarkingTask(
ConcurrentMarkerBase& concurrent_marker)
: concurrent_marker_(concurrent_marker) {}
void ConcurrentMarkingTask::Run(JobDelegate* job_delegate) {
if (!HasWorkForConcurrentMarking(concurrent_marker_.marking_worklists()))
return;
ConcurrentMarkingState concurrent_marking_state(
concurrent_marker_.heap(), concurrent_marker_.marking_worklists());
std::unique_ptr<Visitor> concurrent_marking_visitor =
concurrent_marker_.CreateConcurrentMarkingVisitor(
concurrent_marking_state);
ProcessWorklists(job_delegate, concurrent_marking_state,
*concurrent_marking_visitor.get());
concurrent_marker_.incremental_marking_schedule().AddConcurrentlyMarkedBytes(
concurrent_marking_state.RecentlyMarkedBytes());
concurrent_marking_state.Publish();
}
size_t ConcurrentMarkingTask::GetMaxConcurrency(
size_t current_worker_count) const {
return WorkSizeForConcurrentMarking(concurrent_marker_.marking_worklists()) +
current_worker_count;
}
void ConcurrentMarkingTask::ProcessWorklists(
JobDelegate* job_delegate, ConcurrentMarkingState& concurrent_marking_state,
Visitor& concurrent_marking_visitor) {
do {
if (!DrainWorklistWithYielding(
job_delegate, concurrent_marking_state,
concurrent_marker_.incremental_marking_schedule(),
concurrent_marking_state
.previously_not_fully_constructed_worklist(),
[&concurrent_marking_state,
&concurrent_marking_visitor](HeapObjectHeader* header) {
BasePage::FromPayload(header)->SynchronizedLoad();
DynamicallyTraceMarkedObject<
HeapObjectHeader::AccessMode::kAtomic>(
concurrent_marking_visitor, *header);
concurrent_marking_state.AccountMarkedBytes(*header);
})) {
return;
}
if (!DrainWorklistWithYielding(
job_delegate, concurrent_marking_state,
concurrent_marker_.incremental_marking_schedule(),
concurrent_marking_state.marking_worklist(),
[&concurrent_marking_state, &concurrent_marking_visitor](
const MarkingWorklists::MarkingItem& item) {
BasePage::FromPayload(item.base_object_payload)
->SynchronizedLoad();
const HeapObjectHeader& header =
HeapObjectHeader::FromPayload(item.base_object_payload);
DCHECK(!header.IsInConstruction<
HeapObjectHeader::AccessMode::kAtomic>());
DCHECK(header.IsMarked<HeapObjectHeader::AccessMode::kAtomic>());
item.callback(&concurrent_marking_visitor,
item.base_object_payload);
concurrent_marking_state.AccountMarkedBytes(header);
})) {
return;
}
if (!DrainWorklistWithYielding(
job_delegate, concurrent_marking_state,
concurrent_marker_.incremental_marking_schedule(),
concurrent_marking_state.write_barrier_worklist(),
[&concurrent_marking_state,
&concurrent_marking_visitor](HeapObjectHeader* header) {
BasePage::FromPayload(header)->SynchronizedLoad();
DynamicallyTraceMarkedObject<
HeapObjectHeader::AccessMode::kAtomic>(
concurrent_marking_visitor, *header);
concurrent_marking_state.AccountMarkedBytes(*header);
})) {
return;
}
} while (
!concurrent_marking_state.marking_worklist().IsLocalAndGlobalEmpty());
}
} // namespace
ConcurrentMarkerBase::ConcurrentMarkerBase(
HeapBase& heap, MarkingWorklists& marking_worklists,
IncrementalMarkingSchedule& incremental_marking_schedule,
cppgc::Platform* platform)
: heap_(heap),
marking_worklists_(marking_worklists),
incremental_marking_schedule_(incremental_marking_schedule),
platform_(platform) {}
void ConcurrentMarkerBase::Start() {
if (!platform_) return;
concurrent_marking_handle_ =
platform_->PostJob(v8::TaskPriority::kUserVisible,
std::make_unique<ConcurrentMarkingTask>(*this));
}
void ConcurrentMarkerBase::Cancel() {
if (concurrent_marking_handle_) concurrent_marking_handle_->Cancel();
}
ConcurrentMarkerBase::~ConcurrentMarkerBase() {
CHECK_IMPLIES(concurrent_marking_handle_,
!concurrent_marking_handle_->IsRunning());
}
bool ConcurrentMarkerBase::NotifyIncrementalMutatorStepCompleted() {
DCHECK(concurrent_marking_handle_);
if (HasWorkForConcurrentMarking(marking_worklists_)) {
// Notifies the scheduler that max concurrency might have increased.
// This will adjust the number of markers if necessary.
IncreaseMarkingPriorityIfNeeded();
concurrent_marking_handle_->NotifyConcurrencyIncrease();
return false;
}
return concurrent_marking_handle_->IsCompleted();
}
void ConcurrentMarkerBase::IncreaseMarkingPriorityIfNeeded() {
if (concurrent_marking_priority_increased_) return;
// If concurrent tasks aren't executed, it might delay GC finalization.
// As long as GC is active so is the write barrier, which incurs a
// performance cost. Marking is estimated to take overall
// |IncrementalMarkingSchedule::kEstimatedMarkingTimeMs|. If
// concurrent marking tasks have not reported any progress (i.e. the
// concurrently marked bytes count has not changed) in over
// |kMarkingScheduleRatioBeforeConcurrentPriorityIncrease| (50%) of
// that expected duration, we increase the concurrent task priority
// for the duration of the current GC. This is meant to prevent the
// GC from exceeding its expected end time.
size_t current_concurrently_marked_bytes_ =
incremental_marking_schedule_.GetConcurrentlyMarkedBytes();
if (current_concurrently_marked_bytes_ > last_concurrently_marked_bytes_) {
last_concurrently_marked_bytes_ = current_concurrently_marked_bytes_;
last_concurrently_marked_bytes_update_ = v8::base::TimeTicks::Now();
} else if ((v8::base::TimeTicks::Now() -
last_concurrently_marked_bytes_update_)
.InMilliseconds() >
kMarkingScheduleRatioBeforeConcurrentPriorityIncrease *
IncrementalMarkingSchedule::kEstimatedMarkingTimeMs) {
// TODO(chromium:1056170): Enable priority update after it is added to the
// platform.
// concurrent_marking_handle_.UpdatePriority(v8::TaskPriority::USER_BLOCKING);
concurrent_marking_priority_increased_ = true;
}
}
std::unique_ptr<Visitor> ConcurrentMarker::CreateConcurrentMarkingVisitor(
ConcurrentMarkingState& marking_state) const {
return std::make_unique<ConcurrentMarkingVisitor>(heap(), marking_state);
}
} // namespace internal
} // namespace cppgc
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_CPPGC_CONCURRENT_MARKER_H_
#define V8_HEAP_CPPGC_CONCURRENT_MARKER_H_
#include "include/cppgc/platform.h"
#include "src/heap/cppgc/incremental-marking-schedule.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/marking-worklists.h"
namespace cppgc {
namespace internal {
class V8_EXPORT_PRIVATE ConcurrentMarkerBase {
public:
ConcurrentMarkerBase(HeapBase&, MarkingWorklists&,
IncrementalMarkingSchedule&, cppgc::Platform*);
virtual ~ConcurrentMarkerBase();
ConcurrentMarkerBase(const ConcurrentMarkerBase&) = delete;
ConcurrentMarkerBase& operator=(const ConcurrentMarkerBase&) = delete;
void Start();
void Cancel();
bool NotifyIncrementalMutatorStepCompleted();
HeapBase& heap() const { return heap_; }
MarkingWorklists& marking_worklists() const { return marking_worklists_; }
IncrementalMarkingSchedule& incremental_marking_schedule() const {
return incremental_marking_schedule_;
}
virtual std::unique_ptr<Visitor> CreateConcurrentMarkingVisitor(
ConcurrentMarkingState&) const = 0;
protected:
void IncreaseMarkingPriorityIfNeeded();
private:
HeapBase& heap_;
MarkingWorklists& marking_worklists_;
IncrementalMarkingSchedule& incremental_marking_schedule_;
cppgc::Platform* const platform_;
// The job handle doubles as a flag to denote that concurrent marking was started.
std::unique_ptr<JobHandle> concurrent_marking_handle_{nullptr};
size_t last_concurrently_marked_bytes_ = 0;
v8::base::TimeTicks last_concurrently_marked_bytes_update_;
bool concurrent_marking_priority_increased_{false};
};
class V8_EXPORT_PRIVATE ConcurrentMarker : public ConcurrentMarkerBase {
public:
ConcurrentMarker(HeapBase& heap, MarkingWorklists& marking_worklists,
IncrementalMarkingSchedule& incremental_marking_schedule,
cppgc::Platform* platform)
: ConcurrentMarkerBase(heap, marking_worklists,
incremental_marking_schedule, platform) {}
std::unique_ptr<Visitor> CreateConcurrentMarkingVisitor(
ConcurrentMarkingState&) const final;
};
} // namespace internal
} // namespace cppgc
#endif // V8_HEAP_CPPGC_CONCURRENT_MARKER_H_
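
A hedged sketch of how the mutator-side marker is expected to drive this interface, mirroring the MarkerBase changes further down in this CL. The three helper functions are hypothetical and exist only to group the calls; the real call sites live in StartMarking(), AdvanceMarkingWithDeadline(), and EnterAtomicPause() in src/heap/cppgc/marker.cc below.

// Hypothetical helpers, for illustration only.
#include "src/heap/cppgc/concurrent-marker.h"

namespace cppgc {
namespace internal {

// StartMarking(): once incremental marking begins and the config asks for
// kIncrementalAndConcurrent, post the background marking job.
void OnIncrementalMarkingStartedSketch(ConcurrentMarkerBase& concurrent_marker,
                                       bool concurrent_enabled) {
  if (concurrent_enabled) concurrent_marker.Start();
}

// AdvanceMarkingWithDeadline(): after each incremental step (when concurrent
// marking is enabled), let the concurrent marker re-check the worklists, bump
// job concurrency, and possibly raise the task priority if it has stalled.
void OnIncrementalStepCompletedSketch(ConcurrentMarkerBase& concurrent_marker) {
  concurrent_marker.NotifyIncrementalMutatorStepCompleted();
}

// EnterAtomicPause(): cancel the job so the mutator finishes marking alone.
void OnEnterAtomicPauseSketch(ConcurrentMarkerBase& concurrent_marker) {
  concurrent_marker.Cancel();
}

}  // namespace internal
}  // namespace cppgc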
......@@ -32,9 +32,12 @@ void IncrementalMarkingSchedule::AddConcurrentlyMarkedBytes(
concurrently_marked_bytes_.fetch_add(marked_bytes, std::memory_order_relaxed);
}
size_t IncrementalMarkingSchedule::GetOverallMarkedBytes() {
return incrementally_marked_bytes_ +
concurrently_marked_bytes_.load(std::memory_order_relaxed);
size_t IncrementalMarkingSchedule::GetOverallMarkedBytes() const {
return incrementally_marked_bytes_ + GetConcurrentlyMarkedBytes();
}
size_t IncrementalMarkingSchedule::GetConcurrentlyMarkedBytes() const {
return concurrently_marked_bytes_.load(std::memory_order_relaxed);
}
double IncrementalMarkingSchedule::GetElapsedTimeInMs(
......
......@@ -26,7 +26,8 @@ class V8_EXPORT_PRIVATE IncrementalMarkingSchedule {
void UpdateIncrementalMarkedBytes(size_t);
void AddConcurrentlyMarkedBytes(size_t);
size_t GetOverallMarkedBytes();
size_t GetOverallMarkedBytes() const;
size_t GetConcurrentlyMarkedBytes() const;
size_t GetNextIncrementalStepDuration(size_t);
......
......@@ -55,7 +55,8 @@ bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
}
// Visit remembered set that was recorded in the generational barrier.
void VisitRememberedSlots(HeapBase& heap, MarkingState& marking_state) {
void VisitRememberedSlots(HeapBase& heap,
MutatorMarkingState& mutator_marking_state) {
#if defined(CPPGC_YOUNG_GENERATION)
for (void* slot : heap.remembered_slots()) {
auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
......@@ -69,7 +70,7 @@ void VisitRememberedSlots(HeapBase& heap, MarkingState& marking_state) {
!header.IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>());
void* value = *reinterpret_cast<void**>(slot);
marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
mutator_marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
}
#endif
}
......@@ -85,33 +86,14 @@ void ResetRememberedSet(HeapBase& heap) {
static constexpr size_t kDefaultDeadlineCheckInterval = 150u;
template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
typename WorklistLocal, typename Callback, typename Predicate>
bool DrainWorklistWithDeadline(Predicate should_yield,
WorklistLocal& worklist_local,
Callback callback) {
size_t processed_callback_count = 0;
typename WorklistLocal::ItemType item;
while (worklist_local.Pop(&item)) {
callback(item);
if (processed_callback_count-- == 0) {
if (should_yield()) {
return false;
}
processed_callback_count = kDeadlineCheckInterval;
}
}
return true;
}
template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
typename WorklistLocal, typename Callback>
bool DrainWorklistWithBytesAndTimeDeadline(MarkingState& marking_state,
bool DrainWorklistWithBytesAndTimeDeadline(MarkingStateBase& marking_state,
size_t marked_bytes_deadline,
v8::base::TimeTicks time_deadline,
WorklistLocal& worklist_local,
Callback callback) {
return DrainWorklistWithDeadline(
return DrainWorklistWithPredicate<kDeadlineCheckInterval>(
[&marking_state, marked_bytes_deadline, time_deadline]() {
return (marked_bytes_deadline <= marking_state.marked_bytes()) ||
(time_deadline <= v8::base::TimeTicks::Now());
......@@ -119,15 +101,6 @@ bool DrainWorklistWithBytesAndTimeDeadline(MarkingState& marking_state,
worklist_local, callback);
}
void TraceMarkedObject(Visitor* visitor, const HeapObjectHeader* header) {
DCHECK(header);
DCHECK(!header->IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>());
DCHECK(header->IsMarked<HeapObjectHeader::AccessMode::kNonAtomic>());
const GCInfo& gcinfo =
GlobalGCInfoTable::GCInfoFromIndex(header->GetGCInfoIndex());
gcinfo.trace(visitor, header->Payload());
}
size_t GetNextIncrementalStepDuration(IncrementalMarkingSchedule& schedule,
HeapBase& heap) {
return schedule.GetNextIncrementalStepDuration(
......@@ -204,6 +177,7 @@ MarkerBase::~MarkerBase() {
}
void MarkerBase::StartMarking() {
DCHECK(!is_marking_started_);
heap().stats_collector()->NotifyMarkingStarted();
is_marking_started_ = true;
......@@ -213,13 +187,18 @@ void MarkerBase::StartMarking() {
// Scanning the stack is expensive so we only do it at the atomic pause.
VisitRoots(MarkingConfig::StackState::kNoHeapPointers);
ScheduleIncrementalMarkingTask();
if (config_.marking_type ==
MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
concurrent_marker_->Start();
}
}
}
void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
// Cancel remaining incremental tasks.
if (incremental_marking_handle_) incremental_marking_handle_.Cancel();
// Cancel remaining concurrent/incremental tasks.
concurrent_marker_->Cancel();
incremental_marking_handle_.Cancel();
}
config_.stack_state = stack_state;
config_.marking_type = MarkingConfig::MarkingType::kAtomic;
......@@ -246,8 +225,8 @@ void MarkerBase::LeaveAtomicPause() {
void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
DCHECK(is_marking_started_);
EnterAtomicPause(stack_state);
ProcessWorklistsWithDeadline(std::numeric_limits<size_t>::max(),
v8::base::TimeTicks::Max());
CHECK(ProcessWorklistsWithDeadline(std::numeric_limits<size_t>::max(),
v8::base::TimeTicks::Max()));
mutator_marking_state_.Publish();
LeaveAtomicPause();
}
......@@ -302,13 +281,11 @@ bool MarkerBase::IncrementalMarkingStep(MarkingConfig::StackState stack_state) {
return AdvanceMarkingWithDeadline();
}
bool MarkerBase::AdvanceMarkingOnAllocation() {
bool is_done = AdvanceMarkingWithDeadline();
if (is_done) {
void MarkerBase::AdvanceMarkingOnAllocation() {
if (AdvanceMarkingWithDeadline()) {
// Schedule another incremental task for finalizing without a stack.
ScheduleIncrementalMarkingTask();
}
return is_done;
}
bool MarkerBase::AdvanceMarkingWithMaxDuration(
......@@ -326,26 +303,28 @@ bool MarkerBase::AdvanceMarkingWithDeadline(v8::base::TimeDelta max_duration) {
v8::base::TimeTicks::Now() + max_duration);
}
schedule_.UpdateIncrementalMarkedBytes(mutator_marking_state_.marked_bytes());
mutator_marking_state_.Publish();
if (!is_done) {
// If marking is atomic, |is_done| should always be true.
DCHECK_NE(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
ScheduleIncrementalMarkingTask();
if (config_.marking_type ==
MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
concurrent_marker_->NotifyIncrementalMutatorStepCompleted();
}
}
mutator_marking_state_.Publish();
return is_done;
}
bool MarkerBase::ProcessWorklistsWithDeadline(
size_t marked_bytes_deadline, v8::base::TimeTicks time_deadline) {
do {
// Convert |previously_not_fully_constructed_worklist_| to
// |marking_worklist_|. This merely re-adds items with the proper
// callbacks.
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.previously_not_fully_constructed_worklist(),
[this](HeapObjectHeader* header) {
TraceMarkedObject(&visitor(), header);
DynamicallyTraceMarkedObject<
HeapObjectHeader::AccessMode::kNonAtomic>(visitor(), *header);
mutator_marking_state_.AccountMarkedBytes(*header);
})) {
return false;
......@@ -371,7 +350,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.write_barrier_worklist(),
[this](HeapObjectHeader* header) {
TraceMarkedObject(&visitor(), header);
DynamicallyTraceMarkedObject<
HeapObjectHeader::AccessMode::kNonAtomic>(visitor(), *header);
mutator_marking_state_.AccountMarkedBytes(*header);
})) {
return false;
......@@ -405,7 +385,10 @@ Marker::Marker(Key key, HeapBase& heap, cppgc::Platform* platform,
: MarkerBase(key, heap, platform, config),
marking_visitor_(heap, mutator_marking_state_),
conservative_marking_visitor_(heap, mutator_marking_state_,
marking_visitor_) {}
marking_visitor_) {
concurrent_marker_ = std::make_unique<ConcurrentMarker>(
heap_, marking_worklists_, schedule_, platform_);
}
} // namespace internal
} // namespace cppgc
......@@ -12,6 +12,7 @@
#include "src/base/macros.h"
#include "src/base/platform/time.h"
#include "src/heap/base/worklist.h"
#include "src/heap/cppgc/concurrent-marker.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/incremental-marking-schedule.h"
#include "src/heap/cppgc/marking-state.h"
......@@ -73,7 +74,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
bool AdvanceMarkingWithMaxDuration(v8::base::TimeDelta);
// Makes marking progress when allocating a new lab.
bool AdvanceMarkingOnAllocation();
void AdvanceMarkingOnAllocation();
// Signals leaving the atomic marking pause. This method expects no more
// objects to be marked and merely updates marking states if needed.
......@@ -94,7 +95,9 @@ class V8_EXPORT_PRIVATE MarkerBase {
HeapBase& heap() { return heap_; }
MarkingWorklists& MarkingWorklistsForTesting() { return marking_worklists_; }
MarkingState& MarkingStateForTesting() { return mutator_marking_state_; }
MutatorMarkingState& MutatorMarkingStateForTesting() {
return mutator_marking_state_;
}
cppgc::Visitor& VisitorForTesting() { return visitor(); }
void ClearAllWorklistsForTesting();
......@@ -163,11 +166,13 @@ class V8_EXPORT_PRIVATE MarkerBase {
IncrementalMarkingTask::Handle incremental_marking_handle_;
MarkingWorklists marking_worklists_;
MarkingState mutator_marking_state_;
bool is_marking_started_ = false;
MutatorMarkingState mutator_marking_state_;
bool is_marking_started_{false};
IncrementalMarkingSchedule schedule_;
std::unique_ptr<ConcurrentMarkerBase> concurrent_marker_{nullptr};
bool incremental_marking_disabled_for_testing_{false};
friend class MarkerFactory;
......@@ -201,7 +206,7 @@ class V8_EXPORT_PRIVATE Marker final : public MarkerBase {
}
private:
MarkingVisitor marking_visitor_;
MutatorMarkingVisitor marking_visitor_;
ConservativeMarkingVisitor conservative_marking_visitor_;
};
......
......@@ -9,7 +9,7 @@
namespace cppgc {
namespace internal {
void MarkingState::FlushNotFullyConstructedObjects() {
void MutatorMarkingState::FlushNotFullyConstructedObjects() {
std::unordered_set<HeapObjectHeader*> objects =
not_fully_constructed_worklist_.Extract();
for (HeapObjectHeader* object : objects) {
......
......@@ -16,28 +16,19 @@ namespace cppgc {
namespace internal {
// C++ marking implementation.
class MarkingState {
class MarkingStateBase {
public:
inline MarkingState(HeapBase& heap, MarkingWorklists&);
inline MarkingStateBase(HeapBase& heap, MarkingWorklists&);
MarkingState(const MarkingState&) = delete;
MarkingState& operator=(const MarkingState&) = delete;
MarkingStateBase(const MarkingStateBase&) = delete;
MarkingStateBase& operator=(const MarkingStateBase&) = delete;
inline void MarkAndPush(const void*, TraceDescriptor);
inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);
inline void MarkAndPush(HeapObjectHeader&);
inline bool MarkNoPush(HeapObjectHeader&);
template <
HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
inline void DynamicallyMarkAddress(ConstAddress);
inline void RegisterWeakReferenceIfNeeded(const void*, TraceDescriptor,
WeakCallback, const void*);
inline void RegisterWeakCallback(WeakCallback, const void*);
inline void InvokeWeakRootsCallbackIfNeeded(const void*, TraceDescriptor,
WeakCallback, const void*);
inline void AccountMarkedBytes(const HeapObjectHeader&);
size_t marked_bytes() const { return marked_bytes_; }
......@@ -49,10 +40,6 @@ class MarkingState {
write_barrier_worklist_.Publish();
}
// Moves objects in not_fully_constructed_worklist_ to
// previously_not_fully_constructed_worklist_.
void FlushNotFullyConstructedObjects();
MarkingWorklists::MarkingWorklist::Local& marking_worklist() {
return marking_worklist_;
}
......@@ -71,7 +58,11 @@ class MarkingState {
return write_barrier_worklist_;
}
private:
protected:
inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);
inline bool MarkNoPush(HeapObjectHeader&);
#ifdef DEBUG
HeapBase& heap_;
#endif // DEBUG
......@@ -87,7 +78,8 @@ class MarkingState {
size_t marked_bytes_ = 0;
};
MarkingState::MarkingState(HeapBase& heap, MarkingWorklists& marking_worklists)
MarkingStateBase::MarkingStateBase(HeapBase& heap,
MarkingWorklists& marking_worklists)
:
#ifdef DEBUG
heap_(heap),
......@@ -101,14 +93,15 @@ MarkingState::MarkingState(HeapBase& heap, MarkingWorklists& marking_worklists)
write_barrier_worklist_(marking_worklists.write_barrier_worklist()) {
}
void MarkingState::MarkAndPush(const void* object, TraceDescriptor desc) {
void MarkingStateBase::MarkAndPush(const void* object, TraceDescriptor desc) {
DCHECK_NOT_NULL(object);
MarkAndPush(HeapObjectHeader::FromPayload(
const_cast<void*>(desc.base_object_payload)),
desc);
}
void MarkingState::MarkAndPush(HeapObjectHeader& header, TraceDescriptor desc) {
void MarkingStateBase::MarkAndPush(HeapObjectHeader& header,
TraceDescriptor desc) {
DCHECK_NOT_NULL(desc.callback);
if (header.IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>()) {
......@@ -118,7 +111,7 @@ void MarkingState::MarkAndPush(HeapObjectHeader& header, TraceDescriptor desc) {
}
}
bool MarkingState::MarkNoPush(HeapObjectHeader& header) {
bool MarkingStateBase::MarkNoPush(HeapObjectHeader& header) {
// A GC should only mark the objects that belong in its heap.
DCHECK_EQ(&heap_, BasePage::FromPayload(&header)->heap());
// Never mark free space objects. This would e.g. hint to marking a promptly
......@@ -127,31 +120,17 @@ bool MarkingState::MarkNoPush(HeapObjectHeader& header) {
return header.TryMarkAtomic();
}
template <HeapObjectHeader::AccessMode mode>
void MarkingState::DynamicallyMarkAddress(ConstAddress address) {
HeapObjectHeader& header =
BasePage::FromPayload(address)->ObjectHeaderFromInnerAddress<mode>(
const_cast<Address>(address));
DCHECK(!header.IsInConstruction<mode>());
if (MarkNoPush(header)) {
marking_worklist_.Push(
{reinterpret_cast<void*>(header.Payload()),
GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex<mode>())
.trace});
}
}
void MarkingState::MarkAndPush(HeapObjectHeader& header) {
void MarkingStateBase::MarkAndPush(HeapObjectHeader& header) {
MarkAndPush(
header,
{header.Payload(),
GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
}
void MarkingState::RegisterWeakReferenceIfNeeded(const void* object,
TraceDescriptor desc,
WeakCallback weak_callback,
const void* parameter) {
void MarkingStateBase::RegisterWeakReferenceIfNeeded(const void* object,
TraceDescriptor desc,
WeakCallback weak_callback,
const void* parameter) {
// Filter out already marked values. The write barrier for WeakMember
// ensures that any newly set value after this point is kept alive and does
// not require the callback.
......@@ -161,10 +140,48 @@ void MarkingState::RegisterWeakReferenceIfNeeded(const void* object,
RegisterWeakCallback(weak_callback, parameter);
}
void MarkingState::InvokeWeakRootsCallbackIfNeeded(const void* object,
TraceDescriptor desc,
WeakCallback weak_callback,
const void* parameter) {
void MarkingStateBase::AccountMarkedBytes(const HeapObjectHeader& header) {
marked_bytes_ +=
header.IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>()
? reinterpret_cast<const LargePage*>(BasePage::FromPayload(&header))
->PayloadSize()
: header.GetSize<HeapObjectHeader::AccessMode::kAtomic>();
}
class MutatorMarkingState : public MarkingStateBase {
public:
MutatorMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists)
: MarkingStateBase(heap, marking_worklists) {}
inline bool MarkNoPush(HeapObjectHeader& header) {
return MutatorMarkingState::MarkingStateBase::MarkNoPush(header);
}
inline void DynamicallyMarkAddress(ConstAddress);
// Moves objects in not_fully_constructed_worklist_ to
// previously_not_fully_constructed_worklist_.
void FlushNotFullyConstructedObjects();
inline void InvokeWeakRootsCallbackIfNeeded(const void*, TraceDescriptor,
WeakCallback, const void*);
};
void MutatorMarkingState::DynamicallyMarkAddress(ConstAddress address) {
HeapObjectHeader& header =
BasePage::FromPayload(address)->ObjectHeaderFromInnerAddress(
const_cast<Address>(address));
DCHECK(!header.IsInConstruction());
if (MarkNoPush(header)) {
marking_worklist_.Push(
{reinterpret_cast<void*>(header.Payload()),
GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
}
}
void MutatorMarkingState::InvokeWeakRootsCallbackIfNeeded(
const void* object, TraceDescriptor desc, WeakCallback weak_callback,
const void* parameter) {
// Since weak roots are only traced at the end of marking, we can execute
// the callback instead of registering it.
#if DEBUG
......@@ -175,17 +192,56 @@ void MarkingState::InvokeWeakRootsCallbackIfNeeded(const void* object,
weak_callback(LivenessBrokerFactory::Create(), parameter);
}
void MarkingState::RegisterWeakCallback(WeakCallback callback,
const void* object) {
void MarkingStateBase::RegisterWeakCallback(WeakCallback callback,
const void* object) {
weak_callback_worklist_.Push({callback, object});
}
void MarkingState::AccountMarkedBytes(const HeapObjectHeader& header) {
marked_bytes_ +=
header.IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>()
? reinterpret_cast<const LargePage*>(BasePage::FromPayload(&header))
->PayloadSize()
: header.GetSize<HeapObjectHeader::AccessMode::kAtomic>();
class ConcurrentMarkingState : public MarkingStateBase {
public:
ConcurrentMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists)
: MarkingStateBase(heap, marking_worklists) {}
~ConcurrentMarkingState() { DCHECK_EQ(last_marked_bytes_, marked_bytes_); }
size_t RecentlyMarkedBytes() {
return marked_bytes_ - std::exchange(last_marked_bytes_, marked_bytes_);
}
private:
size_t last_marked_bytes_ = 0;
};
template <size_t deadline_check_interval, typename WorklistLocal,
typename Callback, typename Predicate>
bool DrainWorklistWithPredicate(Predicate should_yield,
WorklistLocal& worklist_local,
Callback callback) {
if (worklist_local.IsLocalAndGlobalEmpty()) return true;
// For concurrent markers, should_yield also reports marked bytes.
if (should_yield()) return false;
size_t processed_callback_count = deadline_check_interval;
typename WorklistLocal::ItemType item;
while (worklist_local.Pop(&item)) {
callback(item);
if (--processed_callback_count == 0) {
if (should_yield()) {
return false;
}
processed_callback_count = deadline_check_interval;
}
}
return true;
}
template <HeapObjectHeader::AccessMode mode>
void DynamicallyTraceMarkedObject(Visitor& visitor,
const HeapObjectHeader& header) {
DCHECK(!header.IsInConstruction<mode>());
DCHECK(header.IsMarked<mode>());
const GCInfo& gcinfo =
GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex<mode>());
gcinfo.trace(&visitor, header.Payload());
}
} // namespace internal
......
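
The delta-reporting idiom used by ConcurrentMarkingState::RecentlyMarkedBytes() above, shown in isolation as a standalone, hedged sketch (the struct below is illustrative and not part of the CL):

#include <cstddef>
#include <utility>

// Illustrative only: report the bytes marked since the previous report and
// remember the new baseline in a single std::exchange step.
struct ProgressCounterSketch {
  size_t marked_bytes = 0;
  size_t last_reported_bytes = 0;

  size_t TakeRecentlyMarkedBytes() {
    return marked_bytes - std::exchange(last_reported_bytes, marked_bytes);
  }
};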
......@@ -10,38 +10,28 @@
namespace cppgc {
namespace internal {
MarkingVisitor::MarkingVisitor(HeapBase& heap, MarkingState& marking_state)
MarkingVisitorBase::MarkingVisitorBase(HeapBase& heap,
MarkingStateBase& marking_state)
: marking_state_(marking_state) {}
void MarkingVisitor::Visit(const void* object, TraceDescriptor desc) {
void MarkingVisitorBase::Visit(const void* object, TraceDescriptor desc) {
marking_state_.MarkAndPush(object, desc);
}
void MarkingVisitor::VisitWeak(const void* object, TraceDescriptor desc,
WeakCallback weak_callback,
const void* weak_member) {
void MarkingVisitorBase::VisitWeak(const void* object, TraceDescriptor desc,
WeakCallback weak_callback,
const void* weak_member) {
marking_state_.RegisterWeakReferenceIfNeeded(object, desc, weak_callback,
weak_member);
}
void MarkingVisitor::VisitRoot(const void* object, TraceDescriptor desc) {
Visit(object, desc);
}
void MarkingVisitor::VisitWeakRoot(const void* object, TraceDescriptor desc,
WeakCallback weak_callback,
const void* weak_root) {
marking_state_.InvokeWeakRootsCallbackIfNeeded(object, desc, weak_callback,
weak_root);
}
void MarkingVisitor::RegisterWeakCallback(WeakCallback callback,
const void* object) {
void MarkingVisitorBase::RegisterWeakCallback(WeakCallback callback,
const void* object) {
marking_state_.RegisterWeakCallback(callback, object);
}
ConservativeMarkingVisitor::ConservativeMarkingVisitor(
HeapBase& heap, MarkingState& marking_state, cppgc::Visitor& visitor)
HeapBase& heap, MutatorMarkingState& marking_state, cppgc::Visitor& visitor)
: ConservativeTracingVisitor(heap, *heap.page_backend(), visitor),
marking_state_(marking_state) {}
......@@ -52,6 +42,27 @@ void ConservativeMarkingVisitor::VisitConservatively(
marking_state_.AccountMarkedBytes(header);
}
MutatorMarkingVisitor::MutatorMarkingVisitor(HeapBase& heap,
MutatorMarkingState& marking_state)
: MarkingVisitorBase(heap, marking_state) {}
void MutatorMarkingVisitor::VisitRoot(const void* object,
TraceDescriptor desc) {
Visit(object, desc);
}
void MutatorMarkingVisitor::VisitWeakRoot(const void* object,
TraceDescriptor desc,
WeakCallback weak_callback,
const void* weak_root) {
static_cast<MutatorMarkingState&>(marking_state_)
.InvokeWeakRootsCallbackIfNeeded(object, desc, weak_callback, weak_root);
}
ConcurrentMarkingVisitor::ConcurrentMarkingVisitor(
HeapBase& heap, ConcurrentMarkingState& marking_state)
: MarkingVisitorBase(heap, marking_state) {}
void ConservativeMarkingVisitor::VisitPointer(const void* address) {
TraceConservativelyIfNeeded(address);
}
......
......@@ -16,28 +16,51 @@ namespace internal {
class HeapBase;
class HeapObjectHeader;
class Marker;
class MarkingState;
class MarkingStateBase;
class MutatorMarkingState;
class ConcurrentMarkingState;
class V8_EXPORT_PRIVATE MarkingVisitor : public VisitorBase {
class V8_EXPORT_PRIVATE MarkingVisitorBase : public VisitorBase {
public:
MarkingVisitor(HeapBase&, MarkingState&);
~MarkingVisitor() override = default;
MarkingVisitorBase(HeapBase&, MarkingStateBase&);
~MarkingVisitorBase() override = default;
protected:
void Visit(const void*, TraceDescriptor) final;
void VisitWeak(const void*, TraceDescriptor, WeakCallback, const void*) final;
void RegisterWeakCallback(WeakCallback, const void*) final;
MarkingStateBase& marking_state_;
};
class V8_EXPORT_PRIVATE MutatorMarkingVisitor : public MarkingVisitorBase {
public:
MutatorMarkingVisitor(HeapBase&, MutatorMarkingState&);
~MutatorMarkingVisitor() override = default;
protected:
void VisitRoot(const void*, TraceDescriptor) final;
void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
const void*) final;
void RegisterWeakCallback(WeakCallback, const void*) final;
};
class V8_EXPORT_PRIVATE ConcurrentMarkingVisitor : public MarkingVisitorBase {
public:
ConcurrentMarkingVisitor(HeapBase&, ConcurrentMarkingState&);
~ConcurrentMarkingVisitor() override = default;
MarkingState& marking_state_;
protected:
void VisitRoot(const void*, TraceDescriptor) final { UNREACHABLE(); }
void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
const void*) final {
UNREACHABLE();
}
};
class ConservativeMarkingVisitor : public ConservativeTracingVisitor,
public heap::base::StackVisitor {
public:
ConservativeMarkingVisitor(HeapBase&, MarkingState&, cppgc::Visitor&);
ConservativeMarkingVisitor(HeapBase&, MutatorMarkingState&, cppgc::Visitor&);
~ConservativeMarkingVisitor() override = default;
private:
......@@ -45,7 +68,7 @@ class ConservativeMarkingVisitor : public ConservativeTracingVisitor,
TraceConservativelyCallback) final;
void VisitPointer(const void*) final;
MarkingState& marking_state_;
MutatorMarkingState& marking_state_;
};
} // namespace internal
......
......@@ -44,19 +44,20 @@ class GCedWithMixin : public GarbageCollected<GCedWithMixin>, public Mixin {
void Trace(cppgc::Visitor*) const override {}
};
class TestMarkingVisitor : public MarkingVisitor {
class TestMarkingVisitor : public MutatorMarkingVisitor {
public:
explicit TestMarkingVisitor(Marker* marker)
: MarkingVisitor(marker->heap(), marker->MarkingStateForTesting()) {}
: MutatorMarkingVisitor(marker->heap(),
marker->MutatorMarkingStateForTesting()) {}
~TestMarkingVisitor() { marking_state_.Publish(); }
MarkingState& marking_state() { return marking_state_; }
MarkingStateBase& marking_state() { return marking_state_; }
};
} // namespace
TEST_F(MarkingVisitorTest, MarkedBytesAreInitiallyZero) {
EXPECT_EQ(0u, GetMarker()->MarkingStateForTesting().marked_bytes());
EXPECT_EQ(0u, GetMarker()->MutatorMarkingStateForTesting().marked_bytes());
}
// Strong references are marked.
......
......@@ -43,9 +43,10 @@ class ExpectWriteBarrierFires final : private IncrementalMarkingScope {
ExpectWriteBarrierFires(MarkerBase* marker,
std::initializer_list<void*> objects)
: IncrementalMarkingScope(marker),
marking_worklist_(marker->MarkingStateForTesting().marking_worklist()),
marking_worklist_(
marker->MutatorMarkingStateForTesting().marking_worklist()),
write_barrier_worklist_(
marker->MarkingStateForTesting().write_barrier_worklist()),
marker->MutatorMarkingStateForTesting().write_barrier_worklist()),
objects_(objects) {
EXPECT_TRUE(marking_worklist_.IsGlobalEmpty());
EXPECT_TRUE(write_barrier_worklist_.IsGlobalEmpty());
......@@ -92,9 +93,10 @@ class ExpectNoWriteBarrierFires final : private IncrementalMarkingScope {
ExpectNoWriteBarrierFires(MarkerBase* marker,
std::initializer_list<void*> objects)
: IncrementalMarkingScope(marker),
marking_worklist_(marker->MarkingStateForTesting().marking_worklist()),
marking_worklist_(
marker->MutatorMarkingStateForTesting().marking_worklist()),
write_barrier_worklist_(
marker->MarkingStateForTesting().write_barrier_worklist()) {
marker->MutatorMarkingStateForTesting().write_barrier_worklist()) {
EXPECT_TRUE(marking_worklist_.IsGlobalEmpty());
EXPECT_TRUE(write_barrier_worklist_.IsGlobalEmpty());
for (void* object : objects) {
......