Commit ac7af6bb authored by Omer Katz, committed by Commit Bot

cppgc: Concurrent marking

This CL introduces concurrent marking to the cppgc library.
The CL includes:
(*) Split MarkingState into mutator-thread and concurrent-thread variants.
(*) Split MarkingVisitor into mutator-thread and concurrent-thread variants.
(*) Introduce ConcurrentMarker for managing concurrent marking.
(*) Update the unified heap to support concurrent marking as well.

See slides 13 and 14 in the following link for class hierarchies:
https://docs.google.com/presentation/d/1uDiEjJ-f1VziBKmYcvpw2gglP47M53bwj1L-P__l9QY/
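
In rough outline, the split looks as follows (a simplified, self-contained sketch, not the real cppgc classes; only the class names and RecentlyMarkedBytes are taken from the diff below, the bodies are illustrative):

#include <cstddef>
#include <utility>

class MarkingStateBase {
 public:
  // Thread-agnostic bookkeeping shared by both variants.
  void AccountMarkedBytes(size_t bytes) { marked_bytes_ += bytes; }
  size_t marked_bytes() const { return marked_bytes_; }

 protected:
  size_t marked_bytes_ = 0;
};

class MutatorMarkingState : public MarkingStateBase {
  // Root visitation and weak-root callbacks stay mutator-thread-only.
};

class ConcurrentMarkingState : public MarkingStateBase {
 public:
  // Reports only the delta since the last call, so the concurrent job can
  // feed its progress into the incremental marking schedule.
  size_t RecentlyMarkedBytes() {
    return marked_bytes_ - std::exchange(last_marked_bytes_, marked_bytes_);
  }

 private:
  size_t last_marked_bytes_ = 0;
};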

Bug: chromium:1056170
Change-Id: I6530c2b21613011a612773d36fbf37416c23c5e7
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2424348
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70352}
parent b83d0b80
@@ -4367,6 +4367,8 @@ v8_source_set("cppgc_base") {
     "include/cppgc/visitor.h",
     "include/v8config.h",
     "src/heap/cppgc/allocation.cc",
+    "src/heap/cppgc/concurrent-marker.cc",
+    "src/heap/cppgc/concurrent-marker.h",
     "src/heap/cppgc/default-job.h",
     "src/heap/cppgc/default-platform.cc",
     "src/heap/cppgc/free-list.cc",
...
@@ -14,6 +14,7 @@
 #include "src/heap/base/stack.h"
 #include "src/heap/cppgc-js/unified-heap-marking-state.h"
 #include "src/heap/cppgc-js/unified-heap-marking-visitor.h"
+#include "src/heap/cppgc/concurrent-marker.h"
 #include "src/heap/cppgc/gc-info-table.h"
 #include "src/heap/cppgc/heap-base.h"
 #include "src/heap/cppgc/heap-object-header.h"
@@ -63,6 +64,33 @@ class CppgcPlatformAdapter final : public cppgc::Platform {
   v8::Isolate* isolate_;
 };
+
+class UnifiedHeapConcurrentMarker
+    : public cppgc::internal::ConcurrentMarkerBase {
+ public:
+  UnifiedHeapConcurrentMarker(
+      cppgc::internal::HeapBase& heap,
+      cppgc::internal::MarkingWorklists& marking_worklists,
+      cppgc::internal::IncrementalMarkingSchedule& incremental_marking_schedule,
+      cppgc::Platform* platform,
+      UnifiedHeapMarkingState& unified_heap_marking_state)
+      : cppgc::internal::ConcurrentMarkerBase(
+            heap, marking_worklists, incremental_marking_schedule, platform),
+        unified_heap_marking_state_(unified_heap_marking_state) {}
+
+  std::unique_ptr<cppgc::Visitor> CreateConcurrentMarkingVisitor(
+      ConcurrentMarkingState&) const final;
+
+ private:
+  UnifiedHeapMarkingState& unified_heap_marking_state_;
+};
+
+std::unique_ptr<cppgc::Visitor>
+UnifiedHeapConcurrentMarker::CreateConcurrentMarkingVisitor(
+    ConcurrentMarkingState& marking_state) const {
+  return std::make_unique<ConcurrentUnifiedHeapMarkingVisitor>(
+      heap(), marking_state, unified_heap_marking_state_);
+}
+
 class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
  public:
   UnifiedHeapMarker(Key, Heap& v8_heap, cppgc::internal::HeapBase& cpp_heap,
@@ -82,8 +110,8 @@ class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
   }
 private:
-  UnifiedHeapMarkingState unified_heap_mutator_marking_state_;
-  UnifiedHeapMarkingVisitor marking_visitor_;
+  UnifiedHeapMarkingState unified_heap_marking_state_;
+  MutatorUnifiedHeapMarkingVisitor marking_visitor_;
   cppgc::internal::ConservativeMarkingVisitor conservative_marking_visitor_;
 };
@@ -92,11 +120,15 @@ UnifiedHeapMarker::UnifiedHeapMarker(Key key, Heap& v8_heap,
                                      cppgc::Platform* platform,
                                      MarkingConfig config)
     : cppgc::internal::MarkerBase(key, heap, platform, config),
-      unified_heap_mutator_marking_state_(v8_heap),
+      unified_heap_marking_state_(v8_heap),
       marking_visitor_(heap, mutator_marking_state_,
-                       unified_heap_mutator_marking_state_),
+                       unified_heap_marking_state_),
       conservative_marking_visitor_(heap, mutator_marking_state_,
-                                    marking_visitor_) {}
+                                    marking_visitor_) {
+  concurrent_marker_ = std::make_unique<UnifiedHeapConcurrentMarker>(
+      heap_, marking_worklists_, schedule_, platform_,
+      unified_heap_marking_state_);
+}
 void UnifiedHeapMarker::AddObject(void* object) {
   mutator_marking_state_.MarkAndPush(
...
@@ -35,6 +35,7 @@ class UnifiedHeapMarkingState {
 };
 void UnifiedHeapMarkingState::MarkAndPush(const JSMemberBase& ref) {
+  // TODO(chromium:1056170): Defer concurrent handling using the bailout.
   heap_.RegisterExternallyReferencedObject(
       JSMemberBaseExtractor::ObjectReference(ref));
 }
...
@@ -12,47 +12,59 @@
 namespace v8 {
 namespace internal {
-UnifiedHeapMarkingVisitor::UnifiedHeapMarkingVisitor(
-    HeapBase& heap, MarkingState& marking_state,
+UnifiedHeapMarkingVisitorBase::UnifiedHeapMarkingVisitorBase(
+    HeapBase& heap, MarkingStateBase& marking_state,
     UnifiedHeapMarkingState& unified_heap_marking_state)
     : JSVisitor(cppgc::internal::VisitorFactory::CreateKey()),
       marking_state_(marking_state),
       unified_heap_marking_state_(unified_heap_marking_state) {}
-void UnifiedHeapMarkingVisitor::Visit(const void* object,
-                                      TraceDescriptor desc) {
+void UnifiedHeapMarkingVisitorBase::Visit(const void* object,
+                                          TraceDescriptor desc) {
   marking_state_.MarkAndPush(object, desc);
 }
-void UnifiedHeapMarkingVisitor::VisitWeak(const void* object,
-                                          TraceDescriptor desc,
-                                          WeakCallback weak_callback,
-                                          const void* weak_member) {
+void UnifiedHeapMarkingVisitorBase::VisitWeak(const void* object,
+                                              TraceDescriptor desc,
+                                              WeakCallback weak_callback,
+                                              const void* weak_member) {
   marking_state_.RegisterWeakReferenceIfNeeded(object, desc, weak_callback,
                                                weak_member);
 }
-void UnifiedHeapMarkingVisitor::VisitRoot(const void* object,
-                                          TraceDescriptor desc) {
-  Visit(object, desc);
-}
-void UnifiedHeapMarkingVisitor::VisitWeakRoot(const void* object,
-                                              TraceDescriptor desc,
-                                              WeakCallback weak_callback,
-                                              const void* weak_root) {
-  marking_state_.InvokeWeakRootsCallbackIfNeeded(object, desc, weak_callback,
-                                                 weak_root);
-}
-void UnifiedHeapMarkingVisitor::RegisterWeakCallback(WeakCallback callback,
-                                                     const void* object) {
+void UnifiedHeapMarkingVisitorBase::RegisterWeakCallback(WeakCallback callback,
+                                                         const void* object) {
   marking_state_.RegisterWeakCallback(callback, object);
 }
-void UnifiedHeapMarkingVisitor::Visit(const internal::JSMemberBase& ref) {
+void UnifiedHeapMarkingVisitorBase::Visit(const internal::JSMemberBase& ref) {
   unified_heap_marking_state_.MarkAndPush(ref);
 }
+MutatorUnifiedHeapMarkingVisitor::MutatorUnifiedHeapMarkingVisitor(
+    HeapBase& heap, MutatorMarkingState& marking_state,
+    UnifiedHeapMarkingState& unified_heap_marking_state)
+    : UnifiedHeapMarkingVisitorBase(heap, marking_state,
+                                    unified_heap_marking_state) {}
+
+void MutatorUnifiedHeapMarkingVisitor::VisitRoot(const void* object,
+                                                 TraceDescriptor desc) {
+  this->Visit(object, desc);
+}
+
+void MutatorUnifiedHeapMarkingVisitor::VisitWeakRoot(const void* object,
+                                                     TraceDescriptor desc,
+                                                     WeakCallback weak_callback,
+                                                     const void* weak_root) {
+  static_cast<MutatorMarkingState&>(marking_state_)
+      .InvokeWeakRootsCallbackIfNeeded(object, desc, weak_callback, weak_root);
+}
+
+ConcurrentUnifiedHeapMarkingVisitor::ConcurrentUnifiedHeapMarkingVisitor(
+    HeapBase& heap, ConcurrentMarkingState& marking_state,
+    UnifiedHeapMarkingState& unified_heap_marking_state)
+    : UnifiedHeapMarkingVisitorBase(heap, marking_state,
+                                    unified_heap_marking_state) {}
 }  // namespace internal
 }  // namespace v8
...
@@ -13,7 +13,9 @@
 namespace cppgc {
 namespace internal {
-class MarkingState;
+class ConcurrentMarkingState;
+class MarkingStateBase;
+class MutatorMarkingState;
 }  // namespace internal
 }  // namespace cppgc
@@ -22,30 +24,58 @@ namespace internal {
 using cppgc::TraceDescriptor;
 using cppgc::WeakCallback;
+using cppgc::internal::ConcurrentMarkingState;
 using cppgc::internal::HeapBase;
-using cppgc::internal::MarkingState;
+using cppgc::internal::MarkingStateBase;
+using cppgc::internal::MutatorMarkingState;
-class V8_EXPORT_PRIVATE UnifiedHeapMarkingVisitor : public JSVisitor {
+class V8_EXPORT_PRIVATE UnifiedHeapMarkingVisitorBase : public JSVisitor {
  public:
-  UnifiedHeapMarkingVisitor(HeapBase&, MarkingState&, UnifiedHeapMarkingState&);
-  ~UnifiedHeapMarkingVisitor() override = default;
+  UnifiedHeapMarkingVisitorBase(HeapBase&, MarkingStateBase&,
+                                UnifiedHeapMarkingState&);
+  ~UnifiedHeapMarkingVisitorBase() override = default;
- private:
+ protected:
   // C++ handling.
   void Visit(const void*, TraceDescriptor) final;
   void VisitWeak(const void*, TraceDescriptor, WeakCallback, const void*) final;
-  void VisitRoot(const void*, TraceDescriptor) final;
-  void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
-                     const void*) final;
   void RegisterWeakCallback(WeakCallback, const void*) final;
   // JS handling.
   void Visit(const internal::JSMemberBase& ref) final;
-  MarkingState& marking_state_;
+  MarkingStateBase& marking_state_;
   UnifiedHeapMarkingState& unified_heap_marking_state_;
 };
+
+class V8_EXPORT_PRIVATE MutatorUnifiedHeapMarkingVisitor
+    : public UnifiedHeapMarkingVisitorBase {
+ public:
+  MutatorUnifiedHeapMarkingVisitor(HeapBase&, MutatorMarkingState&,
+                                   UnifiedHeapMarkingState&);
+  ~MutatorUnifiedHeapMarkingVisitor() override = default;
+
+ protected:
+  void VisitRoot(const void*, TraceDescriptor) final;
+  void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
+                     const void*) final;
+};
+
+class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor
+    : public UnifiedHeapMarkingVisitorBase {
+ public:
+  ConcurrentUnifiedHeapMarkingVisitor(HeapBase&, ConcurrentMarkingState&,
+                                      UnifiedHeapMarkingState&);
+  ~ConcurrentUnifiedHeapMarkingVisitor() override = default;
+
+ protected:
+  void VisitRoot(const void*, TraceDescriptor) final { UNREACHABLE(); }
+  void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
+                     const void*) final {
+    UNREACHABLE();
+  }
+};
 }  // namespace internal
 }  // namespace v8
...
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/cppgc/concurrent-marker.h"
#include "include/cppgc/platform.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/liveness-broker.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/marking-visitor.h"
namespace cppgc {
namespace internal {
namespace {
static constexpr double kMarkingScheduleRatioBeforeConcurrentPriorityIncrease =
0.5;
static constexpr size_t kDefaultDeadlineCheckInterval = 750u;
template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
typename WorklistLocal, typename Callback>
bool DrainWorklistWithYielding(
JobDelegate* job_delegate, ConcurrentMarkingState& marking_state,
IncrementalMarkingSchedule& incremental_marking_schedule,
WorklistLocal& worklist_local, Callback callback) {
return DrainWorklistWithPredicate<kDeadlineCheckInterval>(
[&incremental_marking_schedule, &marking_state, job_delegate]() {
incremental_marking_schedule.AddConcurrentlyMarkedBytes(
marking_state.RecentlyMarkedBytes());
return job_delegate->ShouldYield();
},
worklist_local, callback);
}
size_t WorkSizeForConcurrentMarking(MarkingWorklists& marking_worklists) {
return marking_worklists.marking_worklist()->Size() +
marking_worklists.write_barrier_worklist()->Size() +
marking_worklists.previously_not_fully_constructed_worklist()->Size();
}
// Checks whether worklists' global pools hold any segment a concurrent marker
// can steal. This is called before the concurrent marker holds any Locals, so
// no need to check local segments.
bool HasWorkForConcurrentMarking(MarkingWorklists& marking_worklists) {
return !marking_worklists.marking_worklist()->IsEmpty() ||
!marking_worklists.write_barrier_worklist()->IsEmpty() ||
!marking_worklists.previously_not_fully_constructed_worklist()
->IsEmpty();
}
class ConcurrentMarkingTask final : public v8::JobTask {
public:
explicit ConcurrentMarkingTask(ConcurrentMarkerBase&);
void Run(JobDelegate* delegate) final;
size_t GetMaxConcurrency(size_t) const final;
private:
void ProcessWorklists(JobDelegate*, ConcurrentMarkingState&, Visitor&);
const ConcurrentMarkerBase& concurrent_marker_;
};
ConcurrentMarkingTask::ConcurrentMarkingTask(
ConcurrentMarkerBase& concurrent_marker)
: concurrent_marker_(concurrent_marker) {}
void ConcurrentMarkingTask::Run(JobDelegate* job_delegate) {
if (!HasWorkForConcurrentMarking(concurrent_marker_.marking_worklists()))
return;
ConcurrentMarkingState concurrent_marking_state(
concurrent_marker_.heap(), concurrent_marker_.marking_worklists());
std::unique_ptr<Visitor> concurrent_marking_visitor =
concurrent_marker_.CreateConcurrentMarkingVisitor(
concurrent_marking_state);
ProcessWorklists(job_delegate, concurrent_marking_state,
*concurrent_marking_visitor.get());
concurrent_marker_.incremental_marking_schedule().AddConcurrentlyMarkedBytes(
concurrent_marking_state.RecentlyMarkedBytes());
concurrent_marking_state.Publish();
}
size_t ConcurrentMarkingTask::GetMaxConcurrency(
size_t current_worker_count) const {
return WorkSizeForConcurrentMarking(concurrent_marker_.marking_worklists()) +
current_worker_count;
}
void ConcurrentMarkingTask::ProcessWorklists(
JobDelegate* job_delegate, ConcurrentMarkingState& concurrent_marking_state,
Visitor& concurrent_marking_visitor) {
do {
if (!DrainWorklistWithYielding(
job_delegate, concurrent_marking_state,
concurrent_marker_.incremental_marking_schedule(),
concurrent_marking_state
.previously_not_fully_constructed_worklist(),
[&concurrent_marking_state,
&concurrent_marking_visitor](HeapObjectHeader* header) {
BasePage::FromPayload(header)->SynchronizedLoad();
DynamicallyTraceMarkedObject<
HeapObjectHeader::AccessMode::kAtomic>(
concurrent_marking_visitor, *header);
concurrent_marking_state.AccountMarkedBytes(*header);
})) {
return;
}
if (!DrainWorklistWithYielding(
job_delegate, concurrent_marking_state,
concurrent_marker_.incremental_marking_schedule(),
concurrent_marking_state.marking_worklist(),
[&concurrent_marking_state, &concurrent_marking_visitor](
const MarkingWorklists::MarkingItem& item) {
BasePage::FromPayload(item.base_object_payload)
->SynchronizedLoad();
const HeapObjectHeader& header =
HeapObjectHeader::FromPayload(item.base_object_payload);
DCHECK(!header.IsInConstruction<
HeapObjectHeader::AccessMode::kAtomic>());
DCHECK(header.IsMarked<HeapObjectHeader::AccessMode::kAtomic>());
item.callback(&concurrent_marking_visitor,
item.base_object_payload);
concurrent_marking_state.AccountMarkedBytes(header);
})) {
return;
}
if (!DrainWorklistWithYielding(
job_delegate, concurrent_marking_state,
concurrent_marker_.incremental_marking_schedule(),
concurrent_marking_state.write_barrier_worklist(),
[&concurrent_marking_state,
&concurrent_marking_visitor](HeapObjectHeader* header) {
BasePage::FromPayload(header)->SynchronizedLoad();
DynamicallyTraceMarkedObject<
HeapObjectHeader::AccessMode::kAtomic>(
concurrent_marking_visitor, *header);
concurrent_marking_state.AccountMarkedBytes(*header);
})) {
return;
}
} while (
!concurrent_marking_state.marking_worklist().IsLocalAndGlobalEmpty());
}
} // namespace
ConcurrentMarkerBase::ConcurrentMarkerBase(
HeapBase& heap, MarkingWorklists& marking_worklists,
IncrementalMarkingSchedule& incremental_marking_schedule,
cppgc::Platform* platform)
: heap_(heap),
marking_worklists_(marking_worklists),
incremental_marking_schedule_(incremental_marking_schedule),
platform_(platform) {}
void ConcurrentMarkerBase::Start() {
if (!platform_) return;
concurrent_marking_handle_ =
platform_->PostJob(v8::TaskPriority::kUserVisible,
std::make_unique<ConcurrentMarkingTask>(*this));
}
void ConcurrentMarkerBase::Cancel() {
if (concurrent_marking_handle_) concurrent_marking_handle_->Cancel();
}
ConcurrentMarkerBase::~ConcurrentMarkerBase() {
CHECK_IMPLIES(concurrent_marking_handle_,
!concurrent_marking_handle_->IsRunning());
}
bool ConcurrentMarkerBase::NotifyIncrementalMutatorStepCompleted() {
DCHECK(concurrent_marking_handle_);
if (HasWorkForConcurrentMarking(marking_worklists_)) {
// Notifies the scheduler that max concurrency might have increased.
// This will adjust the number of markers if necessary.
IncreaseMarkingPriorityIfNeeded();
concurrent_marking_handle_->NotifyConcurrencyIncrease();
return false;
}
return concurrent_marking_handle_->IsCompleted();
}
void ConcurrentMarkerBase::IncreaseMarkingPriorityIfNeeded() {
if (concurrent_marking_priority_increased_) return;
  // If concurrent tasks aren't executed, it might delay GC finalization.
  // As long as GC is active so is the write barrier, which incurs a
  // performance cost. Marking is estimated to take overall
  // |IncrementalMarkingSchedule::kEstimatedMarkingTimeMs|. If
  // concurrent marking tasks have not reported any progress (i.e. the
  // concurrently marked bytes count has not changed) in over
  // |kMarkingScheduleRatioBeforeConcurrentPriorityIncrease| (50%) of
  // that expected duration, we increase the concurrent task priority
  // for the duration of the current GC. This is meant to prevent the
  // GC from exceeding its expected end time.
size_t current_concurrently_marked_bytes_ =
incremental_marking_schedule_.GetConcurrentlyMarkedBytes();
if (current_concurrently_marked_bytes_ > last_concurrently_marked_bytes_) {
last_concurrently_marked_bytes_ = current_concurrently_marked_bytes_;
last_concurrently_marked_bytes_update_ = v8::base::TimeTicks::Now();
} else if ((v8::base::TimeTicks::Now() -
last_concurrently_marked_bytes_update_)
.InMilliseconds() >
kMarkingScheduleRatioBeforeConcurrentPriorityIncrease *
IncrementalMarkingSchedule::kEstimatedMarkingTimeMs) {
// TODO(chromium:1056170): Enable priority update after it is added to the
// platform.
// concurrent_marking_handle_.UpdatePriority(v8::TaskPriority::USER_BLOCKING);
concurrent_marking_priority_increased_ = true;
}
}
std::unique_ptr<Visitor> ConcurrentMarker::CreateConcurrentMarkingVisitor(
ConcurrentMarkingState& marking_state) const {
return std::make_unique<ConcurrentMarkingVisitor>(heap(), marking_state);
}
} // namespace internal
} // namespace cppgc
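
As a worked example of the priority heuristic in IncreaseMarkingPriorityIfNeeded above (the concrete value of kEstimatedMarkingTimeMs is assumed here purely for illustration; it is not part of this diff):

// Hypothetical numbers: if kEstimatedMarkingTimeMs were 500, then
// 0.5 * 500 = 250 ms without the concurrently-marked-bytes counter
// advancing would flip concurrent_marking_priority_increased_.
constexpr double kAssumedEstimatedMarkingTimeMs = 500;  // assumption
constexpr double kRatio = 0.5;  // kMarkingScheduleRatioBeforeConcurrentPriorityIncrease
static_assert(kRatio * kAssumedEstimatedMarkingTimeMs == 250,
              "priority bump threshold in ms, under the assumed estimate");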
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_CPPGC_CONCURRENT_MARKER_H_
#define V8_HEAP_CPPGC_CONCURRENT_MARKER_H_
#include "include/cppgc/platform.h"
#include "src/heap/cppgc/incremental-marking-schedule.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/marking-worklists.h"
namespace cppgc {
namespace internal {
class V8_EXPORT_PRIVATE ConcurrentMarkerBase {
public:
ConcurrentMarkerBase(HeapBase&, MarkingWorklists&,
IncrementalMarkingSchedule&, cppgc::Platform*);
virtual ~ConcurrentMarkerBase();
ConcurrentMarkerBase(const ConcurrentMarkerBase&) = delete;
ConcurrentMarkerBase& operator=(const ConcurrentMarkerBase&) = delete;
void Start();
void Cancel();
bool NotifyIncrementalMutatorStepCompleted();
HeapBase& heap() const { return heap_; }
MarkingWorklists& marking_worklists() const { return marking_worklists_; }
IncrementalMarkingSchedule& incremental_marking_schedule() const {
return incremental_marking_schedule_;
}
virtual std::unique_ptr<Visitor> CreateConcurrentMarkingVisitor(
ConcurrentMarkingState&) const = 0;
protected:
void IncreaseMarkingPriorityIfNeeded();
private:
HeapBase& heap_;
MarkingWorklists& marking_worklists_;
IncrementalMarkingSchedule& incremental_marking_schedule_;
cppgc::Platform* const platform_;
  // The job handle doubles as a flag to denote that concurrent marking was
  // started.
std::unique_ptr<JobHandle> concurrent_marking_handle_{nullptr};
size_t last_concurrently_marked_bytes_ = 0;
v8::base::TimeTicks last_concurrently_marked_bytes_update_;
bool concurrent_marking_priority_increased_{false};
};
class V8_EXPORT_PRIVATE ConcurrentMarker : public ConcurrentMarkerBase {
public:
ConcurrentMarker(HeapBase& heap, MarkingWorklists& marking_worklists,
IncrementalMarkingSchedule& incremental_marking_schedule,
cppgc::Platform* platform)
: ConcurrentMarkerBase(heap, marking_worklists,
incremental_marking_schedule, platform) {}
std::unique_ptr<Visitor> CreateConcurrentMarkingVisitor(
ConcurrentMarkingState&) const final;
};
} // namespace internal
} // namespace cppgc
#endif // V8_HEAP_CPPGC_CONCURRENT_MARKER_H_
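
Taken together with the marker.cc changes further down, the intended lifecycle of a concurrent marker is roughly the following (a condensed sketch of how MarkerBase drives ConcurrentMarkerBase in this CL, not the literal marker code; error handling and config plumbing are elided):

#include "src/heap/cppgc/concurrent-marker.h"

void LifecycleSketch(cppgc::internal::ConcurrentMarkerBase& concurrent_marker) {
  // StartMarking(): the job is posted alongside incremental marking when
  // the config is kIncrementalAndConcurrent.
  concurrent_marker.Start();
  // AdvanceMarkingWithDeadline(): after each incremental mutator step,
  // re-check for work; this may raise job priority and concurrency.
  concurrent_marker.NotifyIncrementalMutatorStepCompleted();
  // EnterAtomicPause(): remaining concurrent work is cancelled before the
  // atomic pause finishes marking on the mutator thread.
  concurrent_marker.Cancel();
}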
@@ -32,9 +32,12 @@ void IncrementalMarkingSchedule::AddConcurrentlyMarkedBytes(
   concurrently_marked_bytes_.fetch_add(marked_bytes, std::memory_order_relaxed);
 }
-size_t IncrementalMarkingSchedule::GetOverallMarkedBytes() {
-  return incrementally_marked_bytes_ +
-         concurrently_marked_bytes_.load(std::memory_order_relaxed);
+size_t IncrementalMarkingSchedule::GetOverallMarkedBytes() const {
+  return incrementally_marked_bytes_ + GetConcurrentlyMarkedBytes();
+}
+
+size_t IncrementalMarkingSchedule::GetConcurrentlyMarkedBytes() const {
+  return concurrently_marked_bytes_.load(std::memory_order_relaxed);
 }
 double IncrementalMarkingSchedule::GetElapsedTimeInMs(
...
@@ -26,7 +26,8 @@ class V8_EXPORT_PRIVATE IncrementalMarkingSchedule {
   void UpdateIncrementalMarkedBytes(size_t);
   void AddConcurrentlyMarkedBytes(size_t);
-  size_t GetOverallMarkedBytes();
+  size_t GetOverallMarkedBytes() const;
+  size_t GetConcurrentlyMarkedBytes() const;
   size_t GetNextIncrementalStepDuration(size_t);
...
@@ -55,7 +55,8 @@ bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
 }
 // Visit remembered set that was recorded in the generational barrier.
-void VisitRememberedSlots(HeapBase& heap, MarkingState& marking_state) {
+void VisitRememberedSlots(HeapBase& heap,
+                          MutatorMarkingState& mutator_marking_state) {
 #if defined(CPPGC_YOUNG_GENERATION)
   for (void* slot : heap.remembered_slots()) {
     auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
@@ -69,7 +70,7 @@ void VisitRememberedSlots(HeapBase& heap, MarkingState& marking_state) {
         !header.IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>());
     void* value = *reinterpret_cast<void**>(slot);
-    marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
+    mutator_marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
   }
 #endif
 }
@@ -85,33 +86,14 @@ void ResetRememberedSet(HeapBase& heap) {
 static constexpr size_t kDefaultDeadlineCheckInterval = 150u;
-template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
-          typename WorklistLocal, typename Callback, typename Predicate>
-bool DrainWorklistWithDeadline(Predicate should_yield,
-                               WorklistLocal& worklist_local,
-                               Callback callback) {
-  size_t processed_callback_count = 0;
-  typename WorklistLocal::ItemType item;
-  while (worklist_local.Pop(&item)) {
-    callback(item);
-    if (processed_callback_count-- == 0) {
-      if (should_yield()) {
-        return false;
-      }
-      processed_callback_count = kDeadlineCheckInterval;
-    }
-  }
-  return true;
-}
 template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
           typename WorklistLocal, typename Callback>
-bool DrainWorklistWithBytesAndTimeDeadline(MarkingState& marking_state,
+bool DrainWorklistWithBytesAndTimeDeadline(MarkingStateBase& marking_state,
                                            size_t marked_bytes_deadline,
                                            v8::base::TimeTicks time_deadline,
                                            WorklistLocal& worklist_local,
                                            Callback callback) {
-  return DrainWorklistWithDeadline(
+  return DrainWorklistWithPredicate<kDeadlineCheckInterval>(
       [&marking_state, marked_bytes_deadline, time_deadline]() {
         return (marked_bytes_deadline <= marking_state.marked_bytes()) ||
                (time_deadline <= v8::base::TimeTicks::Now());
@@ -119,15 +101,6 @@ bool DrainWorklistWithBytesAndTimeDeadline(MarkingState& marking_state,
       worklist_local, callback);
 }
-void TraceMarkedObject(Visitor* visitor, const HeapObjectHeader* header) {
-  DCHECK(header);
-  DCHECK(!header->IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>());
-  DCHECK(header->IsMarked<HeapObjectHeader::AccessMode::kNonAtomic>());
-  const GCInfo& gcinfo =
-      GlobalGCInfoTable::GCInfoFromIndex(header->GetGCInfoIndex());
-  gcinfo.trace(visitor, header->Payload());
-}
 size_t GetNextIncrementalStepDuration(IncrementalMarkingSchedule& schedule,
                                       HeapBase& heap) {
   return schedule.GetNextIncrementalStepDuration(
@@ -204,6 +177,7 @@ MarkerBase::~MarkerBase() {
 }
 void MarkerBase::StartMarking() {
+  DCHECK(!is_marking_started_);
   heap().stats_collector()->NotifyMarkingStarted();
   is_marking_started_ = true;
@@ -213,13 +187,18 @@ void MarkerBase::StartMarking() {
     // Scanning the stack is expensive so we only do it at the atomic pause.
     VisitRoots(MarkingConfig::StackState::kNoHeapPointers);
     ScheduleIncrementalMarkingTask();
+    if (config_.marking_type ==
+        MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+      concurrent_marker_->Start();
+    }
   }
 }
 void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
   if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
-    // Cancel remaining incremental tasks.
-    if (incremental_marking_handle_) incremental_marking_handle_.Cancel();
+    // Cancel remaining concurrent/incremental tasks.
+    concurrent_marker_->Cancel();
+    incremental_marking_handle_.Cancel();
   }
   config_.stack_state = stack_state;
   config_.marking_type = MarkingConfig::MarkingType::kAtomic;
@@ -246,8 +225,8 @@ void MarkerBase::LeaveAtomicPause() {
 void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
   DCHECK(is_marking_started_);
   EnterAtomicPause(stack_state);
-  ProcessWorklistsWithDeadline(std::numeric_limits<size_t>::max(),
-                               v8::base::TimeTicks::Max());
+  CHECK(ProcessWorklistsWithDeadline(std::numeric_limits<size_t>::max(),
+                                     v8::base::TimeTicks::Max()));
   mutator_marking_state_.Publish();
   LeaveAtomicPause();
 }
@@ -302,13 +281,11 @@ bool MarkerBase::IncrementalMarkingStep(MarkingConfig::StackState stack_state) {
   return AdvanceMarkingWithDeadline();
 }
-bool MarkerBase::AdvanceMarkingOnAllocation() {
-  bool is_done = AdvanceMarkingWithDeadline();
-  if (is_done) {
+void MarkerBase::AdvanceMarkingOnAllocation() {
+  if (AdvanceMarkingWithDeadline()) {
     // Schedule another incremental task for finalizing without a stack.
     ScheduleIncrementalMarkingTask();
   }
-  return is_done;
 }
 bool MarkerBase::AdvanceMarkingWithMaxDuration(
@@ -326,26 +303,28 @@ bool MarkerBase::AdvanceMarkingWithDeadline(v8::base::TimeDelta max_duration) {
         v8::base::TimeTicks::Now() + max_duration);
   }
   schedule_.UpdateIncrementalMarkedBytes(mutator_marking_state_.marked_bytes());
-  mutator_marking_state_.Publish();
   if (!is_done) {
     // If marking is atomic, |is_done| should always be true.
     DCHECK_NE(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
     ScheduleIncrementalMarkingTask();
+    if (config_.marking_type ==
+        MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+      concurrent_marker_->NotifyIncrementalMutatorStepCompleted();
+    }
   }
+  mutator_marking_state_.Publish();
   return is_done;
 }
 bool MarkerBase::ProcessWorklistsWithDeadline(
     size_t marked_bytes_deadline, v8::base::TimeTicks time_deadline) {
   do {
+    // Convert |previously_not_fully_constructed_worklist_| to
+    // |marking_worklist_|. This merely re-adds items with the proper
+    // callbacks.
     if (!DrainWorklistWithBytesAndTimeDeadline(
            mutator_marking_state_, marked_bytes_deadline, time_deadline,
            mutator_marking_state_.previously_not_fully_constructed_worklist(),
            [this](HeapObjectHeader* header) {
-             TraceMarkedObject(&visitor(), header);
+             DynamicallyTraceMarkedObject<
+                 HeapObjectHeader::AccessMode::kNonAtomic>(visitor(), *header);
             mutator_marking_state_.AccountMarkedBytes(*header);
           })) {
      return false;
@@ -371,7 +350,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
            mutator_marking_state_, marked_bytes_deadline, time_deadline,
            mutator_marking_state_.write_barrier_worklist(),
            [this](HeapObjectHeader* header) {
-             TraceMarkedObject(&visitor(), header);
+             DynamicallyTraceMarkedObject<
+                 HeapObjectHeader::AccessMode::kNonAtomic>(visitor(), *header);
             mutator_marking_state_.AccountMarkedBytes(*header);
           })) {
      return false;
@@ -405,7 +385,10 @@ Marker::Marker(Key key, HeapBase& heap, cppgc::Platform* platform,
     : MarkerBase(key, heap, platform, config),
       marking_visitor_(heap, mutator_marking_state_),
       conservative_marking_visitor_(heap, mutator_marking_state_,
-                                    marking_visitor_) {}
+                                    marking_visitor_) {
+  concurrent_marker_ = std::make_unique<ConcurrentMarker>(
+      heap_, marking_worklists_, schedule_, platform_);
+}
 }  // namespace internal
 }  // namespace cppgc
@@ -12,6 +12,7 @@
 #include "src/base/macros.h"
 #include "src/base/platform/time.h"
 #include "src/heap/base/worklist.h"
+#include "src/heap/cppgc/concurrent-marker.h"
 #include "src/heap/cppgc/globals.h"
 #include "src/heap/cppgc/incremental-marking-schedule.h"
 #include "src/heap/cppgc/marking-state.h"
@@ -73,7 +74,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
   bool AdvanceMarkingWithMaxDuration(v8::base::TimeDelta);
   // Makes marking progress when allocating a new lab.
-  bool AdvanceMarkingOnAllocation();
+  void AdvanceMarkingOnAllocation();
   // Signals leaving the atomic marking pause. This method expects no more
   // objects to be marked and merely updates marking states if needed.
@@ -94,7 +95,9 @@ class V8_EXPORT_PRIVATE MarkerBase {
   HeapBase& heap() { return heap_; }
   MarkingWorklists& MarkingWorklistsForTesting() { return marking_worklists_; }
-  MarkingState& MarkingStateForTesting() { return mutator_marking_state_; }
+  MutatorMarkingState& MutatorMarkingStateForTesting() {
+    return mutator_marking_state_;
+  }
   cppgc::Visitor& VisitorForTesting() { return visitor(); }
   void ClearAllWorklistsForTesting();
@@ -163,11 +166,13 @@ class V8_EXPORT_PRIVATE MarkerBase {
   IncrementalMarkingTask::Handle incremental_marking_handle_;
   MarkingWorklists marking_worklists_;
-  MarkingState mutator_marking_state_;
-  bool is_marking_started_ = false;
+  MutatorMarkingState mutator_marking_state_;
+  bool is_marking_started_{false};
   IncrementalMarkingSchedule schedule_;
+  std::unique_ptr<ConcurrentMarkerBase> concurrent_marker_{nullptr};
   bool incremental_marking_disabled_for_testing_{false};
   friend class MarkerFactory;
@@ -201,7 +206,7 @@ class V8_EXPORT_PRIVATE Marker final : public MarkerBase {
   }
  private:
-  MarkingVisitor marking_visitor_;
+  MutatorMarkingVisitor marking_visitor_;
   ConservativeMarkingVisitor conservative_marking_visitor_;
 };
...
@@ -9,7 +9,7 @@
 namespace cppgc {
 namespace internal {
-void MarkingState::FlushNotFullyConstructedObjects() {
+void MutatorMarkingState::FlushNotFullyConstructedObjects() {
   std::unordered_set<HeapObjectHeader*> objects =
       not_fully_constructed_worklist_.Extract();
   for (HeapObjectHeader* object : objects) {
...
@@ -16,28 +16,19 @@ namespace cppgc {
 namespace internal {
 // C++ marking implementation.
-class MarkingState {
+class MarkingStateBase {
  public:
-  inline MarkingState(HeapBase& heap, MarkingWorklists&);
-  MarkingState(const MarkingState&) = delete;
-  MarkingState& operator=(const MarkingState&) = delete;
+  inline MarkingStateBase(HeapBase& heap, MarkingWorklists&);
+  MarkingStateBase(const MarkingStateBase&) = delete;
+  MarkingStateBase& operator=(const MarkingStateBase&) = delete;
   inline void MarkAndPush(const void*, TraceDescriptor);
-  inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);
   inline void MarkAndPush(HeapObjectHeader&);
-  inline bool MarkNoPush(HeapObjectHeader&);
-  template <
-      HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
-  inline void DynamicallyMarkAddress(ConstAddress);
   inline void RegisterWeakReferenceIfNeeded(const void*, TraceDescriptor,
                                             WeakCallback, const void*);
   inline void RegisterWeakCallback(WeakCallback, const void*);
-  inline void InvokeWeakRootsCallbackIfNeeded(const void*, TraceDescriptor,
-                                              WeakCallback, const void*);
   inline void AccountMarkedBytes(const HeapObjectHeader&);
   size_t marked_bytes() const { return marked_bytes_; }
@@ -49,10 +40,6 @@ class MarkingState {
     write_barrier_worklist_.Publish();
   }
-  // Moves objects in not_fully_constructed_worklist_ to
-  // previously_not_fully_constructed_worklist_.
-  void FlushNotFullyConstructedObjects();
   MarkingWorklists::MarkingWorklist::Local& marking_worklist() {
     return marking_worklist_;
   }
@@ -71,7 +58,11 @@ class MarkingState {
     return write_barrier_worklist_;
   }
- private:
+ protected:
+  inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);
+  inline bool MarkNoPush(HeapObjectHeader&);
 #ifdef DEBUG
   HeapBase& heap_;
 #endif  // DEBUG
@@ -87,7 +78,8 @@ class MarkingState {
   size_t marked_bytes_ = 0;
 };
-MarkingState::MarkingState(HeapBase& heap, MarkingWorklists& marking_worklists)
+MarkingStateBase::MarkingStateBase(HeapBase& heap,
+                                   MarkingWorklists& marking_worklists)
     :
 #ifdef DEBUG
       heap_(heap),
@@ -101,14 +93,15 @@ MarkingState::MarkingState(HeapBase& heap, MarkingWorklists& marking_worklists)
       write_barrier_worklist_(marking_worklists.write_barrier_worklist()) {
 }
-void MarkingState::MarkAndPush(const void* object, TraceDescriptor desc) {
+void MarkingStateBase::MarkAndPush(const void* object, TraceDescriptor desc) {
   DCHECK_NOT_NULL(object);
   MarkAndPush(HeapObjectHeader::FromPayload(
                   const_cast<void*>(desc.base_object_payload)),
               desc);
 }
-void MarkingState::MarkAndPush(HeapObjectHeader& header, TraceDescriptor desc) {
+void MarkingStateBase::MarkAndPush(HeapObjectHeader& header,
+                                   TraceDescriptor desc) {
   DCHECK_NOT_NULL(desc.callback);
   if (header.IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>()) {
@@ -118,7 +111,7 @@ void MarkingState::MarkAndPush(HeapObjectHeader& header, TraceDescriptor desc) {
   }
 }
-bool MarkingState::MarkNoPush(HeapObjectHeader& header) {
+bool MarkingStateBase::MarkNoPush(HeapObjectHeader& header) {
   // A GC should only mark the objects that belong in its heap.
   DCHECK_EQ(&heap_, BasePage::FromPayload(&header)->heap());
   // Never mark free space objects. This would e.g. hint to marking a promptly
@@ -127,31 +120,17 @@ bool MarkingState::MarkNoPush(HeapObjectHeader& header) {
   return header.TryMarkAtomic();
 }
-template <HeapObjectHeader::AccessMode mode>
-void MarkingState::DynamicallyMarkAddress(ConstAddress address) {
-  HeapObjectHeader& header =
-      BasePage::FromPayload(address)->ObjectHeaderFromInnerAddress<mode>(
-          const_cast<Address>(address));
-  DCHECK(!header.IsInConstruction<mode>());
-  if (MarkNoPush(header)) {
-    marking_worklist_.Push(
-        {reinterpret_cast<void*>(header.Payload()),
-         GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex<mode>())
-             .trace});
-  }
-}
-void MarkingState::MarkAndPush(HeapObjectHeader& header) {
+void MarkingStateBase::MarkAndPush(HeapObjectHeader& header) {
   MarkAndPush(
       header,
       {header.Payload(),
       GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
 }
-void MarkingState::RegisterWeakReferenceIfNeeded(const void* object,
-                                                 TraceDescriptor desc,
-                                                 WeakCallback weak_callback,
-                                                 const void* parameter) {
+void MarkingStateBase::RegisterWeakReferenceIfNeeded(const void* object,
+                                                     TraceDescriptor desc,
+                                                     WeakCallback weak_callback,
+                                                     const void* parameter) {
   // Filter out already marked values. The write barrier for WeakMember
   // ensures that any newly set value after this point is kept alive and does
   // not require the callback.
@@ -161,10 +140,48 @@ void MarkingState::RegisterWeakReferenceIfNeeded(const void* object,
   RegisterWeakCallback(weak_callback, parameter);
 }
-void MarkingState::InvokeWeakRootsCallbackIfNeeded(const void* object,
-                                                   TraceDescriptor desc,
-                                                   WeakCallback weak_callback,
-                                                   const void* parameter) {
+void MarkingStateBase::AccountMarkedBytes(const HeapObjectHeader& header) {
+  marked_bytes_ +=
+      header.IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>()
+          ? reinterpret_cast<const LargePage*>(BasePage::FromPayload(&header))
+                ->PayloadSize()
+          : header.GetSize<HeapObjectHeader::AccessMode::kAtomic>();
+}
+
+class MutatorMarkingState : public MarkingStateBase {
+ public:
+  MutatorMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists)
+      : MarkingStateBase(heap, marking_worklists) {}
+
+  inline bool MarkNoPush(HeapObjectHeader& header) {
+    return MutatorMarkingState::MarkingStateBase::MarkNoPush(header);
+  }
+
+  inline void DynamicallyMarkAddress(ConstAddress);
+
+  // Moves objects in not_fully_constructed_worklist_ to
+  // previously_not_fully_constructed_worklist_.
+  void FlushNotFullyConstructedObjects();
+
+  inline void InvokeWeakRootsCallbackIfNeeded(const void*, TraceDescriptor,
+                                              WeakCallback, const void*);
+};
+
+void MutatorMarkingState::DynamicallyMarkAddress(ConstAddress address) {
+  HeapObjectHeader& header =
+      BasePage::FromPayload(address)->ObjectHeaderFromInnerAddress(
+          const_cast<Address>(address));
+  DCHECK(!header.IsInConstruction());
+  if (MarkNoPush(header)) {
+    marking_worklist_.Push(
+        {reinterpret_cast<void*>(header.Payload()),
+         GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
+  }
+}
+
+void MutatorMarkingState::InvokeWeakRootsCallbackIfNeeded(
+    const void* object, TraceDescriptor desc, WeakCallback weak_callback,
+    const void* parameter) {
   // Since weak roots are only traced at the end of marking, we can execute
   // the callback instead of registering it.
 #if DEBUG
@@ -175,17 +192,56 @@ void MarkingState::InvokeWeakRootsCallbackIfNeeded(const void* object,
   weak_callback(LivenessBrokerFactory::Create(), parameter);
 }
-void MarkingState::RegisterWeakCallback(WeakCallback callback,
-                                        const void* object) {
+void MarkingStateBase::RegisterWeakCallback(WeakCallback callback,
+                                            const void* object) {
   weak_callback_worklist_.Push({callback, object});
 }
-void MarkingState::AccountMarkedBytes(const HeapObjectHeader& header) {
-  marked_bytes_ +=
-      header.IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>()
-          ? reinterpret_cast<const LargePage*>(BasePage::FromPayload(&header))
-                ->PayloadSize()
-          : header.GetSize<HeapObjectHeader::AccessMode::kAtomic>();
+class ConcurrentMarkingState : public MarkingStateBase {
+ public:
+  ConcurrentMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists)
+      : MarkingStateBase(heap, marking_worklists) {}
+
+  ~ConcurrentMarkingState() { DCHECK_EQ(last_marked_bytes_, marked_bytes_); }
+
+  size_t RecentlyMarkedBytes() {
+    return marked_bytes_ - std::exchange(last_marked_bytes_, marked_bytes_);
+  }
+
+ private:
+  size_t last_marked_bytes_ = 0;
+};
+
+template <size_t deadline_check_interval, typename WorklistLocal,
+          typename Callback, typename Predicate>
+bool DrainWorklistWithPredicate(Predicate should_yield,
+                                WorklistLocal& worklist_local,
+                                Callback callback) {
+  if (worklist_local.IsLocalAndGlobalEmpty()) return true;
+  // For concurrent markers, should_yield also reports marked bytes.
+  if (should_yield()) return false;
+  size_t processed_callback_count = deadline_check_interval;
+  typename WorklistLocal::ItemType item;
+  while (worklist_local.Pop(&item)) {
+    callback(item);
+    if (--processed_callback_count == 0) {
+      if (should_yield()) {
+        return false;
+      }
+      processed_callback_count = deadline_check_interval;
+    }
+  }
+  return true;
+}
+
+template <HeapObjectHeader::AccessMode mode>
+void DynamicallyTraceMarkedObject(Visitor& visitor,
+                                  const HeapObjectHeader& header) {
+  DCHECK(!header.IsInConstruction<mode>());
+  DCHECK(header.IsMarked<mode>());
+  const GCInfo& gcinfo =
+      GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex<mode>());
+  gcinfo.trace(&visitor, header.Payload());
 }
 }  // namespace internal
...
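
Note the asymmetry between the drain paths: the concurrent marker traces with HeapObjectHeader::AccessMode::kAtomic, while ProcessWorklistsWithDeadline on the mutator thread passes kNonAtomic. A minimal illustration of why the access-mode template parameter exists (illustrative stand-in types, not the real HeapObjectHeader):

#include <atomic>
#include <cstdint>

enum class AccessMode { kNonAtomic, kAtomic };

// Concurrent readers must load header bits with synchronization because the
// mutator may update mark/size bits while the job runs; the mutator thread
// alone can take the cheaper path.
template <AccessMode mode>
uint16_t LoadEncodedBits(const std::atomic<uint16_t>& encoded) {
  return encoded.load(mode == AccessMode::kAtomic
                          ? std::memory_order_acquire
                          : std::memory_order_relaxed);
}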
@@ -10,38 +10,28 @@
 namespace cppgc {
 namespace internal {
-MarkingVisitor::MarkingVisitor(HeapBase& heap, MarkingState& marking_state)
+MarkingVisitorBase::MarkingVisitorBase(HeapBase& heap,
+                                       MarkingStateBase& marking_state)
     : marking_state_(marking_state) {}
-void MarkingVisitor::Visit(const void* object, TraceDescriptor desc) {
+void MarkingVisitorBase::Visit(const void* object, TraceDescriptor desc) {
   marking_state_.MarkAndPush(object, desc);
 }
-void MarkingVisitor::VisitWeak(const void* object, TraceDescriptor desc,
-                               WeakCallback weak_callback,
-                               const void* weak_member) {
+void MarkingVisitorBase::VisitWeak(const void* object, TraceDescriptor desc,
                                    WeakCallback weak_callback,
                                    const void* weak_member) {
   marking_state_.RegisterWeakReferenceIfNeeded(object, desc, weak_callback,
                                                weak_member);
 }
-void MarkingVisitor::VisitRoot(const void* object, TraceDescriptor desc) {
-  Visit(object, desc);
-}
-void MarkingVisitor::VisitWeakRoot(const void* object, TraceDescriptor desc,
-                                   WeakCallback weak_callback,
-                                   const void* weak_root) {
-  marking_state_.InvokeWeakRootsCallbackIfNeeded(object, desc, weak_callback,
-                                                 weak_root);
-}
-void MarkingVisitor::RegisterWeakCallback(WeakCallback callback,
-                                          const void* object) {
+void MarkingVisitorBase::RegisterWeakCallback(WeakCallback callback,
+                                              const void* object) {
   marking_state_.RegisterWeakCallback(callback, object);
 }
 ConservativeMarkingVisitor::ConservativeMarkingVisitor(
-    HeapBase& heap, MarkingState& marking_state, cppgc::Visitor& visitor)
+    HeapBase& heap, MutatorMarkingState& marking_state, cppgc::Visitor& visitor)
     : ConservativeTracingVisitor(heap, *heap.page_backend(), visitor),
       marking_state_(marking_state) {}
@@ -52,6 +42,27 @@ void ConservativeMarkingVisitor::VisitConservatively(
   marking_state_.AccountMarkedBytes(header);
 }
+MutatorMarkingVisitor::MutatorMarkingVisitor(HeapBase& heap,
+                                             MutatorMarkingState& marking_state)
+    : MarkingVisitorBase(heap, marking_state) {}
+
+void MutatorMarkingVisitor::VisitRoot(const void* object,
+                                      TraceDescriptor desc) {
+  Visit(object, desc);
+}
+
+void MutatorMarkingVisitor::VisitWeakRoot(const void* object,
+                                          TraceDescriptor desc,
+                                          WeakCallback weak_callback,
+                                          const void* weak_root) {
+  static_cast<MutatorMarkingState&>(marking_state_)
+      .InvokeWeakRootsCallbackIfNeeded(object, desc, weak_callback, weak_root);
+}
+
+ConcurrentMarkingVisitor::ConcurrentMarkingVisitor(
+    HeapBase& heap, ConcurrentMarkingState& marking_state)
+    : MarkingVisitorBase(heap, marking_state) {}
+
 void ConservativeMarkingVisitor::VisitPointer(const void* address) {
   TraceConservativelyIfNeeded(address);
 }
...
@@ -16,28 +16,51 @@ namespace internal {
 class HeapBase;
 class HeapObjectHeader;
 class Marker;
-class MarkingState;
+class MarkingStateBase;
+class MutatorMarkingState;
+class ConcurrentMarkingState;
-class V8_EXPORT_PRIVATE MarkingVisitor : public VisitorBase {
+class V8_EXPORT_PRIVATE MarkingVisitorBase : public VisitorBase {
  public:
-  MarkingVisitor(HeapBase&, MarkingState&);
-  ~MarkingVisitor() override = default;
+  MarkingVisitorBase(HeapBase&, MarkingStateBase&);
+  ~MarkingVisitorBase() override = default;
  protected:
   void Visit(const void*, TraceDescriptor) final;
   void VisitWeak(const void*, TraceDescriptor, WeakCallback, const void*) final;
+  void RegisterWeakCallback(WeakCallback, const void*) final;
+
+  MarkingStateBase& marking_state_;
+};
+
+class V8_EXPORT_PRIVATE MutatorMarkingVisitor : public MarkingVisitorBase {
+ public:
+  MutatorMarkingVisitor(HeapBase&, MutatorMarkingState&);
+  ~MutatorMarkingVisitor() override = default;
+
+ protected:
   void VisitRoot(const void*, TraceDescriptor) final;
   void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
                      const void*) final;
-  void RegisterWeakCallback(WeakCallback, const void*) final;
+};
+
+class V8_EXPORT_PRIVATE ConcurrentMarkingVisitor : public MarkingVisitorBase {
+ public:
+  ConcurrentMarkingVisitor(HeapBase&, ConcurrentMarkingState&);
+  ~ConcurrentMarkingVisitor() override = default;
-  MarkingState& marking_state_;
+ protected:
+  void VisitRoot(const void*, TraceDescriptor) final { UNREACHABLE(); }
+  void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
+                     const void*) final {
+    UNREACHABLE();
+  }
 };
 class ConservativeMarkingVisitor : public ConservativeTracingVisitor,
                                    public heap::base::StackVisitor {
  public:
-  ConservativeMarkingVisitor(HeapBase&, MarkingState&, cppgc::Visitor&);
+  ConservativeMarkingVisitor(HeapBase&, MutatorMarkingState&, cppgc::Visitor&);
   ~ConservativeMarkingVisitor() override = default;
  private:
@@ -45,7 +68,7 @@ class ConservativeMarkingVisitor : public ConservativeTracingVisitor,
                               TraceConservativelyCallback) final;
   void VisitPointer(const void*) final;
-  MarkingState& marking_state_;
+  MutatorMarkingState& marking_state_;
 };
 }  // namespace internal
...
@@ -44,19 +44,20 @@ class GCedWithMixin : public GarbageCollected<GCedWithMixin>, public Mixin {
   void Trace(cppgc::Visitor*) const override {}
 };
-class TestMarkingVisitor : public MarkingVisitor {
+class TestMarkingVisitor : public MutatorMarkingVisitor {
  public:
   explicit TestMarkingVisitor(Marker* marker)
-      : MarkingVisitor(marker->heap(), marker->MarkingStateForTesting()) {}
+      : MutatorMarkingVisitor(marker->heap(),
+                              marker->MutatorMarkingStateForTesting()) {}
   ~TestMarkingVisitor() { marking_state_.Publish(); }
-  MarkingState& marking_state() { return marking_state_; }
+  MarkingStateBase& marking_state() { return marking_state_; }
 };
 }  // namespace
 TEST_F(MarkingVisitorTest, MarkedBytesAreInitiallyZero) {
-  EXPECT_EQ(0u, GetMarker()->MarkingStateForTesting().marked_bytes());
+  EXPECT_EQ(0u, GetMarker()->MutatorMarkingStateForTesting().marked_bytes());
 }
 // Strong references are marked.
...
@@ -43,9 +43,10 @@ class ExpectWriteBarrierFires final : private IncrementalMarkingScope {
   ExpectWriteBarrierFires(MarkerBase* marker,
                           std::initializer_list<void*> objects)
       : IncrementalMarkingScope(marker),
-        marking_worklist_(marker->MarkingStateForTesting().marking_worklist()),
+        marking_worklist_(
+            marker->MutatorMarkingStateForTesting().marking_worklist()),
         write_barrier_worklist_(
-            marker->MarkingStateForTesting().write_barrier_worklist()),
+            marker->MutatorMarkingStateForTesting().write_barrier_worklist()),
         objects_(objects) {
     EXPECT_TRUE(marking_worklist_.IsGlobalEmpty());
     EXPECT_TRUE(write_barrier_worklist_.IsGlobalEmpty());
@@ -92,9 +93,10 @@ class ExpectNoWriteBarrierFires final : private IncrementalMarkingScope {
   ExpectNoWriteBarrierFires(MarkerBase* marker,
                             std::initializer_list<void*> objects)
       : IncrementalMarkingScope(marker),
-        marking_worklist_(marker->MarkingStateForTesting().marking_worklist()),
+        marking_worklist_(
+            marker->MutatorMarkingStateForTesting().marking_worklist()),
         write_barrier_worklist_(
-            marker->MarkingStateForTesting().write_barrier_worklist()) {
+            marker->MutatorMarkingStateForTesting().write_barrier_worklist()) {
     EXPECT_TRUE(marking_worklist_.IsGlobalEmpty());
     EXPECT_TRUE(write_barrier_worklist_.IsGlobalEmpty());
     for (void* object : objects) {
...