Commit 8ac2d54a authored by Omer Katz, committed by Commit Bot

cppgc: Add bailout for concurrent marking

Tracing JSMember references uses the bailout mechanism.
The bailout is implemented as a dynamic mechanism named
DeferTraceToMutatorThreadIfConcurrent that is called from
the Trace methods of types that are not safe to trace
concurrently.

Bug: chromium:1056170
Change-Id: I90e6feae25c4c832be256693f9e44a963a6794b7
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2426613
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70429}
parent 4569ffae
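
For orientation before the diff: a minimal sketch of how a class opts into the new bailout from its Trace method. It mirrors the NotConcurrentlyTraceable type in the unit test added at the bottom of this change; the class name and the TraceAfterBailout helper are illustrative only, not part of the change:

#include "cppgc/garbage-collected.h"
#include "cppgc/visitor.h"

class NotSafeToTraceConcurrently final
    : public cppgc::GarbageCollected<NotSafeToTraceConcurrently> {
 public:
  void Trace(cppgc::Visitor* visitor) const {
    // On a concurrent marking thread this enqueues a callback that re-runs
    // tracing on the mutator thread and returns true, so we bail out here.
    if (visitor->DeferTraceToMutatorThreadIfConcurrent(
            this,
            [](cppgc::Visitor* visitor, const void* object) {
              static_cast<const NotSafeToTraceConcurrently*>(object)
                  ->TraceAfterBailout(visitor);
            },
            sizeof(NotSafeToTraceConcurrently))) {
      return;
    }
    TraceAfterBailout(visitor);  // Already on the mutator thread.
  }

 private:
  void TraceAfterBailout(cppgc::Visitor*) const {
    // Trace members that are only safe to visit on the mutator thread.
  }
};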
@@ -47,7 +47,7 @@ using WeakCallback = void (*)(const LivenessBroker&, const void*);
  * };
  * \endcode
  */
-class Visitor {
+class V8_EXPORT Visitor {
  public:
   class Key {
    private:
@@ -131,6 +131,25 @@ class Visitor {
    */
  virtual void RegisterWeakCallback(WeakCallback callback, const void* data) {}

+  /**
+   * Defers tracing an object from a concurrent thread to the mutator thread.
+   * Should be called by Trace methods of types that are not safe to trace
+   * concurrently.
+   *
+   * \param parameter tells the trace callback which object was deferred.
+   * \param callback to be invoked for tracing on the mutator thread.
+   * \param deferred_size size of the deferred object.
+   *
+   * \returns false if the object does not need to be deferred (i.e., it is
+   * currently being traced on the mutator thread) and true otherwise (i.e.,
+   * it is currently being traced on a concurrent thread).
+   */
+  virtual V8_WARN_UNUSED_RESULT bool DeferTraceToMutatorThreadIfConcurrent(
+      const void* parameter, TraceCallback callback, size_t deferred_size) {
+    // By default, tracing is not deferred.
+    return false;
+  }
+
  protected:
  virtual void Visit(const void* self, TraceDescriptor) {}
  virtual void VisitWeak(const void* self, TraceDescriptor, WeakCallback,
@@ -200,7 +219,7 @@ class Visitor {
   }

 #if V8_ENABLE_CHECKS
-  V8_EXPORT void CheckObjectNotInConstruction(const void* address);
+  void CheckObjectNotInConstruction(const void* address);
 #endif  // V8_ENABLE_CHECKS

   template <typename T, typename WeaknessPolicy, typename LocationPolicy,
......
@@ -210,8 +210,7 @@ class JSVisitor : public cppgc::Visitor {
  public:
   explicit JSVisitor(cppgc::Visitor::Key key) : cppgc::Visitor(key) {}

-  template <typename T>
-  void Trace(const JSMember<T>& ref) {
+  void Trace(const internal::JSMemberBase& ref) {
     if (ref.IsEmptyThreadSafe()) return;
     Visit(ref);
   }
@@ -226,9 +225,9 @@ class JSVisitor : public cppgc::Visitor {
 namespace cppgc {

-template <typename T>
-struct TraceTrait<v8::JSMember<T>> {
-  static void Trace(Visitor* visitor, const v8::JSMember<T>* self) {
+template <>
+struct TraceTrait<v8::internal::JSMemberBase> {
+  static void Trace(Visitor* visitor, const v8::internal::JSMemberBase* self) {
     static_cast<v8::JSVisitor*>(visitor)->Trace(*self);
   }
 };
......
@@ -35,7 +35,6 @@ class UnifiedHeapMarkingState {
 };

 void UnifiedHeapMarkingState::MarkAndPush(const JSMemberBase& ref) {
-  // TODO(chromium:1056170): Defer concurrent handling using the bailout.
   heap_.RegisterExternallyReferencedObject(
       JSMemberBaseExtractor::ObjectReference(ref));
 }
......
@@ -37,8 +37,18 @@ void UnifiedHeapMarkingVisitorBase::RegisterWeakCallback(WeakCallback callback,
   marking_state_.RegisterWeakCallback(callback, object);
 }

+namespace {
+void DeferredTraceJSMember(cppgc::Visitor* visitor, const void* ref) {
+  static_cast<JSVisitor*>(visitor)->Trace(
+      *static_cast<const internal::JSMemberBase*>(ref));
+}
+}  // namespace
+
 void UnifiedHeapMarkingVisitorBase::Visit(const internal::JSMemberBase& ref) {
-  unified_heap_marking_state_.MarkAndPush(ref);
+  bool should_defer_tracing =
+      DeferTraceToMutatorThreadIfConcurrent(&ref, DeferredTraceJSMember, 0);
+  if (!should_defer_tracing) unified_heap_marking_state_.MarkAndPush(ref);
 }

 MutatorUnifiedHeapMarkingVisitor::MutatorUnifiedHeapMarkingVisitor(
@@ -66,5 +76,15 @@ ConcurrentUnifiedHeapMarkingVisitor::ConcurrentUnifiedHeapMarkingVisitor(
     : UnifiedHeapMarkingVisitorBase(heap, marking_state,
                                     unified_heap_marking_state) {}

+bool ConcurrentUnifiedHeapMarkingVisitor::DeferTraceToMutatorThreadIfConcurrent(
+    const void* parameter, cppgc::TraceCallback callback,
+    size_t deferred_size) {
+  marking_state_.concurrent_marking_bailout_worklist().Push(
+      {parameter, callback, deferred_size});
+  static_cast<ConcurrentMarkingState&>(marking_state_)
+      .AccountDeferredMarkedBytes(deferred_size);
+  return true;
+}
+
 }  // namespace internal
 }  // namespace v8
@@ -48,7 +48,7 @@ class V8_EXPORT_PRIVATE UnifiedHeapMarkingVisitorBase : public JSVisitor {
   UnifiedHeapMarkingState& unified_heap_marking_state_;
 };

-class V8_EXPORT_PRIVATE MutatorUnifiedHeapMarkingVisitor
+class V8_EXPORT_PRIVATE MutatorUnifiedHeapMarkingVisitor final
     : public UnifiedHeapMarkingVisitorBase {
  public:
   MutatorUnifiedHeapMarkingVisitor(HeapBase&, MutatorMarkingState&,
@@ -61,7 +61,7 @@ class V8_EXPORT_PRIVATE MutatorUnifiedHeapMarkingVisitor
                    const void*) final;
 };

-class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor
+class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor final
     : public UnifiedHeapMarkingVisitorBase {
  public:
   ConcurrentUnifiedHeapMarkingVisitor(HeapBase&, ConcurrentMarkingState&,
@@ -74,6 +74,9 @@ class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor
                    const void*) final {
     UNREACHABLE();
   }
+
+  bool DeferTraceToMutatorThreadIfConcurrent(const void*, cppgc::TraceCallback,
+                                             size_t) final;
 };

 }  // namespace internal
......
@@ -103,10 +103,10 @@ void ConcurrentMarkingTask::ProcessWorklists(
             [&concurrent_marking_state,
              &concurrent_marking_visitor](HeapObjectHeader* header) {
               BasePage::FromPayload(header)->SynchronizedLoad();
+              concurrent_marking_state.AccountMarkedBytes(*header);
               DynamicallyTraceMarkedObject<
                   HeapObjectHeader::AccessMode::kAtomic>(
                   concurrent_marking_visitor, *header);
-              concurrent_marking_state.AccountMarkedBytes(*header);
             })) {
       return;
     }
@@ -124,9 +124,9 @@ void ConcurrentMarkingTask::ProcessWorklists(
               DCHECK(!header.IsInConstruction<
                       HeapObjectHeader::AccessMode::kAtomic>());
               DCHECK(header.IsMarked<HeapObjectHeader::AccessMode::kAtomic>());
+              concurrent_marking_state.AccountMarkedBytes(header);
               item.callback(&concurrent_marking_visitor,
                             item.base_object_payload);
-              concurrent_marking_state.AccountMarkedBytes(header);
             })) {
       return;
     }
@@ -138,10 +138,10 @@ void ConcurrentMarkingTask::ProcessWorklists(
             [&concurrent_marking_state,
              &concurrent_marking_visitor](HeapObjectHeader* header) {
               BasePage::FromPayload(header)->SynchronizedLoad();
+              concurrent_marking_state.AccountMarkedBytes(*header);
               DynamicallyTraceMarkedObject<
                   HeapObjectHeader::AccessMode::kAtomic>(
                   concurrent_marking_visitor, *header);
-              concurrent_marking_state.AccountMarkedBytes(*header);
             })) {
       return;
     }
@@ -168,7 +168,13 @@ void ConcurrentMarkerBase::Start() {
 }

 void ConcurrentMarkerBase::Cancel() {
-  if (concurrent_marking_handle_) concurrent_marking_handle_->Cancel();
+  if (concurrent_marking_handle_ && concurrent_marking_handle_->IsRunning())
+    concurrent_marking_handle_->Cancel();
+}
+
+void ConcurrentMarkerBase::JoinForTesting() {
+  if (concurrent_marking_handle_ && concurrent_marking_handle_->IsRunning())
+    concurrent_marking_handle_->Join();
 }

 ConcurrentMarkerBase::~ConcurrentMarkerBase() {
......
@@ -26,6 +26,8 @@ class V8_EXPORT_PRIVATE ConcurrentMarkerBase {
   void Start();
   void Cancel();

+  void JoinForTesting();
+
   bool NotifyIncrementalMutatorStepCompleted();

   HeapBase& heap() const { return heap_; }
......
@@ -190,6 +190,7 @@ void MarkerBase::StartMarking() {
   ScheduleIncrementalMarkingTask();
   if (config_.marking_type ==
       MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+    mutator_marking_state_.Publish();
     concurrent_marker_->Start();
   }
 }
@@ -335,13 +336,27 @@ bool MarkerBase::AdvanceMarkingWithDeadline(v8::base::TimeDelta max_duration) {
 bool MarkerBase::ProcessWorklistsWithDeadline(
     size_t marked_bytes_deadline, v8::base::TimeTicks time_deadline) {
   do {
+    // Bailout objects may be complicated to trace and thus might take longer
+    // than other objects. Therefore we reduce the interval between deadline
+    // checks to guarantee the deadline is not exceeded.
+    if (!DrainWorklistWithBytesAndTimeDeadline<kDefaultDeadlineCheckInterval /
+                                               5>(
+            mutator_marking_state_, marked_bytes_deadline, time_deadline,
+            mutator_marking_state_.concurrent_marking_bailout_worklist(),
+            [this](const MarkingWorklists::ConcurrentMarkingBailoutItem& item) {
+              mutator_marking_state_.AccountMarkedBytes(item.bailedout_size);
+              item.callback(&visitor(), item.parameter);
+            })) {
+      return false;
+    }
+
     if (!DrainWorklistWithBytesAndTimeDeadline(
             mutator_marking_state_, marked_bytes_deadline, time_deadline,
             mutator_marking_state_.previously_not_fully_constructed_worklist(),
             [this](HeapObjectHeader* header) {
+              mutator_marking_state_.AccountMarkedBytes(*header);
               DynamicallyTraceMarkedObject<
                   HeapObjectHeader::AccessMode::kNonAtomic>(visitor(), *header);
-              mutator_marking_state_.AccountMarkedBytes(*header);
             })) {
       return false;
     }
@@ -356,8 +371,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
               DCHECK(!header.IsInConstruction<
                       HeapObjectHeader::AccessMode::kNonAtomic>());
               DCHECK(
                   header.IsMarked<HeapObjectHeader::AccessMode::kNonAtomic>());
-              item.callback(&visitor(), item.base_object_payload);
               mutator_marking_state_.AccountMarkedBytes(header);
+              item.callback(&visitor(), item.base_object_payload);
             })) {
       return false;
     }
@@ -366,9 +381,9 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
             mutator_marking_state_, marked_bytes_deadline, time_deadline,
             mutator_marking_state_.write_barrier_worklist(),
             [this](HeapObjectHeader* header) {
+              mutator_marking_state_.AccountMarkedBytes(*header);
               DynamicallyTraceMarkedObject<
                   HeapObjectHeader::AccessMode::kNonAtomic>(visitor(), *header);
-              mutator_marking_state_.AccountMarkedBytes(*header);
             })) {
       return false;
     }
@@ -396,6 +411,10 @@ void MarkerBase::DisableIncrementalMarkingForTesting() {
   incremental_marking_disabled_for_testing_ = true;
 }

+void MarkerBase::WaitForConcurrentMarkingForTesting() {
+  concurrent_marker_->JoinForTesting();
+}
+
 Marker::Marker(Key key, HeapBase& heap, cppgc::Platform* platform,
                MarkingConfig config)
     : MarkerBase(key, heap, platform, config),
......
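
The bailout drain above uses a deadline-check interval of kDefaultDeadlineCheckInterval / 5. A self-contained sketch of the idea behind such a drain loop, using standard-library types rather than V8's internal Worklist and TimeTicks (the real DrainWorklistWithBytesAndTimeDeadline also tracks a marked-bytes budget and may differ in detail):

#include <chrono>
#include <cstddef>
#include <utility>
#include <vector>

// Drains `worklist`, re-checking the time deadline only once every
// kDeadlineCheckInterval items, since checking the clock per item would be
// too costly. Bailout items may each take long to trace, which is why the
// marker above shrinks the interval by 5x for the bailout worklist.
template <std::size_t kDeadlineCheckInterval, typename Item, typename Callback>
bool DrainWithTimeDeadline(std::vector<Item>& worklist,
                           std::chrono::steady_clock::time_point deadline,
                           Callback callback) {
  std::size_t processed_since_check = 0;
  while (!worklist.empty()) {
    Item item = std::move(worklist.back());
    worklist.pop_back();
    callback(item);
    if (++processed_since_check == kDeadlineCheckInterval) {
      processed_since_check = 0;
      if (std::chrono::steady_clock::now() >= deadline)
        return false;  // Deadline hit; remaining items stay queued.
    }
  }
  return true;  // Fully drained within the deadline.
}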
@@ -122,6 +122,8 @@ class V8_EXPORT_PRIVATE MarkerBase {
   void DisableIncrementalMarkingForTesting();

+  void WaitForConcurrentMarkingForTesting();
+
  protected:
   static constexpr v8::base::TimeDelta kMaximumIncrementalStepDuration =
       v8::base::TimeDelta::FromMilliseconds(2);
......
@@ -31,6 +31,7 @@ class MarkingStateBase {
   inline void RegisterWeakCallback(WeakCallback, const void*);

   inline void AccountMarkedBytes(const HeapObjectHeader&);
+  inline void AccountMarkedBytes(size_t);
   size_t marked_bytes() const { return marked_bytes_; }

   void Publish() {
@@ -38,6 +39,7 @@ class MarkingStateBase {
     previously_not_fully_constructed_worklist_.Publish();
     weak_callback_worklist_.Publish();
     write_barrier_worklist_.Publish();
+    concurrent_marking_bailout_worklist_.Publish();
   }

   MarkingWorklists::MarkingWorklist::Local& marking_worklist() {
@@ -57,6 +59,10 @@ class MarkingStateBase {
   MarkingWorklists::WriteBarrierWorklist::Local& write_barrier_worklist() {
     return write_barrier_worklist_;
   }
+  MarkingWorklists::ConcurrentMarkingBailoutWorklist::Local&
+  concurrent_marking_bailout_worklist() {
+    return concurrent_marking_bailout_worklist_;
+  }

  protected:
   inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);
@@ -74,6 +80,8 @@ class MarkingStateBase {
       previously_not_fully_constructed_worklist_;
   MarkingWorklists::WeakCallbackWorklist::Local weak_callback_worklist_;
   MarkingWorklists::WriteBarrierWorklist::Local write_barrier_worklist_;
+  MarkingWorklists::ConcurrentMarkingBailoutWorklist::Local
+      concurrent_marking_bailout_worklist_;

   size_t marked_bytes_ = 0;
 };
@@ -90,7 +98,9 @@ MarkingStateBase::MarkingStateBase(HeapBase& heap,
       previously_not_fully_constructed_worklist_(
           marking_worklists.previously_not_fully_constructed_worklist()),
       weak_callback_worklist_(marking_worklists.weak_callback_worklist()),
-      write_barrier_worklist_(marking_worklists.write_barrier_worklist()) {
+      write_barrier_worklist_(marking_worklists.write_barrier_worklist()),
+      concurrent_marking_bailout_worklist_(
+          marking_worklists.concurrent_marking_bailout_worklist()) {
 }

 void MarkingStateBase::MarkAndPush(const void* object, TraceDescriptor desc) {
@@ -141,11 +151,15 @@ void MarkingStateBase::RegisterWeakReferenceIfNeeded(const void* object,
 }

 void MarkingStateBase::AccountMarkedBytes(const HeapObjectHeader& header) {
-  marked_bytes_ +=
+  AccountMarkedBytes(
       header.IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>()
           ? reinterpret_cast<const LargePage*>(BasePage::FromPayload(&header))
                 ->PayloadSize()
-          : header.GetSize<HeapObjectHeader::AccessMode::kAtomic>();
+          : header.GetSize<HeapObjectHeader::AccessMode::kAtomic>());
+}
+
+void MarkingStateBase::AccountMarkedBytes(size_t marked_bytes) {
+  marked_bytes_ += marked_bytes;
 }

 class MutatorMarkingState : public MarkingStateBase {
@@ -208,6 +222,13 @@ class ConcurrentMarkingState : public MarkingStateBase {
     return marked_bytes_ - std::exchange(last_marked_bytes_, marked_bytes_);
   }

+  inline void AccountDeferredMarkedBytes(size_t deferred_bytes) {
+    // AccountDeferredMarkedBytes is called from Trace methods, which are
+    // always called after AccountMarkedBytes, so there should be no underflow
+    // here.
+    DCHECK_LE(deferred_bytes, marked_bytes_);
+    marked_bytes_ -= deferred_bytes;
+  }
+
  private:
   size_t last_marked_bytes_ = 0;
 };
......
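
The AccountMarkedBytes/AccountDeferredMarkedBytes pair above relies on the reordering made throughout this change: bytes are now accounted before the Trace call, so a bailing-out Trace can subtract them without underflow, and the mutator re-adds them when it drains the bailout item. A small self-contained model of that bookkeeping with an illustrative 128-byte object:

#include <cassert>
#include <cstddef>

// Stand-in for the marked-bytes counter of a MarkingState.
struct MarkedBytes {
  std::size_t value = 0;
  void Account(std::size_t bytes) { value += bytes; }  // AccountMarkedBytes
  void AccountDeferred(std::size_t bytes) {  // AccountDeferredMarkedBytes
    assert(bytes <= value);  // Holds because Account() ran before tracing.
    value -= bytes;
  }
};

int main() {
  MarkedBytes concurrent, mutator;
  constexpr std::size_t kObjectSize = 128;
  concurrent.Account(kObjectSize);          // concurrent thread, before Trace
  concurrent.AccountDeferred(kObjectSize);  // Trace bailed out to the mutator
  mutator.Account(kObjectSize);             // mutator drains the bailout item
  assert(concurrent.value == 0 && mutator.value == kObjectSize);
  // Net effect: the bytes are counted once, by the thread that traced.
}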
@@ -38,8 +38,8 @@ ConservativeMarkingVisitor::ConservativeMarkingVisitor(
 void ConservativeMarkingVisitor::VisitConservatively(
     HeapObjectHeader& header, TraceConservativelyCallback callback) {
   marking_state_.MarkNoPush(header);
-  callback(this, header);
   marking_state_.AccountMarkedBytes(header);
+  callback(this, header);
 }

 MutatorMarkingVisitor::MutatorMarkingVisitor(HeapBase& heap,
@@ -67,5 +67,14 @@ void ConservativeMarkingVisitor::VisitPointer(const void* address) {
   TraceConservativelyIfNeeded(address);
 }

+bool ConcurrentMarkingVisitor::DeferTraceToMutatorThreadIfConcurrent(
+    const void* parameter, TraceCallback callback, size_t deferred_size) {
+  marking_state_.concurrent_marking_bailout_worklist().Push(
+      {parameter, callback, deferred_size});
+  static_cast<ConcurrentMarkingState&>(marking_state_)
+      .AccountDeferredMarkedBytes(deferred_size);
+  return true;
+}
+
 }  // namespace internal
 }  // namespace cppgc
@@ -44,7 +44,8 @@ class V8_EXPORT_PRIVATE MutatorMarkingVisitor : public MarkingVisitorBase {
                    const void*) final;
 };

-class V8_EXPORT_PRIVATE ConcurrentMarkingVisitor : public MarkingVisitorBase {
+class V8_EXPORT_PRIVATE ConcurrentMarkingVisitor final
+    : public MarkingVisitorBase {
  public:
   ConcurrentMarkingVisitor(HeapBase&, ConcurrentMarkingState&);
   ~ConcurrentMarkingVisitor() override = default;
@@ -55,6 +56,9 @@ class V8_EXPORT_PRIVATE ConcurrentMarkingVisitor : public MarkingVisitorBase {
                    const void*) final {
     UNREACHABLE();
   }
+
+  bool DeferTraceToMutatorThreadIfConcurrent(const void*, TraceCallback,
+                                             size_t) final;
 };

 class ConservativeMarkingVisitor : public ConservativeTracingVisitor,
......
@@ -16,6 +16,7 @@ void MarkingWorklists::ClearForTesting() {
   previously_not_fully_constructed_worklist_.Clear();
   write_barrier_worklist_.Clear();
   weak_callback_worklist_.Clear();
+  concurrent_marking_bailout_worklist_.Clear();
 }

 void MarkingWorklists::NotFullyConstructedWorklist::Push(
......
@@ -21,11 +21,18 @@ class MarkingWorklists {
   static constexpr int kMutatorThreadId = 0;

   using MarkingItem = cppgc::TraceDescriptor;

   struct WeakCallbackItem {
     cppgc::WeakCallback callback;
     const void* parameter;
   };

+  struct ConcurrentMarkingBailoutItem {
+    const void* parameter;
+    TraceCallback callback;
+    size_t bailedout_size;
+  };
+
   // Segment size of 512 entries necessary to avoid throughput regressions.
   // Since the work list is currently a temporary object this is not a problem.
   using MarkingWorklist =
@@ -36,6 +43,9 @@ class MarkingWorklists {
       heap::base::Worklist<WeakCallbackItem, 64 /* local entries */>;
   using WriteBarrierWorklist =
       heap::base::Worklist<HeapObjectHeader*, 64 /* local entries */>;
+  using ConcurrentMarkingBailoutWorklist =
+      heap::base::Worklist<ConcurrentMarkingBailoutItem,
+                           64 /* local entries */>;

   class V8_EXPORT_PRIVATE NotFullyConstructedWorklist {
    public:
@@ -72,6 +82,9 @@ class MarkingWorklists {
   WeakCallbackWorklist* weak_callback_worklist() {
     return &weak_callback_worklist_;
   }
+  ConcurrentMarkingBailoutWorklist* concurrent_marking_bailout_worklist() {
+    return &concurrent_marking_bailout_worklist_;
+  }

   void ClearForTesting();
@@ -82,6 +95,7 @@ class MarkingWorklists {
       previously_not_fully_constructed_worklist_;
   WriteBarrierWorklist write_barrier_worklist_;
   WeakCallbackWorklist weak_callback_worklist_;
+  ConcurrentMarkingBailoutWorklist concurrent_marking_bailout_worklist_;
 };

 }  // namespace internal
......
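
A reduced sketch of how bailout items flow through the new worklist, using a plain std::deque in place of heap::base::Worklist (the real worklist is segmented, with 64-entry thread-local views that Publish() flushes to the shared global list):

#include <cstddef>
#include <deque>

struct Visitor {};  // stand-in for cppgc::Visitor
using TraceCallback = void (*)(Visitor*, const void*);

struct ConcurrentMarkingBailoutItem {
  const void* parameter;
  TraceCallback callback;
  std::size_t bailedout_size;
};

// Concurrent thread: record the deferred trace instead of performing it.
void PushBailout(std::deque<ConcurrentMarkingBailoutItem>& worklist,
                 const void* object, TraceCallback callback,
                 std::size_t size) {
  worklist.push_back({object, callback, size});
}

// Mutator thread: replay each deferred trace where it is safe to do so.
void DrainBailouts(std::deque<ConcurrentMarkingBailoutItem>& worklist,
                   Visitor* visitor) {
  while (!worklist.empty()) {
    const ConcurrentMarkingBailoutItem item = worklist.front();
    worklist.pop_front();
    item.callback(visitor, item.parameter);
  }
}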
@@ -16,51 +16,17 @@
 namespace cppgc {
 namespace internal {

-#if defined(THREAD_SANITIZER)
-
 namespace {

-class GCed : public GarbageCollected<GCed> {
- public:
-  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
-
-  Member<GCed> child_;
-};
-
-class GCedWithCallback : public GarbageCollected<GCedWithCallback> {
- public:
-  template <typename Callback>
-  explicit GCedWithCallback(Callback callback) {
-    callback(this);
-  }
-
-  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
-
-  Member<GCedWithCallback> child_;
-};
-
-class Mixin : public GarbageCollectedMixin {
- public:
-  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
-
-  Member<Mixin> child_;
-};
-
-class GCedWithMixin : public GarbageCollected<GCedWithMixin>, public Mixin {
- public:
-  void Trace(cppgc::Visitor* visitor) const { Mixin::Trace(visitor); }
-};
-
-template <typename T>
-class GCedHolder : public GarbageCollected<GCedHolder<T>> {
- public:
-  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(object_); }
-
-  Member<T> object_;
-};
-
 class ConcurrentMarkingTest : public testing::TestWithHeap {
  public:
+#if defined(THREAD_SANITIZER)
+  // Use more iterations on TSan builds to expose data races.
+  static constexpr int kNumStep = 1000;
+#else
+  static constexpr int kNumStep = 10;
+#endif  // defined(THREAD_SANITIZER)
+
   using Config = Heap::Config;

   static constexpr Config ConcurrentPreciseConfig = {
       Config::CollectionType::kMajor, Config::StackState::kNoHeapPointers,
@@ -95,16 +61,52 @@ class ConcurrentMarkingTest : public testing::TestWithHeap {
 constexpr ConcurrentMarkingTest::Config
     ConcurrentMarkingTest::ConcurrentPreciseConfig;

+template <typename T>
+struct GCedHolder : public GarbageCollected<GCedHolder<T>> {
+  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(object); }
+
+  Member<T> object;
+};
+
+class GCed : public GarbageCollected<GCed> {
+ public:
+  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
+
+  Member<GCed> child_;
+};
+
+class GCedWithCallback : public GarbageCollected<GCedWithCallback> {
+ public:
+  template <typename Callback>
+  explicit GCedWithCallback(Callback callback) {
+    callback(this);
+  }
+
+  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
+
+  Member<GCedWithCallback> child_;
+};
+
+class Mixin : public GarbageCollectedMixin {
+ public:
+  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
+
+  Member<Mixin> child_;
+};
+
+class GCedWithMixin : public GarbageCollected<GCedWithMixin>, public Mixin {
+ public:
+  void Trace(cppgc::Visitor* visitor) const { Mixin::Trace(visitor); }
+};
+
 }  // namespace

 // The following tests check for data races during concurrent marking.

 TEST_F(ConcurrentMarkingTest, MarkingObjects) {
-  static constexpr int kNumStep = 1000;
   StartConcurrentGC();
   Persistent<GCedHolder<GCed>> root =
       MakeGarbageCollected<GCedHolder<GCed>>(GetAllocationHandle());
-  Member<GCed>* last_object = &root->object_;
+  Member<GCed>* last_object = &root->object;
   for (int i = 0; i < kNumStep; ++i) {
     for (int j = 0; j < kNumStep; ++j) {
       *last_object = MakeGarbageCollected<GCed>(GetAllocationHandle());
@@ -117,11 +119,10 @@ TEST_F(ConcurrentMarkingTest, MarkingObjects) {
 }

 TEST_F(ConcurrentMarkingTest, MarkingInConstructionObjects) {
-  static constexpr int kNumStep = 1000;
   StartConcurrentGC();
   Persistent<GCedHolder<GCedWithCallback>> root =
       MakeGarbageCollected<GCedHolder<GCedWithCallback>>(GetAllocationHandle());
-  Member<GCedWithCallback>* last_object = &root->object_;
+  Member<GCedWithCallback>* last_object = &root->object;
   for (int i = 0; i < kNumStep; ++i) {
     for (int j = 0; j < kNumStep; ++j) {
       MakeGarbageCollected<GCedWithCallback>(
@@ -137,11 +138,10 @@ TEST_F(ConcurrentMarkingTest, MarkingInConstructionObjects) {
 }

 TEST_F(ConcurrentMarkingTest, MarkingMixinObjects) {
-  static constexpr int kNumStep = 1000;
   StartConcurrentGC();
   Persistent<GCedHolder<Mixin>> root =
       MakeGarbageCollected<GCedHolder<Mixin>>(GetAllocationHandle());
-  Member<Mixin>* last_object = &root->object_;
+  Member<Mixin>* last_object = &root->object;
   for (int i = 0; i < kNumStep; ++i) {
     for (int j = 0; j < kNumStep; ++j) {
       *last_object = MakeGarbageCollected<GCedWithMixin>(GetAllocationHandle());
@@ -153,7 +153,58 @@ TEST_F(ConcurrentMarkingTest, MarkingMixinObjects) {
   FinishGC();
 }

-#endif  // defined(THREAD_SANITIZER)
+namespace {
+
+struct ConcurrentlyTraceable : public GarbageCollected<ConcurrentlyTraceable> {
+  static size_t trace_counter;
+  void Trace(Visitor*) const { ++trace_counter; }
+};
+size_t ConcurrentlyTraceable::trace_counter = 0;
+
+struct NotConcurrentlyTraceable
+    : public GarbageCollected<NotConcurrentlyTraceable> {
+  static size_t trace_counter;
+  void Trace(Visitor* visitor) const {
+    if (visitor->DeferTraceToMutatorThreadIfConcurrent(
+            this,
+            [](Visitor*, const void*) {
+              ++NotConcurrentlyTraceable::trace_counter;
+            },
+            sizeof(NotConcurrentlyTraceable)))
+      return;
+    ++trace_counter;
+  }
+};
+size_t NotConcurrentlyTraceable::trace_counter = 0;
+
+}  // namespace
+
+TEST_F(ConcurrentMarkingTest, ConcurrentlyTraceableObjectIsTracedConcurrently) {
+  Persistent<GCedHolder<ConcurrentlyTraceable>> root =
+      MakeGarbageCollected<GCedHolder<ConcurrentlyTraceable>>(
+          GetAllocationHandle());
+  root->object =
+      MakeGarbageCollected<ConcurrentlyTraceable>(GetAllocationHandle());
+  EXPECT_EQ(0u, ConcurrentlyTraceable::trace_counter);
+  StartConcurrentGC();
+  GetMarkerRef()->WaitForConcurrentMarkingForTesting();
+  EXPECT_NE(0u, ConcurrentlyTraceable::trace_counter);
+  FinishGC();
+}
+
+TEST_F(ConcurrentMarkingTest,
+       NotConcurrentlyTraceableObjectIsNotTracedConcurrently) {
+  Persistent<GCedHolder<NotConcurrentlyTraceable>> root =
+      MakeGarbageCollected<GCedHolder<NotConcurrentlyTraceable>>(
+          GetAllocationHandle());
+  root->object =
+      MakeGarbageCollected<NotConcurrentlyTraceable>(GetAllocationHandle());
+  EXPECT_EQ(0u, NotConcurrentlyTraceable::trace_counter);
+  StartConcurrentGC();
+  GetMarkerRef()->WaitForConcurrentMarkingForTesting();
+  EXPECT_EQ(0u, NotConcurrentlyTraceable::trace_counter);
+  FinishGC();
+}
+
 }  // namespace internal
 }  // namespace cppgc