Commit 8ac2d54a authored by Omer Katz, committed by Commit Bot

cppgc: Add bailout for concurrent marking

Tracing JSMembers uses the bailout mechanism.
The bailout is implemented as a dynamic mechanism named
DeferTraceToMutatorThreadIfConcurrent that is called from
relevant Trace methods.

Bug: chromium:1056170
Change-Id: I90e6feae25c4c832be256693f9e44a963a6794b7
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2426613
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70429}
parent 4569ffae
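To make the mechanism concrete before the diff: a concurrent marking visitor overrides DeferTraceToMutatorThreadIfConcurrent to push a {parameter, callback, deferred_size} item onto a bailout worklist instead of tracing, and the mutator thread later drains that worklist and invokes the callbacks. Below is a deliberately simplified, self-contained model of that flow (plain C++; these are illustrative stand-ins, not the V8 declarations):

#include <cstddef>
#include <vector>

class Visitor;
using TraceCallback = void (*)(Visitor*, const void*);

// Work item recorded when tracing is deferred (mirrors the shape of the
// ConcurrentMarkingBailoutItem added by this CL).
struct BailoutItem {
  const void* parameter;
  TraceCallback callback;
  std::size_t size;
};

class Visitor {
 public:
  virtual ~Visitor() = default;
  // Mutator visitors keep the default: nothing is deferred.
  virtual bool DeferTraceToMutatorThreadIfConcurrent(const void*, TraceCallback,
                                                     std::size_t) {
    return false;
  }
};

class ConcurrentVisitor final : public Visitor {
 public:
  explicit ConcurrentVisitor(std::vector<BailoutItem>& worklist)
      : worklist_(worklist) {}
  bool DeferTraceToMutatorThreadIfConcurrent(const void* parameter,
                                             TraceCallback callback,
                                             std::size_t size) final {
    // Defer: record the work instead of tracing on this thread.
    worklist_.push_back({parameter, callback, size});
    return true;
  }

 private:
  std::vector<BailoutItem>& worklist_;
};

// Later, on the mutator thread: drain the bailout worklist and perform the
// actual tracing there.
void DrainBailoutWorklist(Visitor& mutator_visitor,
                          std::vector<BailoutItem>& worklist) {
  for (const BailoutItem& item : worklist) {
    item.callback(&mutator_visitor, item.parameter);
  }
  worklist.clear();
}

In the CL itself the worklist is MarkingWorklists::ConcurrentMarkingBailoutWorklist and the drain happens in MarkerBase::ProcessWorklistsWithDeadline, both shown in the diff below.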
......@@ -47,7 +47,7 @@ using WeakCallback = void (*)(const LivenessBroker&, const void*);
* };
* \endcode
*/
class Visitor {
class V8_EXPORT Visitor {
public:
class Key {
private:
......@@ -131,6 +131,25 @@ class Visitor {
*/
virtual void RegisterWeakCallback(WeakCallback callback, const void* data) {}
/**
* Defers tracing an object from a concurrent thread to the mutator thread.
* Should be called by Trace methods of types that are not safe to trace
* concurrently.
*
* \param parameter tells the trace callback which object was deferred.
* \param callback to be invoked for tracing on the mutator thread.
* \param deferred_size size of deferred object.
*
* \returns false if the object does not need to be deferred (i.e. currently
* traced on the mutator thread) and true otherwise (i.e. currently traced on
* a concurrent thread).
*/
virtual V8_WARN_UNUSED_RESULT bool DeferTraceToMutatorThreadIfConcurrent(
const void* parameter, TraceCallback callback, size_t deferred_size) {
// By default tracing is not deferred.
return false;
}
protected:
virtual void Visit(const void* self, TraceDescriptor) {}
virtual void VisitWeak(const void* self, TraceDescriptor, WeakCallback,
......@@ -200,7 +219,7 @@ class Visitor {
}
#if V8_ENABLE_CHECKS
V8_EXPORT void CheckObjectNotInConstruction(const void* address);
void CheckObjectNotInConstruction(const void* address);
#endif // V8_ENABLE_CHECKS
template <typename T, typename WeaknessPolicy, typename LocationPolicy,
......
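For reference, a usage sketch of the Visitor API added above (illustrative code, not part of the CL; the class name is made up — compare the NotConcurrentlyTraceable unit test near the end of this change). A Trace method that is unsafe to run concurrently first offers the object to the bailout and only traces when it is running on the mutator thread:

#include "cppgc/garbage-collected.h"
#include "cppgc/member.h"
#include "cppgc/visitor.h"

// Illustrative only: a type whose Trace must not run off the mutator thread.
class NotThreadSafeObject final
    : public cppgc::GarbageCollected<NotThreadSafeObject> {
 public:
  void Trace(cppgc::Visitor* visitor) const {
    if (visitor->DeferTraceToMutatorThreadIfConcurrent(
            this,
            [](cppgc::Visitor* visitor, const void* object) {
              // Invoked later on the mutator thread, where the bailout
              // returns false and the body below actually runs.
              static_cast<const NotThreadSafeObject*>(object)->Trace(visitor);
            },
            sizeof(NotThreadSafeObject))) {
      return;  // A concurrent marker deferred this object.
    }
    // Mutator-thread-only tracing.
    visitor->Trace(child_);
  }

 private:
  cppgc::Member<NotThreadSafeObject> child_;
};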
......@@ -210,8 +210,7 @@ class JSVisitor : public cppgc::Visitor {
public:
explicit JSVisitor(cppgc::Visitor::Key key) : cppgc::Visitor(key) {}
template <typename T>
void Trace(const JSMember<T>& ref) {
void Trace(const internal::JSMemberBase& ref) {
if (ref.IsEmptyThreadSafe()) return;
Visit(ref);
}
......@@ -226,9 +225,9 @@ class JSVisitor : public cppgc::Visitor {
namespace cppgc {
template <typename T>
struct TraceTrait<v8::JSMember<T>> {
static void Trace(Visitor* visitor, const v8::JSMember<T>* self) {
template <>
struct TraceTrait<v8::internal::JSMemberBase> {
static void Trace(Visitor* visitor, const v8::internal::JSMemberBase* self) {
static_cast<v8::JSVisitor*>(visitor)->Trace(*self);
}
};
......
......@@ -35,7 +35,6 @@ class UnifiedHeapMarkingState {
};
void UnifiedHeapMarkingState::MarkAndPush(const JSMemberBase& ref) {
// TODO(chromium:1056170): Defer concurrent handling using the bailout.
heap_.RegisterExternallyReferencedObject(
JSMemberBaseExtractor::ObjectReference(ref));
}
......
......@@ -37,8 +37,18 @@ void UnifiedHeapMarkingVisitorBase::RegisterWeakCallback(WeakCallback callback,
marking_state_.RegisterWeakCallback(callback, object);
}
namespace {
void DeferredTraceJSMember(cppgc::Visitor* visitor, const void* ref) {
static_cast<JSVisitor*>(visitor)->Trace(
*static_cast<const internal::JSMemberBase*>(ref));
}
} // namespace
void UnifiedHeapMarkingVisitorBase::Visit(const internal::JSMemberBase& ref) {
unified_heap_marking_state_.MarkAndPush(ref);
bool should_defer_tracing =
DeferTraceToMutatorThreadIfConcurrent(&ref, DeferredTraceJSMember, 0);
if (!should_defer_tracing) unified_heap_marking_state_.MarkAndPush(ref);
}
MutatorUnifiedHeapMarkingVisitor::MutatorUnifiedHeapMarkingVisitor(
......@@ -66,5 +76,15 @@ ConcurrentUnifiedHeapMarkingVisitor::ConcurrentUnifiedHeapMarkingVisitor(
: UnifiedHeapMarkingVisitorBase(heap, marking_state,
unified_heap_marking_state) {}
bool ConcurrentUnifiedHeapMarkingVisitor::DeferTraceToMutatorThreadIfConcurrent(
const void* parameter, cppgc::TraceCallback callback,
size_t deferred_size) {
marking_state_.concurrent_marking_bailout_worklist().Push(
{parameter, callback, deferred_size});
static_cast<ConcurrentMarkingState&>(marking_state_)
.AccountDeferredMarkedBytes(deferred_size);
return true;
}
} // namespace internal
} // namespace v8
......@@ -48,7 +48,7 @@ class V8_EXPORT_PRIVATE UnifiedHeapMarkingVisitorBase : public JSVisitor {
UnifiedHeapMarkingState& unified_heap_marking_state_;
};
class V8_EXPORT_PRIVATE MutatorUnifiedHeapMarkingVisitor
class V8_EXPORT_PRIVATE MutatorUnifiedHeapMarkingVisitor final
: public UnifiedHeapMarkingVisitorBase {
public:
MutatorUnifiedHeapMarkingVisitor(HeapBase&, MutatorMarkingState&,
......@@ -61,7 +61,7 @@ class V8_EXPORT_PRIVATE MutatorUnifiedHeapMarkingVisitor
const void*) final;
};
class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor
class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor final
: public UnifiedHeapMarkingVisitorBase {
public:
ConcurrentUnifiedHeapMarkingVisitor(HeapBase&, ConcurrentMarkingState&,
......@@ -74,6 +74,9 @@ class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor
const void*) final {
UNREACHABLE();
}
bool DeferTraceToMutatorThreadIfConcurrent(const void*, cppgc::TraceCallback,
size_t) final;
};
} // namespace internal
......
......@@ -103,10 +103,10 @@ void ConcurrentMarkingTask::ProcessWorklists(
[&concurrent_marking_state,
&concurrent_marking_visitor](HeapObjectHeader* header) {
BasePage::FromPayload(header)->SynchronizedLoad();
concurrent_marking_state.AccountMarkedBytes(*header);
DynamicallyTraceMarkedObject<
HeapObjectHeader::AccessMode::kAtomic>(
concurrent_marking_visitor, *header);
concurrent_marking_state.AccountMarkedBytes(*header);
})) {
return;
}
......@@ -124,9 +124,9 @@ void ConcurrentMarkingTask::ProcessWorklists(
DCHECK(!header.IsInConstruction<
HeapObjectHeader::AccessMode::kAtomic>());
DCHECK(header.IsMarked<HeapObjectHeader::AccessMode::kAtomic>());
concurrent_marking_state.AccountMarkedBytes(header);
item.callback(&concurrent_marking_visitor,
item.base_object_payload);
concurrent_marking_state.AccountMarkedBytes(header);
})) {
return;
}
......@@ -138,10 +138,10 @@ void ConcurrentMarkingTask::ProcessWorklists(
[&concurrent_marking_state,
&concurrent_marking_visitor](HeapObjectHeader* header) {
BasePage::FromPayload(header)->SynchronizedLoad();
concurrent_marking_state.AccountMarkedBytes(*header);
DynamicallyTraceMarkedObject<
HeapObjectHeader::AccessMode::kAtomic>(
concurrent_marking_visitor, *header);
concurrent_marking_state.AccountMarkedBytes(*header);
})) {
return;
}
......@@ -168,7 +168,13 @@ void ConcurrentMarkerBase::Start() {
}
void ConcurrentMarkerBase::Cancel() {
if (concurrent_marking_handle_) concurrent_marking_handle_->Cancel();
if (concurrent_marking_handle_ && concurrent_marking_handle_->IsRunning())
concurrent_marking_handle_->Cancel();
}
void ConcurrentMarkerBase::JoinForTesting() {
if (concurrent_marking_handle_ && concurrent_marking_handle_->IsRunning())
concurrent_marking_handle_->Join();
}
ConcurrentMarkerBase::~ConcurrentMarkerBase() {
......
......@@ -26,6 +26,8 @@ class V8_EXPORT_PRIVATE ConcurrentMarkerBase {
void Start();
void Cancel();
void JoinForTesting();
bool NotifyIncrementalMutatorStepCompleted();
HeapBase& heap() const { return heap_; }
......
......@@ -190,6 +190,7 @@ void MarkerBase::StartMarking() {
ScheduleIncrementalMarkingTask();
if (config_.marking_type ==
MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
mutator_marking_state_.Publish();
concurrent_marker_->Start();
}
}
......@@ -335,13 +336,27 @@ bool MarkerBase::AdvanceMarkingWithDeadline(v8::base::TimeDelta max_duration) {
bool MarkerBase::ProcessWorklistsWithDeadline(
size_t marked_bytes_deadline, v8::base::TimeTicks time_deadline) {
do {
// Bailout objects may be complicated to trace and thus might take longer
// than other objects. Therefore we reduce the interval between deadline
// checks to guarantee the deadline is not exceeded.
if (!DrainWorklistWithBytesAndTimeDeadline<kDefaultDeadlineCheckInterval /
5>(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.concurrent_marking_bailout_worklist(),
[this](const MarkingWorklists::ConcurrentMarkingBailoutItem& item) {
mutator_marking_state_.AccountMarkedBytes(item.bailedout_size);
item.callback(&visitor(), item.parameter);
})) {
return false;
}
if (!DrainWorklistWithBytesAndTimeDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.previously_not_fully_constructed_worklist(),
[this](HeapObjectHeader* header) {
mutator_marking_state_.AccountMarkedBytes(*header);
DynamicallyTraceMarkedObject<
HeapObjectHeader::AccessMode::kNonAtomic>(visitor(), *header);
mutator_marking_state_.AccountMarkedBytes(*header);
})) {
return false;
}
......@@ -356,8 +371,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
HeapObjectHeader::AccessMode::kNonAtomic>());
DCHECK(
header.IsMarked<HeapObjectHeader::AccessMode::kNonAtomic>());
item.callback(&visitor(), item.base_object_payload);
mutator_marking_state_.AccountMarkedBytes(header);
item.callback(&visitor(), item.base_object_payload);
})) {
return false;
}
......@@ -366,9 +381,9 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
mutator_marking_state_, marked_bytes_deadline, time_deadline,
mutator_marking_state_.write_barrier_worklist(),
[this](HeapObjectHeader* header) {
mutator_marking_state_.AccountMarkedBytes(*header);
DynamicallyTraceMarkedObject<
HeapObjectHeader::AccessMode::kNonAtomic>(visitor(), *header);
mutator_marking_state_.AccountMarkedBytes(*header);
})) {
return false;
}
......@@ -396,6 +411,10 @@ void MarkerBase::DisableIncrementalMarkingForTesting() {
incremental_marking_disabled_for_testing_ = true;
}
void MarkerBase::WaitForConcurrentMarkingForTesting() {
concurrent_marker_->JoinForTesting();
}
Marker::Marker(Key key, HeapBase& heap, cppgc::Platform* platform,
MarkingConfig config)
: MarkerBase(key, heap, platform, config),
......
......@@ -122,6 +122,8 @@ class V8_EXPORT_PRIVATE MarkerBase {
void DisableIncrementalMarkingForTesting();
void WaitForConcurrentMarkingForTesting();
protected:
static constexpr v8::base::TimeDelta kMaximumIncrementalStepDuration =
v8::base::TimeDelta::FromMilliseconds(2);
......
......@@ -31,6 +31,7 @@ class MarkingStateBase {
inline void RegisterWeakCallback(WeakCallback, const void*);
inline void AccountMarkedBytes(const HeapObjectHeader&);
inline void AccountMarkedBytes(size_t);
size_t marked_bytes() const { return marked_bytes_; }
void Publish() {
......@@ -38,6 +39,7 @@ class MarkingStateBase {
previously_not_fully_constructed_worklist_.Publish();
weak_callback_worklist_.Publish();
write_barrier_worklist_.Publish();
concurrent_marking_bailout_worklist_.Publish();
}
MarkingWorklists::MarkingWorklist::Local& marking_worklist() {
......@@ -57,6 +59,10 @@ class MarkingStateBase {
MarkingWorklists::WriteBarrierWorklist::Local& write_barrier_worklist() {
return write_barrier_worklist_;
}
MarkingWorklists::ConcurrentMarkingBailoutWorklist::Local&
concurrent_marking_bailout_worklist() {
return concurrent_marking_bailout_worklist_;
}
protected:
inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);
......@@ -74,6 +80,8 @@ class MarkingStateBase {
previously_not_fully_constructed_worklist_;
MarkingWorklists::WeakCallbackWorklist::Local weak_callback_worklist_;
MarkingWorklists::WriteBarrierWorklist::Local write_barrier_worklist_;
MarkingWorklists::ConcurrentMarkingBailoutWorklist::Local
concurrent_marking_bailout_worklist_;
size_t marked_bytes_ = 0;
};
......@@ -90,7 +98,9 @@ MarkingStateBase::MarkingStateBase(HeapBase& heap,
previously_not_fully_constructed_worklist_(
marking_worklists.previously_not_fully_constructed_worklist()),
weak_callback_worklist_(marking_worklists.weak_callback_worklist()),
write_barrier_worklist_(marking_worklists.write_barrier_worklist()) {
write_barrier_worklist_(marking_worklists.write_barrier_worklist()),
concurrent_marking_bailout_worklist_(
marking_worklists.concurrent_marking_bailout_worklist()) {
}
void MarkingStateBase::MarkAndPush(const void* object, TraceDescriptor desc) {
......@@ -141,11 +151,15 @@ void MarkingStateBase::RegisterWeakReferenceIfNeeded(const void* object,
}
void MarkingStateBase::AccountMarkedBytes(const HeapObjectHeader& header) {
marked_bytes_ +=
AccountMarkedBytes(
header.IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>()
? reinterpret_cast<const LargePage*>(BasePage::FromPayload(&header))
->PayloadSize()
: header.GetSize<HeapObjectHeader::AccessMode::kAtomic>();
: header.GetSize<HeapObjectHeader::AccessMode::kAtomic>());
}
void MarkingStateBase::AccountMarkedBytes(size_t marked_bytes) {
marked_bytes_ += marked_bytes;
}
class MutatorMarkingState : public MarkingStateBase {
......@@ -208,6 +222,13 @@ class ConcurrentMarkingState : public MarkingStateBase {
return marked_bytes_ - std::exchange(last_marked_bytes_, marked_bytes_);
}
inline void AccountDeferredMarkedBytes(size_t deferred_bytes) {
// AccountDeferredMarkedBytes is called from Trace methods, which are always
// called after AccountMarkedBytes, so there should be no underflow here.
DCHECK_LE(deferred_bytes, marked_bytes_);
marked_bytes_ -= deferred_bytes;
}
private:
size_t last_marked_bytes_ = 0;
};
......
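The reordering of AccountMarkedBytes ahead of the trace calls throughout this diff appears tied to the bailout accounting above: when a Trace method bails out, the concurrent thread immediately subtracts the just-added size via AccountDeferredMarkedBytes (hence the DCHECK against underflow), and the mutator re-adds the size when it drains the bailout worklist (AccountMarkedBytes(item.bailedout_size)). A small worked example of the net effect, with an assumed object size:

#include <cassert>
#include <cstddef>

int main() {
  const std::size_t kObjectSize = 64;  // assumed size of a bailed-out object

  // Concurrent thread: the worklist loop accounts the bytes first, then the
  // object's Trace bails out and the deferred size is subtracted again.
  std::size_t concurrent_marked_bytes = 0;
  concurrent_marked_bytes += kObjectSize;  // AccountMarkedBytes(header)
  concurrent_marked_bytes -= kObjectSize;  // AccountDeferredMarkedBytes(size)

  // Mutator thread: the bytes are accounted when the bailout worklist is
  // drained and the deferred callback is invoked there.
  std::size_t mutator_marked_bytes = 0;
  mutator_marked_bytes += kObjectSize;  // AccountMarkedBytes(bailedout_size)

  // Net effect: the object's bytes are counted exactly once, on the thread
  // that actually traced it.
  assert(concurrent_marked_bytes == 0);
  assert(mutator_marked_bytes == kObjectSize);
  return 0;
}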
......@@ -38,8 +38,8 @@ ConservativeMarkingVisitor::ConservativeMarkingVisitor(
void ConservativeMarkingVisitor::VisitConservatively(
HeapObjectHeader& header, TraceConservativelyCallback callback) {
marking_state_.MarkNoPush(header);
callback(this, header);
marking_state_.AccountMarkedBytes(header);
callback(this, header);
}
MutatorMarkingVisitor::MutatorMarkingVisitor(HeapBase& heap,
......@@ -67,5 +67,14 @@ void ConservativeMarkingVisitor::VisitPointer(const void* address) {
TraceConservativelyIfNeeded(address);
}
bool ConcurrentMarkingVisitor::DeferTraceToMutatorThreadIfConcurrent(
const void* parameter, TraceCallback callback, size_t deferred_size) {
marking_state_.concurrent_marking_bailout_worklist().Push(
{parameter, callback, deferred_size});
static_cast<ConcurrentMarkingState&>(marking_state_)
.AccountDeferredMarkedBytes(deferred_size);
return true;
}
} // namespace internal
} // namespace cppgc
......@@ -44,7 +44,8 @@ class V8_EXPORT_PRIVATE MutatorMarkingVisitor : public MarkingVisitorBase {
const void*) final;
};
class V8_EXPORT_PRIVATE ConcurrentMarkingVisitor : public MarkingVisitorBase {
class V8_EXPORT_PRIVATE ConcurrentMarkingVisitor final
: public MarkingVisitorBase {
public:
ConcurrentMarkingVisitor(HeapBase&, ConcurrentMarkingState&);
~ConcurrentMarkingVisitor() override = default;
......@@ -55,6 +56,9 @@ class V8_EXPORT_PRIVATE ConcurrentMarkingVisitor : public MarkingVisitorBase {
const void*) final {
UNREACHABLE();
}
bool DeferTraceToMutatorThreadIfConcurrent(const void*, TraceCallback,
size_t) final;
};
class ConservativeMarkingVisitor : public ConservativeTracingVisitor,
......
......@@ -16,6 +16,7 @@ void MarkingWorklists::ClearForTesting() {
previously_not_fully_constructed_worklist_.Clear();
write_barrier_worklist_.Clear();
weak_callback_worklist_.Clear();
concurrent_marking_bailout_worklist_.Clear();
}
void MarkingWorklists::NotFullyConstructedWorklist::Push(
......
......@@ -21,11 +21,18 @@ class MarkingWorklists {
static constexpr int kMutatorThreadId = 0;
using MarkingItem = cppgc::TraceDescriptor;
struct WeakCallbackItem {
cppgc::WeakCallback callback;
const void* parameter;
};
struct ConcurrentMarkingBailoutItem {
const void* parameter;
TraceCallback callback;
size_t bailedout_size;
};
// Segment size of 512 entries necessary to avoid throughput regressions.
// Since the work list is currently a temporary object this is not a problem.
using MarkingWorklist =
......@@ -36,6 +43,9 @@ class MarkingWorklists {
heap::base::Worklist<WeakCallbackItem, 64 /* local entries */>;
using WriteBarrierWorklist =
heap::base::Worklist<HeapObjectHeader*, 64 /* local entries */>;
using ConcurrentMarkingBailoutWorklist =
heap::base::Worklist<ConcurrentMarkingBailoutItem,
64 /* local entries */>;
class V8_EXPORT_PRIVATE NotFullyConstructedWorklist {
public:
......@@ -72,6 +82,9 @@ class MarkingWorklists {
WeakCallbackWorklist* weak_callback_worklist() {
return &weak_callback_worklist_;
}
ConcurrentMarkingBailoutWorklist* concurrent_marking_bailout_worklist() {
return &concurrent_marking_bailout_worklist_;
}
void ClearForTesting();
......@@ -82,6 +95,7 @@ class MarkingWorklists {
previously_not_fully_constructed_worklist_;
WriteBarrierWorklist write_barrier_worklist_;
WeakCallbackWorklist weak_callback_worklist_;
ConcurrentMarkingBailoutWorklist concurrent_marking_bailout_worklist_;
};
} // namespace internal
......
......@@ -16,51 +16,17 @@
namespace cppgc {
namespace internal {
#if defined(THREAD_SANITIZER)
namespace {
class GCed : public GarbageCollected<GCed> {
public:
void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
Member<GCed> child_;
};
class GCedWithCallback : public GarbageCollected<GCedWithCallback> {
public:
template <typename Callback>
explicit GCedWithCallback(Callback callback) {
callback(this);
}
void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
Member<GCedWithCallback> child_;
};
class Mixin : public GarbageCollectedMixin {
public:
void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
Member<Mixin> child_;
};
class GCedWithMixin : public GarbageCollected<GCedWithMixin>, public Mixin {
public:
void Trace(cppgc::Visitor* visitor) const { Mixin::Trace(visitor); }
};
template <typename T>
class GCedHolder : public GarbageCollected<GCedHolder<T>> {
public:
void Trace(cppgc::Visitor* visitor) const { visitor->Trace(object_); }
Member<T> object_;
};
class ConcurrentMarkingTest : public testing::TestWithHeap {
public:
#if defined(THREAD_SANITIZER)
// Use more iterations on TSan builds to expose data races.
static constexpr int kNumStep = 1000;
#else
static constexpr int kNumStep = 10;
#endif // defined(THREAD_SANITIZER)
using Config = Heap::Config;
static constexpr Config ConcurrentPreciseConfig = {
Config::CollectionType::kMajor, Config::StackState::kNoHeapPointers,
......@@ -95,16 +61,52 @@ class ConcurrentMarkingTest : public testing::TestWithHeap {
constexpr ConcurrentMarkingTest::Config
ConcurrentMarkingTest::ConcurrentPreciseConfig;
template <typename T>
struct GCedHolder : public GarbageCollected<GCedHolder<T>> {
void Trace(cppgc::Visitor* visitor) const { visitor->Trace(object); }
Member<T> object;
};
class GCed : public GarbageCollected<GCed> {
public:
void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
Member<GCed> child_;
};
class GCedWithCallback : public GarbageCollected<GCedWithCallback> {
public:
template <typename Callback>
explicit GCedWithCallback(Callback callback) {
callback(this);
}
void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
Member<GCedWithCallback> child_;
};
class Mixin : public GarbageCollectedMixin {
public:
void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
Member<Mixin> child_;
};
class GCedWithMixin : public GarbageCollected<GCedWithMixin>, public Mixin {
public:
void Trace(cppgc::Visitor* visitor) const { Mixin::Trace(visitor); }
};
} // namespace
// The following tests check for data races during concurrent marking.
TEST_F(ConcurrentMarkingTest, MarkingObjects) {
static constexpr int kNumStep = 1000;
StartConcurrentGC();
Persistent<GCedHolder<GCed>> root =
MakeGarbageCollected<GCedHolder<GCed>>(GetAllocationHandle());
Member<GCed>* last_object = &root->object_;
Member<GCed>* last_object = &root->object;
for (int i = 0; i < kNumStep; ++i) {
for (int j = 0; j < kNumStep; ++j) {
*last_object = MakeGarbageCollected<GCed>(GetAllocationHandle());
......@@ -117,11 +119,10 @@ TEST_F(ConcurrentMarkingTest, MarkingObjects) {
}
TEST_F(ConcurrentMarkingTest, MarkingInConstructionObjects) {
static constexpr int kNumStep = 1000;
StartConcurrentGC();
Persistent<GCedHolder<GCedWithCallback>> root =
MakeGarbageCollected<GCedHolder<GCedWithCallback>>(GetAllocationHandle());
Member<GCedWithCallback>* last_object = &root->object_;
Member<GCedWithCallback>* last_object = &root->object;
for (int i = 0; i < kNumStep; ++i) {
for (int j = 0; j < kNumStep; ++j) {
MakeGarbageCollected<GCedWithCallback>(
......@@ -137,11 +138,10 @@ TEST_F(ConcurrentMarkingTest, MarkingInConstructionObjects) {
}
TEST_F(ConcurrentMarkingTest, MarkingMixinObjects) {
static constexpr int kNumStep = 1000;
StartConcurrentGC();
Persistent<GCedHolder<Mixin>> root =
MakeGarbageCollected<GCedHolder<Mixin>>(GetAllocationHandle());
Member<Mixin>* last_object = &root->object_;
Member<Mixin>* last_object = &root->object;
for (int i = 0; i < kNumStep; ++i) {
for (int j = 0; j < kNumStep; ++j) {
*last_object = MakeGarbageCollected<GCedWithMixin>(GetAllocationHandle());
......@@ -153,7 +153,58 @@ TEST_F(ConcurrentMarkingTest, MarkingMixinObjects) {
FinishGC();
}
#endif // defined(THREAD_SANITIZER)
namespace {
struct ConcurrentlyTraceable : public GarbageCollected<ConcurrentlyTraceable> {
static size_t trace_counter;
void Trace(Visitor*) const { ++trace_counter; }
};
size_t ConcurrentlyTraceable::trace_counter = 0;
struct NotConcurrentlyTraceable
: public GarbageCollected<NotConcurrentlyTraceable> {
static size_t trace_counter;
void Trace(Visitor* visitor) const {
if (visitor->DeferTraceToMutatorThreadIfConcurrent(
this,
[](Visitor*, const void*) {
++NotConcurrentlyTraceable::trace_counter;
},
sizeof(NotConcurrentlyTraceable)))
return;
++trace_counter;
}
};
size_t NotConcurrentlyTraceable::trace_counter = 0;
} // namespace
TEST_F(ConcurrentMarkingTest, ConcurrentlyTraceableObjectIsTracedConcurrently) {
Persistent<GCedHolder<ConcurrentlyTraceable>> root =
MakeGarbageCollected<GCedHolder<ConcurrentlyTraceable>>(
GetAllocationHandle());
root->object =
MakeGarbageCollected<ConcurrentlyTraceable>(GetAllocationHandle());
EXPECT_EQ(0u, ConcurrentlyTraceable::trace_counter);
StartConcurrentGC();
GetMarkerRef()->WaitForConcurrentMarkingForTesting();
EXPECT_NE(0u, ConcurrentlyTraceable::trace_counter);
FinishGC();
}
TEST_F(ConcurrentMarkingTest,
NotConcurrentlyTraceableObjectIsNotTracedConcurrently) {
Persistent<GCedHolder<NotConcurrentlyTraceable>> root =
MakeGarbageCollected<GCedHolder<NotConcurrentlyTraceable>>(
GetAllocationHandle());
root->object =
MakeGarbageCollected<NotConcurrentlyTraceable>(GetAllocationHandle());
EXPECT_EQ(0u, NotConcurrentlyTraceable::trace_counter);
StartConcurrentGC();
GetMarkerRef()->WaitForConcurrentMarkingForTesting();
EXPECT_EQ(0u, NotConcurrentlyTraceable::trace_counter);
FinishGC();
}
} // namespace internal
} // namespace cppgc