Commit c7ff90d9 authored by Michael Lippautz, committed by Commit Bot

cppgc: Avoid dispatching write barrier during atomic pause

This change avoids dispatching a write barrier during the atomic pause.
The dispatch can generally be triggered through pre-finalizers.

In the future, further checks may be added to avoid misuse of
pre-finalizers.

Bug: chromium:1056170, chromium:1175560
Change-Id: I119e18372633b2375f60e17b4c881f68bb20bf66
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2679685
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72560}
parent 0c8b6e41
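To illustrate the scenario the message describes, here is a minimal sketch (class and method names are invented for illustration and are not part of this CL) of a pre-finalizer that rewires a Member<> field. The assignment goes through the regular write-barrier dispatch, which before this change could still select the marking barrier while the heap is in the atomic pause:

// Illustrative sketch only; names here are invented and not part of this CL.
#include "cppgc/garbage-collected.h"
#include "cppgc/member.h"
#include "cppgc/prefinalizer.h"
#include "cppgc/visitor.h"

namespace example {

class Node final : public cppgc::GarbageCollected<Node> {
  CPPGC_USING_PRE_FINALIZER(Node, PreFinalizer);

 public:
  explicit Node(Node* next) : next_(next) {}

  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(next_); }

  // Pre-finalizers run during the atomic pause, after marking has finished.
  // The Member<> assignment below goes through the regular write-barrier
  // dispatch; this CL ensures that dispatch no longer selects the marking
  // barrier in that state.
  void PreFinalizer() {
    if (next_) next_ = next_->next_;
  }

 private:
  cppgc::Member<Node> next_;
};

}  // namespace example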
@@ -55,7 +55,7 @@ static_assert(sizeof(AgeTable) == 1 * api_constants::kMB,
 struct CagedHeapLocalData final {
   explicit CagedHeapLocalData(HeapBase* heap_base) : heap_base(heap_base) {}

-  bool is_marking_in_progress = false;
+  bool is_incremental_marking_in_progress = false;
   HeapBase* heap_base = nullptr;
 #if defined(CPPGC_YOUNG_GENERATION)
   AgeTable age_table;
...
@@ -138,7 +138,7 @@ class V8_EXPORT WriteBarrierTypeForCagedHeapPolicy final {
     if (!TryGetCagedHeap(value, value, params)) {
       return WriteBarrier::Type::kNone;
     }
-    if (V8_UNLIKELY(params.caged_heap().is_marking_in_progress)) {
+    if (V8_UNLIKELY(params.caged_heap().is_incremental_marking_in_progress)) {
       return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
     }
     return SetAndReturnType<WriteBarrier::Type::kNone>(params);
@@ -182,7 +182,7 @@ struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
     if (!within_cage) {
       return WriteBarrier::Type::kNone;
     }
-    if (V8_LIKELY(!params.caged_heap().is_marking_in_progress)) {
+    if (V8_LIKELY(!params.caged_heap().is_incremental_marking_in_progress)) {
 #if defined(CPPGC_YOUNG_GENERATION)
       params.heap = reinterpret_cast<HeapHandle*>(params.start);
       params.slot_offset = reinterpret_cast<uintptr_t>(slot) - params.start;
@@ -246,28 +246,19 @@ class V8_EXPORT WriteBarrierTypeForNonCagedHeapPolicy final {
   static V8_INLINE WriteBarrier::Type GetForExternallyReferenced(
       const void* value, WriteBarrier::Params& params,
       HeapHandleCallback callback) {
-    return GetInternal(params, callback);
+    // The slot will never be used in `Get()` below.
+    return Get<WriteBarrier::ValueMode::kValuePresent>(nullptr, value, params,
+                                                       callback);
   }

  private:
   template <WriteBarrier::ValueMode value_mode>
   struct ValueModeDispatch;

-  template <typename HeapHandleCallback>
-  static V8_INLINE WriteBarrier::Type GetInternal(WriteBarrier::Params& params,
-                                                  HeapHandleCallback callback) {
-    if (V8_UNLIKELY(ProcessHeap::IsAnyIncrementalOrConcurrentMarking())) {
-      HeapHandle& handle = callback();
-      if (subtle::HeapState::IsMarking(handle)) {
-        params.heap = &handle;
-        return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
-      }
-    }
-    return WriteBarrier::Type::kNone;
-  }
-
   // TODO(chromium:1056170): Create fast path on API.
   static bool IsMarking(const void*, HeapHandle**);
+  // TODO(chromium:1056170): Create fast path on API.
+  static bool IsMarking(HeapHandle&);

   WriteBarrierTypeForNonCagedHeapPolicy() = delete;
 };
@@ -294,10 +285,17 @@ template <>
 struct WriteBarrierTypeForNonCagedHeapPolicy::ValueModeDispatch<
     WriteBarrier::ValueMode::kNoValuePresent> {
   template <typename HeapHandleCallback>
-  static V8_INLINE WriteBarrier::Type Get(const void* slot, const void*,
+  static V8_INLINE WriteBarrier::Type Get(const void*, const void*,
                                           WriteBarrier::Params& params,
                                           HeapHandleCallback callback) {
-    return GetInternal(params, callback);
+    if (V8_UNLIKELY(ProcessHeap::IsAnyIncrementalOrConcurrentMarking())) {
+      HeapHandle& handle = callback();
+      if (IsMarking(handle)) {
+        params.heap = &handle;
+        return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
+      }
+    }
+    return WriteBarrier::Type::kNone;
   }
 };
...
@@ -280,11 +280,11 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
     cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(*this);
     marker_->LeaveAtomicPause();
   }
-  marker_.reset();
   {
     cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(*this);
     prefinalizer_handler()->InvokePreFinalizers();
   }
+  marker_.reset();
   // TODO(chromium:1056170): replace build flag with dedicated flag.
 #if DEBUG
   UnifiedHeapMarkingVerifier verifier(*this);
...
@@ -183,12 +183,12 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
     cppgc::subtle::DisallowGarbageCollectionScope no_gc_scope(*this);
     marker_->FinishMarking(config_.stack_state);
   }
-  marker_.reset();
   {
     // Pre finalizers are forbidden from allocating objects.
     cppgc::subtle::DisallowGarbageCollectionScope no_gc_scope(*this);
     prefinalizer_handler_->InvokePreFinalizers();
   }
+  marker_.reset();
   // TODO(chromium:1056170): replace build flag with dedicated flag.
 #if DEBUG
   MarkingVerifier verifier(*this);
...
@@ -35,7 +35,7 @@ bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
       Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
     ProcessHeap::EnterIncrementalOrConcurrentMarking();
 #if defined(CPPGC_CAGED_HEAP)
-    heap.caged_heap().local_data().is_marking_in_progress = true;
+    heap.caged_heap().local_data().is_incremental_marking_in_progress = true;
 #endif
     return true;
   }
@@ -49,7 +49,7 @@ bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
       Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
     ProcessHeap::ExitIncrementalOrConcurrentMarking();
 #if defined(CPPGC_CAGED_HEAP)
-    heap.caged_heap().local_data().is_marking_in_progress = false;
+    heap.caged_heap().local_data().is_incremental_marking_in_progress = false;
 #endif
     return true;
   }
...
@@ -28,7 +28,7 @@ void ProcessMarkValue(HeapObjectHeader& header, MarkerBase* marker,
   DCHECK(reinterpret_cast<CagedHeapLocalData*>(
              reinterpret_cast<uintptr_t>(value) &
              ~(kCagedHeapReservationAlignment - 1))
-             ->is_marking_in_progress);
+             ->is_incremental_marking_in_progress);
 #endif
   DCHECK(header.IsMarked<AccessMode::kAtomic>());
   DCHECK(marker);
@@ -61,9 +61,10 @@ void WriteBarrier::DijkstraMarkingBarrierSlow(const void* value) {
   const BasePage* page = BasePage::FromPayload(value);
   const auto* heap = page->heap();

-  // Marker being not set up means that no incremental/concurrent marking is in
-  // progress.
-  if (!heap->marker()) return;
+  // GetWriteBarrierType() checks marking state.
+  DCHECK(heap->marker());
+  // No write barriers should be executed from atomic pause marking.
+  DCHECK(!heap->in_atomic_pause());

   auto& header =
       const_cast<HeapObjectHeader&>(page->ObjectHeaderFromInnerAddress(value));
@@ -77,10 +78,11 @@ void WriteBarrier::DijkstraMarkingBarrierRangeSlow(
     HeapHandle& heap_handle, const void* first_element, size_t element_size,
     size_t number_of_elements, TraceCallback trace_callback) {
   auto& heap_base = HeapBase::From(heap_handle);
-  MarkerBase* marker = heap_base.marker();
-  if (!marker) {
-    return;
-  }
+
+  // GetWriteBarrierType() checks marking state.
+  DCHECK(heap_base.marker());
+  // No write barriers should be executed from atomic pause marking.
+  DCHECK(!heap_base.in_atomic_pause());

   cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(heap_base);
   const char* array = static_cast<const char*>(first_element);
@@ -103,9 +105,10 @@ void WriteBarrier::SteeleMarkingBarrierSlow(const void* value) {
   const BasePage* page = BasePage::FromPayload(value);
   const auto* heap = page->heap();

-  // Marker being not set up means that no incremental/concurrent marking is in
-  // progress.
-  if (!heap->marker()) return;
+  // GetWriteBarrierType() checks marking state.
+  DCHECK(heap->marker());
+  // No write barriers should be executed from atomic pause marking.
+  DCHECK(!heap->in_atomic_pause());

   auto& header =
       const_cast<HeapObjectHeader&>(page->ObjectHeaderFromInnerAddress(value));
@@ -120,6 +123,11 @@ void WriteBarrier::GenerationalBarrierSlow(const CagedHeapLocalData& local_data,
                                            const AgeTable& age_table,
                                            const void* slot,
                                            uintptr_t value_offset) {
+  // A write during atomic pause (e.g. pre-finalizer) may trigger the slow path
+  // of the barrier. This is a result of the order of bailouts where not marking
+  // results in applying the generational barrier.
+  if (local_data.heap_base->in_atomic_pause()) return;
+
   if (value_offset > 0 && age_table[value_offset] == AgeTable::Age::kOld)
     return;
   // Record slot.
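The bailout order referenced in the comment above can be sketched as follows; this is an illustrative stand-alone function with invented names, not the CL's actual dispatch code. The point is that "not marking" selects the generational barrier, which is exactly the state a pre-finalizer write sees during the atomic pause:

// Illustrative sketch of the bailout order (invented helper, not part of the
// CL): with a caged heap and young generation enabled, "not marking" means
// the generational barrier is selected.
enum class BarrierType { kNone, kMarking, kGenerational };

BarrierType SelectBarrier(bool is_incremental_marking_in_progress,
                          bool young_generation_enabled) {
  if (is_incremental_marking_in_progress) return BarrierType::kMarking;
  // During the atomic pause the incremental-marking bit has already been
  // cleared, so a pre-finalizer write falls through to here; the actual
  // GenerationalBarrierSlow() now bails out via the in_atomic_pause() check
  // added above.
  return young_generation_enabled ? BarrierType::kGenerational
                                  : BarrierType::kNone;
}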
@@ -144,6 +152,12 @@ bool WriteBarrierTypeForNonCagedHeapPolicy::IsMarking(const void* object,
   return page->heap()->marker();
 }

+// static
+bool WriteBarrierTypeForNonCagedHeapPolicy::IsMarking(HeapHandle& heap_handle) {
+  const auto& heap_base = internal::HeapBase::From(heap_handle);
+  return heap_base.marker();
+}
+
 #if defined(CPPGC_CAGED_HEAP)

 // static
...
@@ -294,26 +294,26 @@ class IncrementalMarkingTest : public testing::TestWithHeap {
   }

   void FinishMarking() {
-    marker_->FinishMarking(MarkingConfig::StackState::kMayContainHeapPointers);
+    GetMarkerRef()->FinishMarking(
+        MarkingConfig::StackState::kMayContainHeapPointers);
     // Pretend do finish sweeping as StatsCollector verifies that Notify*
     // methods are called in the right order.
+    GetMarkerRef().reset();
     Heap::From(GetHeap())->stats_collector()->NotifySweepingCompleted();
   }

   void InitializeMarker(HeapBase& heap, cppgc::Platform* platform,
                         MarkingConfig config) {
-    marker_ =
+    GetMarkerRef() =
         MarkerFactory::CreateAndStartMarking<Marker>(heap, platform, config);
   }

-  Marker* marker() const { return marker_.get(); }
+  MarkerBase* marker() const { return GetMarkerRef().get(); }

  private:
   bool SingleStep(MarkingConfig::StackState stack_state) {
-    return marker_->IncrementalMarkingStepForTesting(stack_state);
+    return GetMarkerRef()->IncrementalMarkingStepForTesting(stack_state);
   }
-
-  std::unique_ptr<Marker> marker_;
 };

 constexpr IncrementalMarkingTest::MarkingConfig
@@ -348,9 +348,8 @@ TEST_F(IncrementalMarkingTest,
   InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get(),
                    IncrementalPreciseMarkingConfig);
   root->SetChild(MakeGarbageCollected<GCed>(GetAllocationHandle()));
-  HeapObjectHeader& header = HeapObjectHeader::FromPayload(root->child());
-  EXPECT_FALSE(header.IsMarked());
   FinishSteps(MarkingConfig::StackState::kNoHeapPointers);
+  HeapObjectHeader& header = HeapObjectHeader::FromPayload(root->child());
   EXPECT_TRUE(header.IsMarked());
   FinishMarking();
 }
...
@@ -171,6 +171,77 @@ TEST_F(PrefinalizerTest, PrefinalizerInvocationPreservesOrder) {

 namespace {

+class LinkedNode final : public GarbageCollected<LinkedNode> {
+ public:
+  explicit LinkedNode(LinkedNode* next) : next_(next) {}
+
+  void Trace(Visitor* visitor) const { visitor->Trace(next_); }
+
+  LinkedNode* next() const { return next_; }
+
+  void RemoveNext() {
+    CHECK(next_);
+    next_ = next_->next_;
+  }
+
+ private:
+  Member<LinkedNode> next_;
+};
+
+class MutatingPrefinalizer final
+    : public GarbageCollected<MutatingPrefinalizer> {
+  CPPGC_USING_PRE_FINALIZER(MutatingPrefinalizer, PreFinalizer);
+
+ public:
+  void PreFinalizer() {
+    // Pre-finalizers are generally used to mutate the object graph. The API
+    // does not allow distinguishing between live and dead objects. It is
+    // generally safe to re-write the dead *or* the live object graph. Adding
+    // a dead object to the live graph must not happen.
+    //
+    // RemoveNext() must not trigger a write barrier. In the case all LinkedNode
+    // objects die at the same time, the graph is mutated with a dead object.
+    // This is only safe when the dead object is added to a dead subgraph.
+    parent_node_->RemoveNext();
+  }
+
+  explicit MutatingPrefinalizer(LinkedNode* parent) : parent_node_(parent) {}
+
+  void Trace(Visitor* visitor) const { visitor->Trace(parent_node_); }
+
+ private:
+  Member<LinkedNode> parent_node_;
+};
+
+}  // namespace
+
+TEST_F(PrefinalizerTest, PrefinalizerCanRewireGraphWithLiveObjects) {
+  Persistent<LinkedNode> root{MakeGarbageCollected<LinkedNode>(
+      GetAllocationHandle(),
+      MakeGarbageCollected<LinkedNode>(
+          GetAllocationHandle(),
+          MakeGarbageCollected<LinkedNode>(GetAllocationHandle(), nullptr)))};
+  CHECK(root->next());
+  MakeGarbageCollected<MutatingPrefinalizer>(GetAllocationHandle(), root.Get());
+  PreciseGC();
+}
+
+TEST_F(PrefinalizerTest, PrefinalizerCanRewireGraphWithDeadObjects) {
+  Persistent<LinkedNode> root{MakeGarbageCollected<LinkedNode>(
+      GetAllocationHandle(),
+      MakeGarbageCollected<LinkedNode>(
+          GetAllocationHandle(),
+          MakeGarbageCollected<LinkedNode>(GetAllocationHandle(), nullptr)))};
+  CHECK(root->next());
+  MakeGarbageCollected<MutatingPrefinalizer>(GetAllocationHandle(), root.Get());
+  // All LinkedNode objects will die on the following GC. The pre-finalizer may
+  // still operate with them but not add them to a live object.
+  root.Clear();
+  PreciseGC();
+}
+
+namespace {
+
 class AllocatingPrefinalizer : public GarbageCollected<AllocatingPrefinalizer> {
   CPPGC_USING_PRE_FINALIZER(AllocatingPrefinalizer, PreFinalizer);
...
@@ -86,6 +86,10 @@ class TestWithHeap : public TestWithPlatform {
     return Heap::From(GetHeap())->marker_;
   }

+  const std::unique_ptr<MarkerBase>& GetMarkerRef() const {
+    return Heap::From(GetHeap())->marker_;
+  }
+
   void ResetLinearAllocationBuffers();

  private:
...