Commit 3b82f4c6 authored by Michael Lippautz, committed by Commit Bot

cppgc: Expose write barriers

Exposes an opaque handle for referring to a heap instance uniformly
across cppgc and V8.

Exposes a set of raw write barriers for advanced embedders through
subtle::HeapConsistency, which mirrors the write barrier internals.
The following barriers are exposed:
- DijkstraWriteBarrier: Regular Dijkstra-style write barrier (add to
  wavefront);
- DijkstraWriteBarrierRange: Same as DijkstraWriteBarrier but
  operating on a range of slots that are composite (inlined) objects;
- SteeleWriteBarrier: Regular Steele-style write barrier (retreating
  wavefront).
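
For illustration only (not part of this change): a minimal sketch of how an
advanced embedder might drive these barriers from a custom reference type.
`RawRef` and its members are hypothetical; only the `HeapConsistency` calls
are the API exposed here.

  #include "cppgc/heap-consistency.h"

  // Hypothetical embedder-side reference that is not a cppgc::Member but
  // still wants to cooperate with incremental marking. Assumes instances
  // live inside objects allocated via MakeGarbageCollected().
  template <typename T>
  class RawRef {
   public:
    void Set(T* value) {
      value_ = value;
      // Dijkstra barrier: pushes `value` onto the marking wavefront if
      // marking is running and the object has not been processed yet.
      cppgc::subtle::HeapConsistency::DijkstraWriteBarrier(&value_, value);
    }

    // For an already-marked object whose payload was mutated in place.
    void NotifyMutated(T* object) {
      // Steele barrier: re-enqueues the object for re-scanning (retreating
      // wavefront).
      cppgc::subtle::HeapConsistency::SteeleWriteBarrier(object);
    }

   private:
    T* value_ = nullptr;
  };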

Change-Id: Ib5ac280204686bf887690f72df1cdb506ea6ef70
Bug: chromium:1056170
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2554601
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71381}
parent 3836aeb0
BUILD.gn
@@ -4438,6 +4438,7 @@ v8_source_set("cppgc_base") {
     "include/cppgc/default-platform.h",
     "include/cppgc/ephemeron-pair.h",
     "include/cppgc/garbage-collected.h",
+    "include/cppgc/heap-consistency.h",
     "include/cppgc/heap.h",
     "include/cppgc/internal/api-constants.h",
     "include/cppgc/internal/atomic-entry-flag.h",
include/cppgc/heap-consistency.h (new file)
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef INCLUDE_CPPGC_HEAP_CONSISTENCY_H_
#define INCLUDE_CPPGC_HEAP_CONSISTENCY_H_

#include <cstddef>

#include "cppgc/internal/write-barrier.h"
#include "cppgc/trace-trait.h"
#include "v8config.h"  // NOLINT(build/include_directory)

namespace cppgc {

class HeapHandle;

namespace subtle {

/**
 * **DO NOT USE: Use the appropriate managed types.**
 *
 * Consistency helpers that aid in maintaining a consistent internal state of
 * the garbage collector.
 */
class HeapConsistency final {
 public:
  /**
   * Conservative Dijkstra-style write barrier that processes an object if it
   * has not yet been processed.
   *
   * \param slot A slot containing the pointer to the object. The slot itself
   * must reside in an object that has been allocated using
   * `MakeGarbageCollected()`.
   * \param value The pointer to the object. May be an interior pointer to an
   * interface of the actual object.
   */
  static V8_INLINE void DijkstraWriteBarrier(const void* slot,
                                             const void* value) {
    internal::WriteBarrier::DijkstraMarkingBarrier(slot, value);
  }

  /**
   * Conservative Dijkstra-style write barrier that processes a range of
   * elements if they have not yet been processed.
   *
   * \param heap_callback A callback to retrieve the corresponding heap if
   * necessary.
   * \param first_element Pointer to the first element that should be
   * processed. The slot itself must reside in an object that has been
   * allocated using `MakeGarbageCollected()`.
   * \param element_size Size of the element in bytes.
   * \param number_of_elements Number of elements that should be processed,
   * starting with `first_element`.
   * \param trace_callback The trace callback that should be invoked for each
   * element if necessary.
   */
  template <typename LazyHeapCallback>
  static V8_INLINE void DijkstraWriteBarrierRange(
      LazyHeapCallback heap_callback, const void* first_element,
      size_t element_size, size_t number_of_elements,
      TraceCallback trace_callback) {
    internal::WriteBarrier::DijkstraMarkingBarrierRange(
        heap_callback, first_element, element_size, number_of_elements,
        trace_callback);
  }

  /**
   * Steele-style write barrier that re-processes an object if it has already
   * been processed.
   *
   * \param object The pointer to the object which must point to an object that
   * has been allocated using `MakeGarbageCollected()`. Interior pointers are
   * not supported.
   */
  static V8_INLINE void SteeleWriteBarrier(const void* object) {
    internal::WriteBarrier::SteeleMarkingBarrier(object);
  }

 private:
  HeapConsistency() = delete;
};

}  // namespace subtle
}  // namespace cppgc

#endif  // INCLUDE_CPPGC_HEAP_CONSISTENCY_H_
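
To make the range barrier concrete, an illustrative sketch (not from this
change) of guarding bulk writes into an inlined array of composite objects.
`GCed`, `Item`, and `WriteItems` are made up for the example; the heap handle
comes from the `GetHeapHandle()` accessors added later in this change.

  #include <cstddef>

  #include "cppgc/allocation.h"
  #include "cppgc/garbage-collected.h"
  #include "cppgc/heap-consistency.h"
  #include "cppgc/member.h"

  class GCed : public cppgc::GarbageCollected<GCed> {
   public:
    void Trace(cppgc::Visitor*) const {}
  };

  struct Item {
    void Trace(cppgc::Visitor* v) const { v->Trace(ref); }
    cppgc::Member<GCed> ref;
  };

  // After bulk-writing a range of composite (inlined) elements, re-trace the
  // whole range so that a concurrent marker observes the new references.
  void WriteItems(cppgc::HeapHandle& heap, Item* items, std::size_t count,
                  GCed* value) {
    for (std::size_t i = 0; i < count; ++i) items[i].ref = value;
    cppgc::subtle::HeapConsistency::DijkstraWriteBarrierRange(
        [&heap]() -> cppgc::HeapHandle& { return heap; }, items, sizeof(Item),
        count, cppgc::TraceTrait<Item>::Trace);
  }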
include/cppgc/heap.h
@@ -29,6 +29,11 @@ namespace internal {
 class Heap;
 }  // namespace internal

+/**
+ * Used for additional heap APIs.
+ */
+class HeapHandle;
+
 class V8_EXPORT Heap {
  public:
   /**
@@ -132,6 +137,12 @@ class V8_EXPORT Heap {
   */
   AllocationHandle& GetAllocationHandle();

+  /**
+   * \returns the opaque heap handle which may be used to refer to this heap in
+   * other APIs. Valid as long as the underlying `Heap` is alive.
+   */
+  HeapHandle& GetHeapHandle();
+
  private:
   Heap() = default;
include/cppgc/internal/pointer-policies.h
@@ -28,7 +28,7 @@ struct DijkstraWriteBarrierPolicy {
     // barrier doesn't break the tri-color invariant.
   }
   static void AssigningBarrier(const void* slot, const void* value) {
-    WriteBarrier::MarkingBarrier(slot, value);
+    WriteBarrier::DijkstraMarkingBarrier(slot, value);
   }
 };
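
The policy indirection above is how `Member<T>` picks up the renamed barrier
without touching call sites. Schematically (a simplified stand-in, not the
actual `BasicMember` implementation):

  // Member-like smart pointer routing assignments through the policy. The
  // real cppgc::Member has additional template parameters and checks.
  template <typename T, typename WriteBarrierPolicy>
  class MemberLike {
   public:
    MemberLike& operator=(T* other) {
      raw_ = other;
      // With DijkstraWriteBarrierPolicy this now calls
      // WriteBarrier::DijkstraMarkingBarrier(&raw_, other).
      WriteBarrierPolicy::AssigningBarrier(&raw_, other);
      return *this;
    }

   private:
    T* raw_ = nullptr;
  };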
include/cppgc/internal/write-barrier.h
@@ -7,6 +7,7 @@

 #include "cppgc/internal/api-constants.h"
 #include "cppgc/internal/process-heap.h"
+#include "cppgc/trace-trait.h"
 #include "v8config.h"  // NOLINT(build/include_directory)

 #if defined(CPPGC_CAGED_HEAP)
@@ -14,51 +15,137 @@
 #endif

 namespace cppgc {

+class HeapHandle;
+
 namespace internal {

 class V8_EXPORT WriteBarrier final {
  public:
-  static V8_INLINE void MarkingBarrier(const void* slot, const void* value) {
+  static V8_INLINE void DijkstraMarkingBarrier(const void* slot,
+                                               const void* value) {
 #if defined(CPPGC_CAGED_HEAP)
-    const uintptr_t start =
-        reinterpret_cast<uintptr_t>(value) &
-        ~(api_constants::kCagedHeapReservationAlignment - 1);
-    const uintptr_t slot_offset = reinterpret_cast<uintptr_t>(slot) - start;
-    if (slot_offset > api_constants::kCagedHeapReservationSize) {
-      // Check if slot is on stack or value is sentinel or nullptr. This relies
-      // on the fact that kSentinelPointer is encoded as 0x1.
+    CagedHeapResult result;
+    if (!TryGetCagedHeap(slot, value, result)) {
       return;
     }
-
-    CagedHeapLocalData* local_data =
-        reinterpret_cast<CagedHeapLocalData*>(start);
-    if (V8_UNLIKELY(local_data->is_marking_in_progress)) {
-      MarkingBarrierSlow(value);
+    if (V8_UNLIKELY(result.caged_heap().is_marking_in_progress)) {
+      DijkstraMarkingBarrierSlow(value);
       return;
     }
 #if defined(CPPGC_YOUNG_GENERATION)
-    GenerationalBarrier(local_data, slot, slot_offset,
-                        reinterpret_cast<uintptr_t>(value) - start);
+    GenerationalBarrier(result.caged_heap(), slot, result.slot_offset,
+                        reinterpret_cast<uintptr_t>(value) - result.start);
 #endif
 #else
     if (V8_LIKELY(!ProcessHeap::IsAnyIncrementalOrConcurrentMarking())) return;
-    MarkingBarrierSlowWithSentinelCheck(value);
+    DijkstraMarkingBarrierSlowWithSentinelCheck(value);
 #endif  // CPPGC_CAGED_HEAP
   }

+  template <typename LazyHeapCallback>
+  static V8_INLINE void DijkstraMarkingBarrierRange(
+      LazyHeapCallback heap_callback, const void* first_element,
+      size_t element_size, size_t number_of_elements,
+      TraceCallback trace_callback) {
+#if defined(CPPGC_CAGED_HEAP)
+    CagedHeapResult result;
+    if (!TryGetCagedHeap(first_element, first_element, result)) {
+      return;
+    }
+    if (V8_UNLIKELY(result.caged_heap().is_marking_in_progress)) {
+      DijkstraMarkingBarrierRangeSlow(heap_callback(), first_element,
+                                      element_size, number_of_elements,
+                                      trace_callback);
+      return;
+    }
+#if defined(CPPGC_YOUNG_GENERATION)
+    // We pass 0 as value offset to indicate that there's no information about
+    // the value.
+    GenerationalBarrier(result.caged_heap(), first_element, result.slot_offset,
+                        0);
+#endif
+#else
+    if (V8_LIKELY(!ProcessHeap::IsAnyIncrementalOrConcurrentMarking())) return;
+    // TODO(1056170): Inline a check for is marking on the API if necessary.
+    DijkstraMarkingBarrierRangeSlow(heap_callback(), first_element,
+                                    element_size, number_of_elements,
+                                    trace_callback);
+#endif  // CPPGC_CAGED_HEAP
+  }
+
+  static V8_INLINE void SteeleMarkingBarrier(const void* object) {
+#if defined(CPPGC_CAGED_HEAP)
+    CagedHeapResult result;
+    // The passed slot here is contained within object's header, resulting in
+    // `result` referring to the interiors of `object`.
+    if (!TryGetCagedHeap(object, object, result)) {
+      return;
+    }
+    if (V8_UNLIKELY(result.caged_heap().is_marking_in_progress)) {
+      SteeleMarkingBarrierSlow(object);
+      return;
+    }
+#if defined(CPPGC_YOUNG_GENERATION)
+    // We pass 0 as value offset to indicate that there's no information about
+    // the value.
+    GenerationalBarrier(result.caged_heap(), object, result.slot_offset, 0);
+#endif
+#else
+    if (V8_LIKELY(!ProcessHeap::IsAnyIncrementalOrConcurrentMarking())) return;
+    SteeleMarkingBarrierSlowWithSentinelCheck(object);
+#endif  // CPPGC_CAGED_HEAP
+  }
+
  private:
   WriteBarrier() = delete;

-  static void MarkingBarrierSlow(const void* value);
-  static void MarkingBarrierSlowWithSentinelCheck(const void* value);
+#if defined(CPPGC_CAGED_HEAP)
+  struct CagedHeapResult {
+    uintptr_t start;
+    uintptr_t slot_offset;
+
+    CagedHeapLocalData& caged_heap() const {
+      return *reinterpret_cast<CagedHeapLocalData*>(start);
+    }
+  };
+
+  // The contents of `result` are only valid if `TryGetCagedHeap()` returns
+  // true.
+  static V8_INLINE bool TryGetCagedHeap(const void* slot, const void* value,
+                                        CagedHeapResult& result) {
+    result.start = reinterpret_cast<uintptr_t>(value) &
+                   ~(api_constants::kCagedHeapReservationAlignment - 1);
+    result.slot_offset = reinterpret_cast<uintptr_t>(slot) - result.start;
+    if (result.slot_offset > api_constants::kCagedHeapReservationSize) {
+      // Check if slot is on stack or value is sentinel or nullptr. This relies
+      // on the fact that kSentinelPointer is encoded as 0x1.
+      return false;
+    }
+    return true;
+  }
+#endif  // CPPGC_CAGED_HEAP
+
+  static void DijkstraMarkingBarrierSlow(const void* value);
+  static void DijkstraMarkingBarrierSlowWithSentinelCheck(const void* value);
+  static void DijkstraMarkingBarrierRangeSlow(HeapHandle& heap_handle,
+                                              const void* first_element,
+                                              size_t element_size,
+                                              size_t number_of_elements,
+                                              TraceCallback trace_callback);
+  static void SteeleMarkingBarrierSlow(const void* value);
+  static void SteeleMarkingBarrierSlowWithSentinelCheck(const void* value);

 #if defined(CPPGC_YOUNG_GENERATION)
-  static V8_INLINE void GenerationalBarrier(CagedHeapLocalData* local_data,
+  static V8_INLINE void GenerationalBarrier(CagedHeapLocalData& local_data,
                                             const void* slot,
                                             uintptr_t slot_offset,
                                             uintptr_t value_offset) {
-    const AgeTable& age_table = local_data->age_table;
+    const AgeTable& age_table = local_data.age_table;

     // Bail out if the slot is in young generation.
     if (V8_LIKELY(age_table[slot_offset] == AgeTable::Age::kYoung)) return;
@@ -66,10 +153,10 @@ class V8_EXPORT WriteBarrier final {
     GenerationalBarrierSlow(local_data, age_table, slot, value_offset);
   }

-  static void GenerationalBarrierSlow(CagedHeapLocalData* local_data,
+  static void GenerationalBarrierSlow(CagedHeapLocalData& local_data,
                                       const AgeTable& ageTable,
                                       const void* slot, uintptr_t value_offset);
-#endif
+#endif  // CPPGC_YOUNG_GENERATION
 };

 }  // namespace internal
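
The `TryGetCagedHeap()` fast path relies on the caged heap being one aligned
reservation: masking any pointer into the cage with the reservation alignment
recovers the cage base, where `CagedHeapLocalData` (and thus
`is_marking_in_progress`) lives. A standalone worked example; the constants
are assumptions for illustration, not the real `api_constants` values:

  #include <cinttypes>
  #include <cstdint>
  #include <cstdio>

  constexpr uintptr_t kCagedHeapReservationAlignment = uintptr_t{1} << 32;
  constexpr uintptr_t kCagedHeapReservationSize = uintptr_t{1} << 32;

  int main() {
    // An interior pointer to some object inside the cage.
    const uintptr_t value = 0x456789abcdefull;
    // Masking recovers the cage base: 0x456700000000.
    const uintptr_t start = value & ~(kCagedHeapReservationAlignment - 1);
    // A stack slot lies outside the cage, so its offset from the base exceeds
    // the reservation size and the barrier bails out early. The same filter
    // catches nullptr and the sentinel (encoded as 0x1).
    const uintptr_t stack_slot = 0x7fff12345678ull;
    const uintptr_t slot_offset = stack_slot - start;
    std::printf("cage base: 0x%" PRIxPTR "\n", start);
    std::printf("slot in cage: %d\n",
                static_cast<int>(slot_offset <= kCagedHeapReservationSize));
    return 0;
  }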
include/v8-cppgc.h
@@ -15,6 +15,7 @@
 namespace cppgc {
 class AllocationHandle;
+class HeapHandle;
 }  // namespace cppgc

 namespace v8 {
@@ -40,6 +41,12 @@ class V8_EXPORT CppHeap {
   */
   cppgc::AllocationHandle& GetAllocationHandle();

+  /**
+   * \returns the opaque heap handle which may be used to refer to this heap in
+   * other APIs. Valid as long as the underlying `CppHeap` is alive.
+   */
+  cppgc::HeapHandle& GetHeapHandle();
+
  private:
   CppHeap() = default;
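
With both entry points in place, the same barrier code can serve standalone
cppgc heaps and V8-attached heaps. A hypothetical consumer sketch
(`UseRangeBarrier` and the surrounding setup are assumed):

  void UseRangeBarrier(cppgc::HeapHandle& handle);  // hypothetical

  void Example(cppgc::Heap* standalone, v8::CppHeap* attached) {
    UseRangeBarrier(standalone->GetHeapHandle());
    UseRangeBarrier(attached->GetHeapHandle());
  }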
src/heap/cppgc-js/cpp-heap.cc
@@ -38,6 +38,10 @@ cppgc::AllocationHandle& CppHeap::GetAllocationHandle() {
   return internal::CppHeap::From(this)->object_allocator();
 }

+cppgc::HeapHandle& CppHeap::GetHeapHandle() {
+  return *internal::CppHeap::From(this);
+}
+
 namespace internal {

 namespace {
src/heap/cppgc/heap-base.h
@@ -32,6 +32,12 @@ namespace cppgc {

 class Platform;

+class V8_EXPORT HeapHandle {
+ private:
+  HeapHandle() = default;
+  friend class internal::HeapBase;
+};
+
 namespace internal {

 namespace testing {
@@ -43,7 +49,7 @@ class PreFinalizerHandler;
 class StatsCollector;

 // Base class for heap implementations.
-class V8_EXPORT_PRIVATE HeapBase {
+class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
  public:
   using StackSupport = cppgc::Heap::StackSupport;
@@ -63,6 +69,13 @@ class V8_EXPORT_PRIVATE HeapBase {
     HeapBase& heap_;
   };

+  static HeapBase& From(cppgc::HeapHandle& heap_handle) {
+    return static_cast<HeapBase&>(heap_handle);
+  }
+  static const HeapBase& From(const cppgc::HeapHandle& heap_handle) {
+    return static_cast<const HeapBase&>(heap_handle);
+  }
+
   HeapBase(std::shared_ptr<cppgc::Platform> platform,
            const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
            StackSupport stack_support);
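
The `HeapHandle` above is the classic opaque-handle pattern: a public class
with no members and a private constructor, with the internal implementation
inheriting from it so that `From()` is a zero-cost static downcast. A minimal
standalone analogue (names local to this sketch, not the V8 classes):

  class Handle {
   protected:
    Handle() = default;  // only implementations may construct
  };

  class Impl : public Handle {
   public:
    static Impl& From(Handle& handle) { return static_cast<Impl&>(handle); }
    int state() const { return state_; }

   private:
    int state_ = 42;
  };

  // Callers hold only Handle& and never see Impl's layout.
  int GetState(Handle& handle) { return Impl::From(handle).state(); }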
src/heap/cppgc/heap.cc
@@ -52,6 +52,8 @@ AllocationHandle& Heap::GetAllocationHandle() {
   return internal::Heap::From(this)->object_allocator();
 }

+HeapHandle& Heap::GetHeapHandle() { return *internal::Heap::From(this); }
+
 namespace internal {

 namespace {
src/heap/cppgc/marker.cc
@@ -60,7 +60,7 @@ void VisitRememberedSlots(HeapBase& heap,
                           MutatorMarkingState& mutator_marking_state) {
 #if defined(CPPGC_YOUNG_GENERATION)
   StatsCollector::EnabledScope stats_scope(
-      heap(), StatsCollector::kVisitRememberedSets);
+      heap, StatsCollector::kVisitRememberedSets);
   for (void* slot : heap.remembered_slots()) {
     auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
                             ->ObjectHeaderFromInnerAddress(slot);
@@ -69,7 +69,7 @@ void VisitRememberedSlots(HeapBase& heap,
   // top level (with the guarantee that no objects are currently being in
   // construction). This can be ensured by running young GCs from safe points
   // or by reintroducing nested allocation scopes that avoid finalization.
-  DCHECK(!header.IsInConstruction<AccessMode::kNonAtomic>());
+  DCHECK(!slot_header.template IsInConstruction<AccessMode::kNonAtomic>());

   void* value = *reinterpret_cast<void**>(slot);
   mutator_marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
src/heap/cppgc/marker.h
@@ -103,7 +103,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
   MutatorMarkingState& MutatorMarkingStateForTesting() {
     return mutator_marking_state_;
   }
-  cppgc::Visitor& VisitorForTesting() { return visitor(); }
+  cppgc::Visitor& Visitor() { return visitor(); }

   void ClearAllWorklistsForTesting();
   bool IncrementalMarkingStepForTesting(MarkingConfig::StackState);
src/heap/cppgc/write-barrier.cc
@@ -21,17 +21,15 @@ namespace internal {
 namespace {

-void MarkValue(const BasePage* page, MarkerBase* marker, const void* value) {
+void ProcessMarkValue(HeapObjectHeader& header, MarkerBase* marker,
+                      const void* value) {
 #if defined(CPPGC_CAGED_HEAP)
   DCHECK(reinterpret_cast<CagedHeapLocalData*>(
              reinterpret_cast<uintptr_t>(value) &
              ~(kCagedHeapReservationAlignment - 1))
              ->is_marking_in_progress);
 #endif
-  auto& header =
-      const_cast<HeapObjectHeader&>(page->ObjectHeaderFromInnerAddress(value));
-  if (!header.TryMarkAtomic()) return;
+  DCHECK(header.IsMarked<AccessMode::kAtomic>());
   DCHECK(marker);

   if (V8_UNLIKELY(header.IsInConstruction<AccessMode::kNonAtomic>())) {
@@ -49,13 +47,16 @@ void MarkValue(const BasePage* page, MarkerBase* marker, const void* value) {
 }  // namespace

-void WriteBarrier::MarkingBarrierSlowWithSentinelCheck(const void* value) {
+// static
+void WriteBarrier::DijkstraMarkingBarrierSlowWithSentinelCheck(
+    const void* value) {
   if (!value || value == kSentinelPointer) return;

-  MarkingBarrierSlow(value);
+  DijkstraMarkingBarrierSlow(value);
 }

-void WriteBarrier::MarkingBarrierSlow(const void* value) {
+// static
+void WriteBarrier::DijkstraMarkingBarrierSlow(const void* value) {
   const BasePage* page = BasePage::FromPayload(value);
   const auto* heap = page->heap();
@@ -63,17 +64,66 @@ void WriteBarrier::MarkingBarrierSlow(const void* value) {
   // progress.
   if (!heap->marker()) return;

-  MarkValue(page, heap->marker(), value);
+  auto& header =
+      const_cast<HeapObjectHeader&>(page->ObjectHeaderFromInnerAddress(value));
+  if (!header.TryMarkAtomic()) return;
+
+  ProcessMarkValue(header, heap->marker(), value);
+}
+
+// static
+void WriteBarrier::DijkstraMarkingBarrierRangeSlow(
+    HeapHandle& heap_handle, const void* first_element, size_t element_size,
+    size_t number_of_elements, TraceCallback trace_callback) {
+  auto& heap_base = HeapBase::From(heap_handle);
+  MarkerBase* marker = heap_base.marker();
+  if (!marker) {
+    return;
+  }
+
+  ObjectAllocator::NoAllocationScope no_allocation(
+      heap_base.object_allocator());
+  const char* array = static_cast<const char*>(first_element);
+  while (number_of_elements-- > 0) {
+    trace_callback(&heap_base.marker()->Visitor(), array);
+    array += element_size;
+  }
+}
+
+// static
+void WriteBarrier::SteeleMarkingBarrierSlowWithSentinelCheck(
+    const void* value) {
+  if (!value || value == kSentinelPointer) return;
+
+  SteeleMarkingBarrierSlow(value);
+}
+
+// static
+void WriteBarrier::SteeleMarkingBarrierSlow(const void* value) {
+  const BasePage* page = BasePage::FromPayload(value);
+  const auto* heap = page->heap();
+
+  // Marker being not set up means that no incremental/concurrent marking is in
+  // progress.
+  if (!heap->marker()) return;
+
+  auto& header =
+      const_cast<HeapObjectHeader&>(page->ObjectHeaderFromInnerAddress(value));
+  if (!header.IsMarked<AccessMode::kAtomic>()) return;
+
+  ProcessMarkValue(header, heap->marker(), value);
 }

 #if defined(CPPGC_YOUNG_GENERATION)
-void WriteBarrier::GenerationalBarrierSlow(CagedHeapLocalData* local_data,
+// static
+void WriteBarrier::GenerationalBarrierSlow(CagedHeapLocalData& local_data,
                                            const AgeTable& age_table,
                                            const void* slot,
                                            uintptr_t value_offset) {
-  if (age_table[value_offset] == AgeTable::Age::kOld) return;
+  if (value_offset > 0 && age_table[value_offset] == AgeTable::Age::kOld)
+    return;
   // Record slot.
-  local_data->heap_base->remembered_slots().insert(const_cast<void*>(slot));
+  local_data.heap_base->remembered_slots().insert(const_cast<void*>(slot));
 }
 #endif
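
For context, the generational barrier records old-to-young pointers so that a
minor GC can use them as roots without scanning the whole heap. A simplified
stand-in model (types and the age lookup are assumptions for illustration,
not the cppgc implementation):

  #include <cstdint>
  #include <map>
  #include <set>

  enum class Age : uint8_t { kYoung, kOld };

  struct MiniHeap {
    std::map<const void*, Age> ages;    // stand-in for cppgc's AgeTable
    std::set<void**> remembered_slots;  // roots for the next minor GC

    Age AgeOf(const void* p) const {
      auto it = ages.find(p);
      return it == ages.end() ? Age::kOld : it->second;
    }
  };

  // Mirrors the logic of GenerationalBarrierSlow(): record the slot only when
  // an old object's field now points at a young (or unknown) object.
  inline void GenerationalBarrier(MiniHeap& heap, void** slot, void* value) {
    if (heap.AgeOf(slot) == Age::kYoung) return;          // no record needed
    if (value && heap.AgeOf(value) == Age::kOld) return;  // old-to-old is fine
    heap.remembered_slots.insert(slot);
  }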
test/unittests/heap/cppgc/marker-unittest.cc
@@ -231,7 +231,7 @@ TEST_F(MarkerTest, InConstructionObjectIsEventuallyMarkedEmptyStack) {
   GCedWithCallback* object = MakeGarbageCollected<GCedWithCallback>(
       GetAllocationHandle(), [marker = marker()](GCedWithCallback* obj) {
         Member<GCedWithCallback> member(obj);
-        marker->VisitorForTesting().Trace(member);
+        marker->Visitor().Trace(member);
       });
   EXPECT_FALSE(HeapObjectHeader::FromPayload(object).IsMarked());
   marker()->FinishMarking(MarkingConfig::StackState::kMayContainHeapPointers);
@@ -246,7 +246,7 @@ TEST_F(MarkerTest, InConstructionObjectIsEventuallyMarkedNonEmptyStack) {
   MakeGarbageCollected<GCedWithCallback>(
       GetAllocationHandle(), [marker = marker()](GCedWithCallback* obj) {
         Member<GCedWithCallback> member(obj);
-        marker->VisitorForTesting().Trace(member);
+        marker->Visitor().Trace(member);
         EXPECT_FALSE(HeapObjectHeader::FromPayload(obj).IsMarked());
         marker->FinishMarking(
             MarkingConfig::StackState::kMayContainHeapPointers);
test/unittests/heap/cppgc/minor-gc-unittest.cc
@@ -207,8 +207,8 @@ TYPED_TEST(MinorGCTestForType, OmitGenerationalBarrierForOnStackObject) {
   // Try issuing generational barrier for on-stack object.
   stack_object.ptr = new_object;
-  WriteBarrier::MarkingBarrier(reinterpret_cast<void*>(&stack_object.ptr),
-                               new_object);
+  WriteBarrier::DijkstraMarkingBarrier(
+      reinterpret_cast<void*>(&stack_object.ptr), new_object);
   EXPECT_EQ(set_size_before_barrier, set.size());
 }
test/unittests/heap/cppgc/weak-container-unittest.cc
@@ -80,8 +80,7 @@ TEST_F(WeakContainerTest, TraceableGCedTraced) {
       MakeGarbageCollected<TraceableGCed>(GetAllocationHandle());
   TraceableGCed::n_trace_calls = 0u;
   StartMarking();
-  GetMarkerRef()->VisitorForTesting().TraceWeakContainer(obj, EmptyWeakCallback,
-                                                         nullptr);
+  GetMarkerRef()->Visitor().TraceWeakContainer(obj, EmptyWeakCallback, nullptr);
   FinishMarking(Config::StackState::kNoHeapPointers);
   EXPECT_NE(0u, TraceableGCed::n_trace_calls);
   access(obj);
@@ -92,8 +91,7 @@ TEST_F(WeakContainerTest, NonTraceableGCedNotTraced) {
       MakeGarbageCollected<NonTraceableGCed>(GetAllocationHandle());
   NonTraceableGCed::n_trace_calls = 0u;
   StartMarking();
-  GetMarkerRef()->VisitorForTesting().TraceWeakContainer(obj, EmptyWeakCallback,
-                                                         nullptr);
+  GetMarkerRef()->Visitor().TraceWeakContainer(obj, EmptyWeakCallback, nullptr);
   FinishMarking(Config::StackState::kNoHeapPointers);
   EXPECT_EQ(0u, NonTraceableGCed::n_trace_calls);
   access(obj);
@@ -104,8 +102,7 @@ TEST_F(WeakContainerTest, NonTraceableGCedNotTracedConservatively) {
       MakeGarbageCollected<NonTraceableGCed>(GetAllocationHandle());
   NonTraceableGCed::n_trace_calls = 0u;
   StartMarking();
-  GetMarkerRef()->VisitorForTesting().TraceWeakContainer(obj, EmptyWeakCallback,
-                                                         nullptr);
+  GetMarkerRef()->Visitor().TraceWeakContainer(obj, EmptyWeakCallback, nullptr);
   FinishMarking(Config::StackState::kMayContainHeapPointers);
   EXPECT_NE(0u, NonTraceableGCed::n_trace_calls);
   access(obj);
@@ -118,8 +115,8 @@ TEST_F(WeakContainerTest, ConservativeGCTracesWeakContainer) {
       MakeGarbageCollected<TraceableGCed>(GetAllocationHandle());
   TraceableGCed::n_trace_calls = 0u;
   StartMarking();
-  GetMarkerRef()->VisitorForTesting().TraceWeakContainer(
-      obj, EmptyWeakCallback, nullptr);
+  GetMarkerRef()->Visitor().TraceWeakContainer(obj, EmptyWeakCallback,
+                                               nullptr);
   FinishMarking(Config::StackState::kNoHeapPointers);
   trace_count_without_conservative = TraceableGCed::n_trace_calls;
   access(obj);
@@ -129,8 +126,8 @@ TEST_F(WeakContainerTest, ConservativeGCTracesWeakContainer) {
       MakeGarbageCollected<TraceableGCed>(GetAllocationHandle());
   TraceableGCed::n_trace_calls = 0u;
   StartMarking();
-  GetMarkerRef()->VisitorForTesting().TraceWeakContainer(
-      obj, EmptyWeakCallback, nullptr);
+  GetMarkerRef()->Visitor().TraceWeakContainer(obj, EmptyWeakCallback,
+                                               nullptr);
   FinishMarking(Config::StackState::kMayContainHeapPointers);
   EXPECT_LT(trace_count_without_conservative, TraceableGCed::n_trace_calls);
   access(obj);
@@ -146,8 +143,7 @@ TEST_F(WeakContainerTest, ConservativeGCTracesWeakContainerOnce) {
   USE(another_copy_obj);
   NonTraceableGCed::n_trace_calls = 0u;
   StartMarking();
-  GetMarkerRef()->VisitorForTesting().TraceWeakContainer(obj, EmptyWeakCallback,
-                                                         nullptr);
+  GetMarkerRef()->Visitor().TraceWeakContainer(obj, EmptyWeakCallback, nullptr);
   FinishMarking(Config::StackState::kMayContainHeapPointers);
   EXPECT_EQ(1u, NonTraceableGCed::n_trace_calls);
   access(obj);
@@ -174,8 +170,8 @@ TEST_F(WeakContainerTest, WeakContainerWeakCallbackCalled) {
   WeakCallback::n_callback_called = 0u;
   WeakCallback::obj = nullptr;
   StartMarking();
-  GetMarkerRef()->VisitorForTesting().TraceWeakContainer(
-      obj, WeakCallback::callback, obj);
+  GetMarkerRef()->Visitor().TraceWeakContainer(obj, WeakCallback::callback,
+                                               obj);
   FinishMarking(Config::StackState::kMayContainHeapPointers);
   EXPECT_NE(0u, WeakCallback::n_callback_called);
   EXPECT_EQ(obj, WeakCallback::obj);
test/unittests/heap/cppgc/write-barrier-unittest.cc
@@ -8,7 +8,9 @@
 #include <initializer_list>
 #include <vector>

+#include "include/cppgc/heap-consistency.h"
 #include "include/cppgc/internal/pointer-policies.h"
+#include "src/base/logging.h"
 #include "src/heap/cppgc/heap-object-header.h"
 #include "src/heap/cppgc/marker.h"
 #include "test/unittests/heap/cppgc/tests.h"
@@ -143,6 +145,7 @@ class GCed : public GarbageCollected<GCed> {
 class WriteBarrierTest : public testing::TestWithHeap {
  public:
   WriteBarrierTest() : internal_heap_(Heap::From(GetHeap())) {
+    DCHECK_NULL(GetMarkerRef().get());
     GetMarkerRef() = MarkerFactory::CreateAndStartMarking<Marker>(
         *internal_heap_, GetPlatformHandle().get(),
         IncrementalMarkingScope::kIncrementalConfig);
@@ -317,5 +320,133 @@ TEST_F(WriteBarrierTest, NoWriteBarrierOnMarkedMixinApplication) {
   }
 }

+// =============================================================================
+// Raw barriers. ===============================================================
+// =============================================================================
+
+TEST_F(WriteBarrierTest, DijkstraWriteBarrierTriggersWhenMarkingIsOn) {
+  auto* object1 = MakeGarbageCollected<GCed>(GetAllocationHandle());
+  auto* object2 = MakeGarbageCollected<GCed>(GetAllocationHandle(), object1);
+  {
+    ExpectWriteBarrierFires scope(marker(), {object1});
+    EXPECT_FALSE(object1->IsMarked());
+    subtle::HeapConsistency::DijkstraWriteBarrier(
+        object2->next_ref().GetSlotForTesting(), object2->next_ref().Get());
+    EXPECT_TRUE(object1->IsMarked());
+  }
+}
+
+TEST_F(NoWriteBarrierTest, DijkstraWriteBarrierBailoutWhenMarkingIsOff) {
+  auto* object1 = MakeGarbageCollected<GCed>(GetAllocationHandle());
+  auto* object2 = MakeGarbageCollected<GCed>(GetAllocationHandle(), object1);
+  {
+    EXPECT_FALSE(object1->IsMarked());
+    subtle::HeapConsistency::DijkstraWriteBarrier(
+        object2->next_ref().GetSlotForTesting(), object2->next_ref().Get());
+    EXPECT_FALSE(object1->IsMarked());
+  }
+}
+
+TEST_F(WriteBarrierTest, DijkstraWriteBarrierBailoutIfMarked) {
+  auto* object1 = MakeGarbageCollected<GCed>(GetAllocationHandle());
+  auto* object2 = MakeGarbageCollected<GCed>(GetAllocationHandle(), object1);
+  EXPECT_TRUE(HeapObjectHeader::FromPayload(object1).TryMarkAtomic());
+  {
+    ExpectNoWriteBarrierFires scope(marker(), {object1});
+    subtle::HeapConsistency::DijkstraWriteBarrier(
+        object2->next_ref().GetSlotForTesting(), object2->next_ref().Get());
+  }
+}
+
+namespace {
+
+struct InlinedObject {
+  void Trace(cppgc::Visitor* v) const { v->Trace(ref); }
+
+  Member<GCed> ref;
+};
+
+class GCedWithInlinedArray
+    : public GarbageCollected<GCedWithInlinedArray> {
+ public:
+  static constexpr size_t kNumReferences = 4;
+
+  explicit GCedWithInlinedArray(GCed* value2) {
+    new (&objects[2].ref) Member<GCed>(value2);
+  }
+
+  void Trace(cppgc::Visitor* v) const {
+    for (size_t i = 0; i < kNumReferences; ++i) {
+      v->Trace(objects[i]);
+    }
+  }
+
+  InlinedObject objects[kNumReferences];
+};
+
+}  // namespace
+
+TEST_F(WriteBarrierTest, DijkstraWriteBarrierRangeTriggersWhenMarkingIsOn) {
+  auto* object1 = MakeGarbageCollected<GCed>(GetAllocationHandle());
+  auto* object2 = MakeGarbageCollected<GCedWithInlinedArray>(
+      GetAllocationHandle(), object1);
+  {
+    ExpectWriteBarrierFires scope(marker(), {object1});
+    EXPECT_FALSE(object1->IsMarked());
+    subtle::HeapConsistency::DijkstraWriteBarrierRange(
+        [this]() -> cppgc::HeapHandle& { return GetHeap()->GetHeapHandle(); },
+        object2->objects, sizeof(InlinedObject), 4,
+        TraceTrait<InlinedObject>::Trace);
+    EXPECT_TRUE(object1->IsMarked());
+  }
+}
+
+TEST_F(NoWriteBarrierTest, DijkstraWriteBarrierRangeBailoutWhenMarkingIsOff) {
+  auto* object1 = MakeGarbageCollected<GCed>(GetAllocationHandle());
+  auto* object2 = MakeGarbageCollected<GCedWithInlinedArray>(
+      GetAllocationHandle(), object1);
+  {
+    EXPECT_FALSE(object1->IsMarked());
+    subtle::HeapConsistency::DijkstraWriteBarrierRange(
+        [this]() -> cppgc::HeapHandle& { return GetHeap()->GetHeapHandle(); },
+        object2->objects, sizeof(InlinedObject), 4,
+        TraceTrait<InlinedObject>::Trace);
+    EXPECT_FALSE(object1->IsMarked());
+  }
+}
+
+TEST_F(WriteBarrierTest, DijkstraWriteBarrierRangeBailoutIfMarked) {
+  auto* object1 = MakeGarbageCollected<GCed>(GetAllocationHandle());
+  auto* object2 = MakeGarbageCollected<GCedWithInlinedArray>(
+      GetAllocationHandle(), object1);
+  EXPECT_TRUE(HeapObjectHeader::FromPayload(object1).TryMarkAtomic());
+  {
+    ExpectNoWriteBarrierFires scope(marker(), {object1});
+    subtle::HeapConsistency::DijkstraWriteBarrierRange(
+        [this]() -> cppgc::HeapHandle& { return GetHeap()->GetHeapHandle(); },
+        object2->objects, sizeof(InlinedObject), 4,
+        TraceTrait<InlinedObject>::Trace);
+  }
+}
+
+TEST_F(WriteBarrierTest, SteeleWriteBarrierTriggersWhenMarkingIsOn) {
+  auto* object1 = MakeGarbageCollected<GCed>(GetAllocationHandle());
+  auto* object2 = MakeGarbageCollected<GCed>(GetAllocationHandle(), object1);
+  {
+    ExpectWriteBarrierFires scope(marker(), {object1});
+    EXPECT_TRUE(HeapObjectHeader::FromPayload(object1).TryMarkAtomic());
+    // Steele barrier puts the object on the worklist for rescanning.
+    subtle::HeapConsistency::SteeleWriteBarrier(object2->next_ref().Get());
+  }
+}
+
+TEST_F(WriteBarrierTest, SteeleWriteBarrierBailoutIfNotMarked) {
+  auto* object1 = MakeGarbageCollected<GCed>(GetAllocationHandle());
+  auto* object2 = MakeGarbageCollected<GCed>(GetAllocationHandle(), object1);
+  {
+    ExpectNoWriteBarrierFires scope(marker(), {object1});
+    subtle::HeapConsistency::SteeleWriteBarrier(object2->next_ref().Get());
+  }
+}
+
 }  // namespace internal
 }  // namespace cppgc