Commit c7dfa3fa authored by Anton Bikineev, committed by V8 LUCI CQ

cppgc: young-gen: Add runtime option for young generation

The CL introduces a new option, --cppgc-young-generation. The option can't be
enabled statically, because V8 options are parsed after heap initialization,
so the CL changes minor GC so that it can be enabled dynamically. It works as
follows (a standalone sketch of the pattern follows the list):
- the user calls YoungGenerationEnabler::Enable();
- a heap checks in the next atomic pause whether the flag was enabled;
- if so, the heap enables young generation for itself.
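
The sketch below is a simplified, self-contained illustration of this
enable-on-next-atomic-pause pattern, not the CL's code; the real entry points
are YoungGenerationEnabler::Enable() and
HeapBase::ResetRememberedSetAndEnableMinorGCIfNeeded() in the diff further
down, and the names used here are illustrative only.

// Simplified sketch: a process-wide request flag that each heap latches
// during its next atomic pause (names are illustrative).
#include <atomic>

std::atomic_bool g_young_generation_requested{false};

// Called by the embedder at any time; only records the request.
void RequestYoungGeneration() {
  g_young_generation_requested.store(true, std::memory_order_relaxed);
}

struct HeapLike {
  bool young_generation_enabled = false;

  // Runs while the mutator is stopped in the atomic pause, so flipping the
  // per-heap state cannot race with write barriers or allocations.
  void OnAtomicPause() {
    if (!young_generation_enabled &&
        g_young_generation_requested.load(std::memory_order_relaxed)) {
      young_generation_enabled = true;  // latched; stays on for this heap
    }
  }
};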

To avoid write-barrier regressions when the young generation is not enabled,
the CL changes the meaning of the global flag
is-any-incremental-or-concurrent-marking to is-barrier-enabled (the flag
pattern behind this check is sketched below).
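
The fast path keeps relying on cppgc's AtomicEntryFlag; only its meaning
widens (entered for incremental/concurrent marking or for an enabled young
generation). The following standalone illustration, not the V8 code itself,
shows the shape of that flag: readers pay a single relaxed atomic load, and
MightBeEntered() may conservatively answer true, in which case callers fall
through to a precise slow-path check.

// Standalone illustration of the AtomicEntryFlag pattern (simplified; cppgc
// has its own AtomicEntryFlag with this shape).
#include <atomic>

class AtomicEntryFlag final {
 public:
  void Enter() { entries_.fetch_add(1, std::memory_order_relaxed); }
  void Exit() { entries_.fetch_sub(1, std::memory_order_relaxed); }

  // Returns false only if the flag is definitely not entered. A true return
  // is only a hint; callers must re-check precisely on the slow path.
  bool MightBeEntered() const {
    return entries_.load(std::memory_order_relaxed) != 0;
  }

 private:
  std::atomic_int entries_{0};
};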

The runtime option would enable us to test young generation on try-
and performance-bots.

Bug: chromium:1029379
Change-Id: I664cccdcd208225ffcbf9901f1284b56d088c5c3
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3607993
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80274}
parent 69ca2bde
@@ -59,7 +59,7 @@ class V8_EXPORT Heap {
   };
   /**
-   * Specifies supported marking types
+   * Specifies supported marking types.
    */
   enum class MarkingType : uint8_t {
     /**
@@ -79,7 +79,7 @@ class V8_EXPORT Heap {
   };
   /**
-   * Specifies supported sweeping types
+   * Specifies supported sweeping types.
    */
   enum class SweepingType : uint8_t {
     /**
...
@@ -75,6 +75,7 @@ struct CagedHeapLocalData final {
   CagedHeapLocalData(HeapBase&, PageAllocator&);
   bool is_incremental_marking_in_progress = false;
+  bool is_young_generation_enabled = false;
   HeapBase& heap_base;
 #if defined(CPPGC_YOUNG_GENERATION)
   AgeTable age_table;
...
@@ -95,12 +95,10 @@ class V8_EXPORT WriteBarrier final {
   static void CheckParams(Type expected_type, const Params& params) {}
 #endif  // !V8_ENABLE_CHECKS
-  // The IncrementalOrConcurrentUpdater class allows cppgc internal to update
-  // |incremental_or_concurrent_marking_flag_|.
-  class IncrementalOrConcurrentMarkingFlagUpdater;
-  static bool IsAnyIncrementalOrConcurrentMarking() {
-    return incremental_or_concurrent_marking_flag_.MightBeEntered();
-  }
+  // The FlagUpdater class allows cppgc internal to update
+  // |write_barrier_enabled_|.
+  class FlagUpdater;
+  static bool IsEnabled() { return write_barrier_enabled_.MightBeEntered(); }
  private:
   WriteBarrier() = delete;
@@ -130,7 +128,7 @@ class V8_EXPORT WriteBarrier final {
       const CagedHeapLocalData& local_data, const void* object);
 #endif  // CPPGC_YOUNG_GENERATION
-  static AtomicEntryFlag incremental_or_concurrent_marking_flag_;
+  static AtomicEntryFlag write_barrier_enabled_;
 };
 template <WriteBarrier::Type type>
@@ -216,17 +214,17 @@ struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
   static V8_INLINE WriteBarrier::Type Get(const void* slot, const void* value,
                                           WriteBarrier::Params& params,
                                           HeapHandleCallback) {
-#if !defined(CPPGC_YOUNG_GENERATION)
-    if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) {
-      return SetAndReturnType<WriteBarrier::Type::kNone>(params);
-    }
-#endif  // !CPPGC_YOUNG_GENERATION
-    bool within_cage = TryGetCagedHeap(slot, value, params);
-    if (!within_cage) {
-      return WriteBarrier::Type::kNone;
-    }
-    if (V8_LIKELY(!params.caged_heap().is_incremental_marking_in_progress)) {
+    if (V8_LIKELY(!WriteBarrier::IsEnabled()))
+      return SetAndReturnType<WriteBarrier::Type::kNone>(params);
+    const bool within_cage = TryGetCagedHeap(slot, value, params);
+    if (!within_cage) return WriteBarrier::Type::kNone;
+    const auto& caged_heap = params.caged_heap();
+    if (V8_LIKELY(!caged_heap.is_incremental_marking_in_progress)) {
 #if defined(CPPGC_YOUNG_GENERATION)
+      if (!caged_heap.is_young_generation_enabled)
+        return WriteBarrier::Type::kNone;
       params.heap = reinterpret_cast<HeapHandle*>(params.start);
       params.slot_offset = reinterpret_cast<uintptr_t>(slot) - params.start;
       params.value_offset = reinterpret_cast<uintptr_t>(value) - params.start;
@@ -235,6 +233,8 @@ struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
       return SetAndReturnType<WriteBarrier::Type::kNone>(params);
 #endif  // !CPPGC_YOUNG_GENERATION
     }
+    // Use marking barrier.
     params.heap = reinterpret_cast<HeapHandle*>(params.start);
     return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
   }
@@ -247,10 +247,15 @@ struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
   static V8_INLINE WriteBarrier::Type Get(const void* slot, const void*,
                                           WriteBarrier::Params& params,
                                           HeapHandleCallback callback) {
+    if (V8_LIKELY(!WriteBarrier::IsEnabled()))
+      return SetAndReturnType<WriteBarrier::Type::kNone>(params);
 #if defined(CPPGC_YOUNG_GENERATION)
     HeapHandle& handle = callback();
     if (V8_LIKELY(!IsMarking(handle, params))) {
       // params.start is populated by IsMarking().
+      if (!params.caged_heap().is_young_generation_enabled)
+        return WriteBarrier::Type::kNone;
       params.heap = &handle;
       params.slot_offset = reinterpret_cast<uintptr_t>(slot) - params.start;
       // params.value_offset stays 0.
@@ -260,15 +265,12 @@ struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
       }
       return SetAndReturnType<WriteBarrier::Type::kGenerational>(params);
     }
-#else   // !CPPGC_YOUNG_GENERATION
-    if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) {
-      return SetAndReturnType<WriteBarrier::Type::kNone>(params);
-    }
+#else   // !defined(CPPGC_YOUNG_GENERATION)
     HeapHandle& handle = callback();
     if (V8_UNLIKELY(!subtle::HeapState::IsMarking(handle))) {
       return SetAndReturnType<WriteBarrier::Type::kNone>(params);
     }
-#endif  // !CPPGC_YOUNG_GENERATION
+#endif  // !defined(CPPGC_YOUNG_GENERATION)
     params.heap = &handle;
     return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
   }
@@ -317,7 +319,7 @@ struct WriteBarrierTypeForNonCagedHeapPolicy::ValueModeDispatch<
     if (object <= static_cast<void*>(kSentinelPointer)) {
       return SetAndReturnType<WriteBarrier::Type::kNone>(params);
     }
-    if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) {
+    if (V8_LIKELY(!WriteBarrier::IsEnabled())) {
      return SetAndReturnType<WriteBarrier::Type::kNone>(params);
     }
     if (IsMarking(object, &params.heap)) {
@@ -334,7 +336,7 @@ struct WriteBarrierTypeForNonCagedHeapPolicy::ValueModeDispatch<
   static V8_INLINE WriteBarrier::Type Get(const void*, const void*,
                                           WriteBarrier::Params& params,
                                           HeapHandleCallback callback) {
-    if (V8_UNLIKELY(WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) {
+    if (V8_UNLIKELY(WriteBarrier::IsEnabled())) {
       HeapHandle& handle = callback();
       if (IsMarking(handle)) {
         params.heap = &handle;
...
@@ -1257,6 +1257,8 @@ DEFINE_INT(scavenge_task_trigger, 80,
 DEFINE_BOOL(scavenge_separate_stack_scanning, false,
             "use a separate phase for stack scanning in scavenge")
 DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
+DEFINE_BOOL(cppgc_young_generation, false,
+            "run young generation garbage collections in Oilpan")
 DEFINE_BOOL(write_protect_code_memory, true, "write protect code memory")
 #if defined(V8_ATOMIC_OBJECT_FIELD_WRITES)
 #define V8_CONCURRENT_MARKING_BOOL true
...
@@ -553,7 +553,8 @@ void CppHeap::InitializeTracing(CollectionType collection_type,
   collection_type_ = collection_type;
 #if defined(CPPGC_YOUNG_GENERATION)
-  if (*collection_type_ == CollectionType::kMajor)
+  if (generational_gc_supported() &&
+      *collection_type_ == CollectionType::kMajor)
     cppgc::internal::SequentialUnmarker unmarker(raw_heap());
 #endif  // defined(CPPGC_YOUNG_GENERATION)
@@ -661,7 +662,11 @@ void CppHeap::TraceEpilogue() {
   USE(bytes_allocated_in_prefinalizers);
 #if defined(CPPGC_YOUNG_GENERATION)
-  ResetRememberedSet();
+  // Check if the young generation was enabled via flag.
+  if (FLAG_cppgc_young_generation)
+    cppgc::internal::YoungGenerationEnabler::Enable();
+  ResetRememberedSetAndEnableMinorGCIfNeeded();
 #endif  // defined(CPPGC_YOUNG_GENERATION)
   {
@@ -679,6 +684,7 @@ void CppHeap::TraceEpilogue() {
                    SweepingType::kAtomic == sweeping_config.sweeping_type);
     sweeper().Start(sweeping_config);
   }
   in_atomic_pause_ = false;
   collection_type_.reset();
   sweeper().NotifyDoneIfNeeded();
@@ -687,6 +693,7 @@ void CppHeap::TraceEpilogue() {
 void CppHeap::RunMinorGC(StackState stack_state) {
   DCHECK(!sweeper_.IsSweepingInProgress());
+  if (!generational_gc_supported()) return;
   if (in_no_gc_scope()) return;
   // Minor GC does not support nesting in full GCs.
   if (IsMarking()) return;
...
@@ -77,5 +77,11 @@ CagedHeap::CagedHeap(HeapBase& heap_base, PageAllocator& platform_allocator)
       v8::base::PageFreeingMode::kMakeInaccessible);
 }
+#if defined(CPPGC_YOUNG_GENERATION)
+void CagedHeap::EnableGenerationalGC() {
+  local_data().is_young_generation_enabled = true;
+}
+#endif  // defined(CPPGC_YOUNG_GENERATION)
 }  // namespace internal
 }  // namespace cppgc
@@ -42,6 +42,10 @@ class CagedHeap final {
   CagedHeap(const CagedHeap&) = delete;
   CagedHeap& operator=(const CagedHeap&) = delete;
+#if defined(CPPGC_YOUNG_GENERATION)
+  void EnableGenerationalGC();
+#endif  // defined(CPPGC_YOUNG_GENERATION)
   AllocatorType& allocator() { return *bounded_allocator_; }
   const AllocatorType& allocator() const { return *bounded_allocator_; }
...
@@ -322,7 +322,13 @@ class CompactionState final {
   Pages available_pages_;
 };
-void CompactPage(NormalPage* page, CompactionState& compaction_state) {
+enum class StickyBits : uint8_t {
+  kDisabled,
+  kEnabled,
+};
+void CompactPage(NormalPage* page, CompactionState& compaction_state,
+                 StickyBits sticky_bits) {
   compaction_state.AddPage(page);
   page->object_start_bitmap().Clear();
@@ -360,9 +366,12 @@ void CompactPage(NormalPage* page, CompactionState& compaction_state) {
     }
     // Object is marked.
-#if !defined(CPPGC_YOUNG_GENERATION)
+#if defined(CPPGC_YOUNG_GENERATION)
+    if (sticky_bits == StickyBits::kDisabled) header->Unmark();
+#else   // !defined(CPPGC_YOUNG_GENERATION)
     header->Unmark();
-#endif
+#endif  // !defined(CPPGC_YOUNG_GENERATION)
     // Potentially unpoison the live object as well as it is the source of
     // the copy.
     ASAN_UNPOISON_MEMORY_REGION(header->ObjectStart(), header->ObjectSize());
@@ -373,8 +382,8 @@ void CompactPage(NormalPage* page, CompactionState& compaction_state) {
   compaction_state.FinishCompactingPage(page);
 }
-void CompactSpace(NormalPageSpace* space,
-                  MovableReferences& movable_references) {
+void CompactSpace(NormalPageSpace* space, MovableReferences& movable_references,
+                  StickyBits sticky_bits) {
   using Pages = NormalPageSpace::Pages;
 #ifdef V8_USE_ADDRESS_SANITIZER
@@ -417,7 +426,7 @@ void CompactSpace(NormalPageSpace* space,
   CompactionState compaction_state(space, movable_references);
   for (BasePage* page : pages) {
     // Large objects do not belong to this arena.
-    CompactPage(NormalPage::From(page), compaction_state);
+    CompactPage(NormalPage::From(page), compaction_state, sticky_bits);
   }
   compaction_state.FinishCompactingSpace();
@@ -508,8 +517,12 @@ Compactor::CompactableSpaceHandling Compactor::CompactSpacesIfEnabled() {
   }
   compaction_worklists_.reset();
+  const bool young_gen_enabled = heap_.heap()->generational_gc_supported();
   for (NormalPageSpace* space : compactable_spaces_) {
-    CompactSpace(space, movable_references);
+    CompactSpace(
+        space, movable_references,
+        young_gen_enabled ? StickyBits::kEnabled : StickyBits::kDisabled);
   }
   enable_for_next_gc_for_testing_ = false;
...
@@ -68,10 +68,12 @@ void ExplicitManagementImpl::FreeUnreferencedObject(HeapHandle& heap_handle,
   }
 #if defined(CPPGC_YOUNG_GENERATION)
   auto& heap_base = HeapBase::From(heap_handle);
-  heap_base.remembered_set().InvalidateRememberedSlotsInRange(
-      object, reinterpret_cast<uint8_t*>(object) + object_size);
-  // If this object was registered as remembered, remove it.
-  heap_base.remembered_set().InvalidateRememberedSourceObject(header);
+  if (heap_base.generational_gc_supported()) {
+    heap_base.remembered_set().InvalidateRememberedSlotsInRange(
+        object, reinterpret_cast<uint8_t*>(object) + object_size);
+    // If this object was registered as remembered, remove it.
+    heap_base.remembered_set().InvalidateRememberedSourceObject(header);
+  }
 #endif  // defined(CPPGC_YOUNG_GENERATION)
 }
@@ -122,8 +124,11 @@ bool Shrink(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
     header.SetAllocatedSize(new_size);
   }
 #if defined(CPPGC_YOUNG_GENERATION)
-  base_page.heap().remembered_set().InvalidateRememberedSlotsInRange(
-      free_start, free_start + size_delta);
+  auto& heap = base_page.heap();
+  if (heap.generational_gc_supported()) {
+    heap.remembered_set().InvalidateRememberedSlotsInRange(
+        free_start, free_start + size_delta);
+  }
 #endif  // defined(CPPGC_YOUNG_GENERATION)
   // Return success in any case, as we want to avoid that embedders start
   // copying memory because of small deltas.
...
@@ -20,6 +20,7 @@
 #include "src/heap/cppgc/prefinalizer-handler.h"
 #include "src/heap/cppgc/stats-collector.h"
 #include "src/heap/cppgc/unmarker.h"
+#include "src/heap/cppgc/write-barrier.h"
 namespace cppgc {
 namespace internal {
@@ -88,7 +89,8 @@ HeapBase::HeapBase(
 #endif  // defined(CPPGC_YOUNG_GENERATION)
       stack_support_(stack_support),
       marking_support_(marking_support),
-      sweeping_support_(sweeping_support) {
+      sweeping_support_(sweeping_support),
+      generation_support_(GenerationSupport::kSingleGeneration) {
   stats_collector_->RegisterObserver(
       &allocation_observer_for_PROCESS_HEAP_STATISTICS_);
 }
@@ -120,7 +122,8 @@ size_t HeapBase::ExecutePreFinalizers() {
 }
 #if defined(CPPGC_YOUNG_GENERATION)
-void HeapBase::ResetRememberedSet() {
+void HeapBase::ResetRememberedSetAndEnableMinorGCIfNeeded() {
+  DCHECK(in_atomic_pause());
   class AllLABsAreEmpty final : protected HeapVisitor<AllLABsAreEmpty> {
     friend class HeapVisitor<AllLABsAreEmpty>;
@@ -140,9 +143,30 @@ void HeapBase::ResetRememberedSet() {
     bool some_lab_is_set_ = false;
   };
   DCHECK(AllLABsAreEmpty(raw_heap()).value());
-  caged_heap().local_data().age_table.Reset(&caged_heap().allocator());
-  remembered_set_.Reset();
+  if (generational_gc_supported()) {
+    caged_heap().local_data().age_table.Reset(&caged_heap().allocator());
+    remembered_set_.Reset();
+    return;
+  }
+  DCHECK(remembered_set_.IsEmpty());
+  // Check if the young generation was enabled since the last cycle.
+  if (YoungGenerationEnabler::IsEnabled()) {
+    // Enable young generation for the current heap.
+    caged_heap().EnableGenerationalGC();
+    generation_support_ = GenerationSupport::kYoungAndOldGenerations;
+  }
 }
+void HeapBase::DisableGenerationalGCForTesting() {
+  DCHECK(caged_heap().local_data().is_young_generation_enabled);
+  DCHECK_EQ(GenerationSupport::kYoungAndOldGenerations, generation_support_);
+  caged_heap().local_data().is_young_generation_enabled = false;
+  generation_support_ = GenerationSupport::kSingleGeneration;
+}
 #endif  // defined(CPPGC_YOUNG_GENERATION)
 void HeapBase::Terminate() {
@@ -168,10 +192,12 @@ void HeapBase::Terminate() {
   }
 #if defined(CPPGC_YOUNG_GENERATION)
-  // Unmark the heap so that the sweeper destructs all objects.
-  // TODO(chromium:1029379): Merge two heap iterations (unmarking + sweeping)
-  // into forced finalization.
-  SequentialUnmarker unmarker(raw_heap());
+  if (generational_gc_supported()) {
+    // Unmark the heap so that the sweeper destructs all objects.
+    // TODO(chromium:1029379): Merge two heap iterations (unmarking +
+    // sweeping) into forced finalization.
+    SequentialUnmarker unmarker(raw_heap());
+  }
 #endif  // defined(CPPGC_YOUNG_GENERATION)
   in_atomic_pause_ = true;
...
@@ -24,6 +24,7 @@
 #include "src/heap/cppgc/process-heap.h"
 #include "src/heap/cppgc/raw-heap.h"
 #include "src/heap/cppgc/sweeper.h"
+#include "src/heap/cppgc/write-barrier.h"
 #include "v8config.h"  // NOLINT(build/include_directory)
 #if defined(CPPGC_CAGED_HEAP)
@@ -214,7 +215,25 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
   MarkingType marking_support() const { return marking_support_; }
   SweepingType sweeping_support() const { return sweeping_support_; }
+  bool generational_gc_supported() const {
+    const bool supported =
+        (generation_support_ == GenerationSupport::kYoungAndOldGenerations);
+#if defined(CPPGC_YOUNG_GENERATION)
+    DCHECK_IMPLIES(supported, YoungGenerationEnabler::IsEnabled());
+#endif  // defined(CPPGC_YOUNG_GENERATION)
+    return supported;
+  }
+#if defined(CPPGC_YOUNG_GENERATION)
+  void DisableGenerationalGCForTesting();
+#endif  // defined(CPPGC_YOUNG_GENERATION)
  protected:
+  enum class GenerationSupport : uint8_t {
+    kSingleGeneration,
+    kYoungAndOldGenerations,
+  };
   // Used by the incremental scheduler to finalize a GC if supported.
   virtual void FinalizeIncrementalGarbageCollectionIfNeeded(
       cppgc::Heap::StackState) = 0;
@@ -227,7 +246,7 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
   size_t ExecutePreFinalizers();
 #if defined(CPPGC_YOUNG_GENERATION)
-  void ResetRememberedSet();
+  void ResetRememberedSetAndEnableMinorGCIfNeeded();
 #endif  // defined(CPPGC_YOUNG_GENERATION)
   PageAllocator* page_allocator() const;
@@ -286,6 +305,7 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
   const MarkingType marking_support_;
   const SweepingType sweeping_support_;
+  GenerationSupport generation_support_;
   friend class MarkerBase::IncrementalMarkingTask;
   friend class cppgc::subtle::DisallowGarbageCollectionScope;
...
@@ -190,7 +190,7 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
   USE(bytes_allocated_in_prefinalizers);
 #if defined(CPPGC_YOUNG_GENERATION)
-  ResetRememberedSet();
+  ResetRememberedSetAndEnableMinorGCIfNeeded();
 #endif  // defined(CPPGC_YOUNG_GENERATION)
   subtle::NoGarbageCollectionScope no_gc(*this);
...
@@ -37,7 +37,7 @@ bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
   if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
       config.marking_type ==
           Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
-    WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Enter();
+    WriteBarrier::FlagUpdater::Enter();
 #if defined(CPPGC_CAGED_HEAP)
     heap.caged_heap().local_data().is_incremental_marking_in_progress = true;
 #endif  // defined(CPPGC_CAGED_HEAP)
@@ -51,7 +51,7 @@ bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
   if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
       config.marking_type ==
           Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
-    WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Exit();
+    WriteBarrier::FlagUpdater::Exit();
 #if defined(CPPGC_CAGED_HEAP)
     heap.caged_heap().local_data().is_incremental_marking_in_progress = false;
 #endif  // defined(CPPGC_CAGED_HEAP)
@@ -155,7 +155,11 @@ MarkerBase::MarkerBase(HeapBase& heap, cppgc::Platform* platform,
       platform_(platform),
       foreground_task_runner_(platform_->GetForegroundTaskRunner()),
       mutator_marking_state_(heap, marking_worklists_,
-                             heap.compactor().compaction_worklists()) {}
+                             heap.compactor().compaction_worklists()) {
+  DCHECK_IMPLIES(
+      config_.collection_type == MarkingConfig::CollectionType::kMinor,
+      heap_.generational_gc_supported());
+}
 MarkerBase::~MarkerBase() {
   // The fixed point iteration may have found not-fully-constructed objects.
@@ -340,19 +344,21 @@ void MarkerBase::ProcessWeakness() {
   // Call weak callbacks on objects that may now be pointing to dead objects.
   LivenessBroker broker = LivenessBrokerFactory::Create();
 #if defined(CPPGC_YOUNG_GENERATION)
-  auto& remembered_set = heap().remembered_set();
-  if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
-    // Custom callbacks assume that untraced pointers point to not yet freed
-    // objects. They must make sure that upon callback completion no
-    // UntracedMember points to a freed object. This may not hold true if a
-    // custom callback for an old object operates with a reference to a young
-    // object that was freed on a minor collection cycle. To maintain the
-    // invariant that UntracedMembers always point to valid objects, execute
-    // custom callbacks for old objects on each minor collection cycle.
-    remembered_set.ExecuteCustomCallbacks(broker);
-  } else {
-    // For major GCs, just release all the remembered weak callbacks.
-    remembered_set.ReleaseCustomCallbacks();
+  if (heap().generational_gc_supported()) {
+    auto& remembered_set = heap().remembered_set();
+    if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
+      // Custom callbacks assume that untraced pointers point to not yet freed
+      // objects. They must make sure that upon callback completion no
+      // UntracedMember points to a freed object. This may not hold true if a
+      // custom callback for an old object operates with a reference to a young
+      // object that was freed on a minor collection cycle. To maintain the
+      // invariant that UntracedMembers always point to valid objects, execute
+      // custom callbacks for old objects on each minor collection cycle.
+      remembered_set.ExecuteCustomCallbacks(broker);
+    } else {
+      // For major GCs, just release all the remembered weak callbacks.
+      remembered_set.ReleaseCustomCallbacks();
+    }
   }
 #endif  // defined(CPPGC_YOUNG_GENERATION)
@@ -362,7 +368,8 @@ void MarkerBase::ProcessWeakness() {
   while (local.Pop(&item)) {
     item.callback(broker, item.parameter);
 #if defined(CPPGC_YOUNG_GENERATION)
-    heap().remembered_set().AddWeakCallback(item);
+    if (heap().generational_gc_supported())
+      heap().remembered_set().AddWeakCallback(item);
 #endif  // defined(CPPGC_YOUNG_GENERATION)
   }
...
@@ -30,6 +30,8 @@ void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
 #if defined(CPPGC_YOUNG_GENERATION)
   DCHECK_LT(begin, end);
+  if (!page->heap().generational_gc_supported()) return;
   // Then, if the page is newly allocated, force the first and last cards to be
   // marked as young.
   const bool new_page =
...
@@ -241,8 +241,7 @@ bool PlatformAwareObjectStartBitmap::ShouldForceNonAtomic() {
 #if defined(V8_TARGET_ARCH_ARM)
   // Use non-atomic accesses on ARMv7 when marking is not active.
   if (mode == AccessMode::kAtomic) {
-    if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking()))
-      return true;
+    if (V8_LIKELY(!WriteBarrier::IsEnabled())) return true;
   }
 #endif  // defined(V8_TARGET_ARCH_ARM)
   return false;
...
@@ -7,6 +7,7 @@
 #include <algorithm>
 #include "include/cppgc/visitor.h"
+#include "src/heap/cppgc/heap-base.h"
 #include "src/heap/cppgc/heap-object-header.h"
 #include "src/heap/cppgc/heap-page.h"
 #include "src/heap/cppgc/marking-state.h"
@@ -76,14 +77,17 @@ void VisitRememberedSourceObjects(
 }  // namespace
 void OldToNewRememberedSet::AddSlot(void* slot) {
+  DCHECK(heap_.generational_gc_supported());
   remembered_slots_.insert(slot);
 }
 void OldToNewRememberedSet::AddSourceObject(HeapObjectHeader& hoh) {
+  DCHECK(heap_.generational_gc_supported());
   remembered_source_objects_.insert(&hoh);
 }
 void OldToNewRememberedSet::AddWeakCallback(WeakCallbackItem item) {
+  DCHECK(heap_.generational_gc_supported());
   // TODO(1029379): WeakCallbacks are also executed for weak collections.
   // Consider splitting weak-callbacks in custom weak callbacks and ones for
   // collections.
@@ -92,6 +96,7 @@ void OldToNewRememberedSet::AddWeakCallback(WeakCallbackItem item) {
 void OldToNewRememberedSet::InvalidateRememberedSlotsInRange(void* begin,
                                                              void* end) {
+  DCHECK(heap_.generational_gc_supported());
   // TODO(1029379): The 2 binary walks can be optimized with a custom algorithm.
   auto from = remembered_slots_.lower_bound(begin),
        to = remembered_slots_.lower_bound(end);
@@ -108,29 +113,39 @@ void OldToNewRememberedSet::InvalidateRememberedSlotsInRange(void* begin,
 void OldToNewRememberedSet::InvalidateRememberedSourceObject(
     HeapObjectHeader& header) {
+  DCHECK(heap_.generational_gc_supported());
   remembered_source_objects_.erase(&header);
 }
 void OldToNewRememberedSet::Visit(Visitor& visitor,
                                   MutatorMarkingState& marking_state) {
+  DCHECK(heap_.generational_gc_supported());
   VisitRememberedSlots(remembered_slots_, heap_, marking_state);
   VisitRememberedSourceObjects(remembered_source_objects_, visitor);
 }
 void OldToNewRememberedSet::ExecuteCustomCallbacks(LivenessBroker broker) {
+  DCHECK(heap_.generational_gc_supported());
   for (const auto& callback : remembered_weak_callbacks_) {
     callback.callback(broker, callback.parameter);
   }
 }
 void OldToNewRememberedSet::ReleaseCustomCallbacks() {
+  DCHECK(heap_.generational_gc_supported());
   remembered_weak_callbacks_.clear();
 }
 void OldToNewRememberedSet::Reset() {
+  DCHECK(heap_.generational_gc_supported());
   remembered_slots_.clear();
   remembered_source_objects_.clear();
 }
+bool OldToNewRememberedSet::IsEmpty() const {
+  return remembered_slots_.empty() && remembered_source_objects_.empty() &&
+         remembered_weak_callbacks_.empty();
+}
 }  // namespace internal
 }  // namespace cppgc
@@ -45,6 +45,8 @@ class V8_EXPORT_PRIVATE OldToNewRememberedSet final {
   void Reset();
+  bool IsEmpty() const;
  private:
   friend class MinorGCTest;
...
@@ -32,6 +32,11 @@ namespace {
 using v8::base::Optional;
+enum class StickyBits : uint8_t {
+  kDisabled,
+  kEnabled,
+};
 class ObjectStartBitmapVerifier
     : private HeapVisitor<ObjectStartBitmapVerifier> {
   friend class HeapVisitor<ObjectStartBitmapVerifier>;
@@ -199,11 +204,14 @@ struct SpaceState {
 using SpaceStates = std::vector<SpaceState>;
-void StickyUnmark(HeapObjectHeader* header) {
-  // Young generation in Oilpan uses sticky mark bits.
-#if !defined(CPPGC_YOUNG_GENERATION)
+void StickyUnmark(HeapObjectHeader* header, StickyBits sticky_bits) {
+#if defined(CPPGC_YOUNG_GENERATION)
+  // Young generation in Oilpan uses sticky mark bits.
+  if (sticky_bits == StickyBits::kDisabled)
+    header->Unmark<AccessMode::kAtomic>();
+#else   // !defined(CPPGC_YOUNG_GENERATION)
   header->Unmark<AccessMode::kAtomic>();
-#endif
+#endif  // !defined(CPPGC_YOUNG_GENERATION)
 }
 class InlinedFinalizationBuilderBase {
@@ -285,13 +293,13 @@ class DeferredFinalizationBuilder final : public FreeHandler {
  private:
   ResultType result_;
-  HeapObjectHeader* current_unfinalized_ = 0;
+  HeapObjectHeader* current_unfinalized_ = nullptr;
   bool found_finalizer_ = false;
 };
 template <typename FinalizationBuilder>
 typename FinalizationBuilder::ResultType SweepNormalPage(
-    NormalPage* page, PageAllocator& page_allocator) {
+    NormalPage* page, PageAllocator& page_allocator, StickyBits sticky_bits) {
   constexpr auto kAtomicAccess = AccessMode::kAtomic;
   FinalizationBuilder builder(*page, page_allocator);
@@ -345,7 +353,7 @@ typename FinalizationBuilder::ResultType SweepNormalPage(
       largest_new_free_list_entry =
           std::max(largest_new_free_list_entry, new_free_list_entry_size);
     }
-    StickyUnmark(header);
+    StickyUnmark(header, sticky_bits);
     begin += size;
     start_of_gap = begin;
     live_bytes += size;
@@ -484,11 +492,15 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
   using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;
  public:
-  MutatorThreadSweeper(SpaceStates* states, cppgc::Platform* platform,
+  MutatorThreadSweeper(HeapBase* heap, SpaceStates* states,
+                       cppgc::Platform* platform,
                        FreeMemoryHandling free_memory_handling)
       : states_(states),
         platform_(platform),
-        free_memory_handling_(free_memory_handling) {}
+        free_memory_handling_(free_memory_handling),
+        sticky_bits_(heap->generational_gc_supported()
+                         ? StickyBits::kEnabled
+                         : StickyBits::kDisabled) {}
   void Sweep() {
     for (SpaceState& state : *states_) {
@@ -553,9 +565,9 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
         (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible)
             ? SweepNormalPage<
                   InlinedFinalizationBuilder<DiscardingFreeHandler>>(
-                  &page, *platform_->GetPageAllocator())
+                  &page, *platform_->GetPageAllocator(), sticky_bits_)
             : SweepNormalPage<InlinedFinalizationBuilder<RegularFreeHandler>>(
-                  &page, *platform_->GetPageAllocator());
+                  &page, *platform_->GetPageAllocator(), sticky_bits_);
     if (result.is_empty) {
       NormalPage::Destroy(&page);
     } else {
@@ -572,7 +584,7 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
   bool VisitLargePage(LargePage& page) {
     HeapObjectHeader* header = page.ObjectHeader();
     if (header->IsMarked()) {
-      StickyUnmark(header);
+      StickyUnmark(header, sticky_bits_);
       page.space().AddPage(&page);
     } else {
       header->Finalize();
@@ -585,6 +597,7 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
   cppgc::Platform* platform_;
   size_t largest_new_free_list_entry_ = 0;
   const FreeMemoryHandling free_memory_handling_;
+  const StickyBits sticky_bits_;
 };
 class ConcurrentSweepTask final : public cppgc::JobTask,
@@ -599,7 +612,10 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
       : heap_(heap),
         states_(states),
         platform_(platform),
-        free_memory_handling_(free_memory_handling) {}
+        free_memory_handling_(free_memory_handling),
+        sticky_bits_(heap.generational_gc_supported() ? StickyBits::kEnabled
+                                                      : StickyBits::kDisabled) {
+  }
   void Run(cppgc::JobDelegate* delegate) final {
     StatsCollector::EnabledConcurrentScope stats_scope(
@@ -627,9 +643,9 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
         (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible)
            ? SweepNormalPage<
                  DeferredFinalizationBuilder<DiscardingFreeHandler>>(
-                 &page, *platform_->GetPageAllocator())
+                 &page, *platform_->GetPageAllocator(), sticky_bits_)
            : SweepNormalPage<DeferredFinalizationBuilder<RegularFreeHandler>>(
-                 &page, *platform_->GetPageAllocator());
+                 &page, *platform_->GetPageAllocator(), sticky_bits_);
     const size_t space_index = page.space().index();
     DCHECK_GT(states_->size(), space_index);
     SpaceState& space_state = (*states_)[space_index];
@@ -640,7 +656,7 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
   bool VisitLargePage(LargePage& page) {
     HeapObjectHeader* header = page.ObjectHeader();
     if (header->IsMarked()) {
-      StickyUnmark(header);
+      StickyUnmark(header, sticky_bits_);
       page.space().AddPage(&page);
       return true;
     }
@@ -668,6 +684,7 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
   Platform* platform_;
   std::atomic_bool is_completed_{false};
   const FreeMemoryHandling free_memory_handling_;
+  const StickyBits sticky_bits_;
 };
 // This visitor:
@@ -805,7 +822,7 @@ class Sweeper::SweeperImpl final {
     {
       // Then, if no matching slot is found in the unfinalized pages, search the
      // unswept page. This also helps out the concurrent sweeper.
-      MutatorThreadSweeper sweeper(&space_states_, platform_,
+      MutatorThreadSweeper sweeper(heap_.heap(), &space_states_, platform_,
                                    config_.free_memory_handling);
       while (auto page = space_state.unswept_pages.Pop()) {
         sweeper.SweepPage(**page);
@@ -863,7 +880,7 @@ class Sweeper::SweeperImpl final {
     finalizer.FinalizeHeap(&space_states_);
     // Then, help out the concurrent thread.
-    MutatorThreadSweeper sweeper(&space_states_, platform_,
+    MutatorThreadSweeper sweeper(heap_.heap(), &space_states_, platform_,
                                  config_.free_memory_handling);
     sweeper.Sweep();
@@ -915,7 +932,7 @@ class Sweeper::SweeperImpl final {
     StatsCollector::EnabledScope stats_scope(
         stats_collector_, StatsCollector::kIncrementalSweep);
-    MutatorThreadSweeper sweeper(&space_states_, platform_,
+    MutatorThreadSweeper sweeper(heap_.heap(), &space_states_, platform_,
                                  config_.free_memory_handling);
     {
       StatsCollector::EnabledScope inner_stats_scope(
...
@@ -21,7 +21,14 @@ namespace cppgc {
 namespace internal {
 // static
-AtomicEntryFlag WriteBarrier::incremental_or_concurrent_marking_flag_;
+AtomicEntryFlag WriteBarrier::write_barrier_enabled_;
+#if defined(CPPGC_YOUNG_GENERATION)
+// static
+bool YoungGenerationEnabler::is_enabled_;
+// static
+v8::base::LeakyObject<v8::base::Mutex> YoungGenerationEnabler::mutex_;
+#endif  // defined(CPPGC_YOUNG_GENERATION)
 namespace {
@@ -199,5 +206,28 @@ bool WriteBarrierTypeForCagedHeapPolicy::IsMarking(
 #endif  // CPPGC_CAGED_HEAP
+#if defined(CPPGC_YOUNG_GENERATION)
+void YoungGenerationEnabler::Enable() {
+  v8::base::LockGuard _(mutex_.get());
+  if (is_enabled_) return;
+  // Enter the flag so that the check in the write barrier will always trigger
+  // when young generation is enabled.
+  WriteBarrier::FlagUpdater::Enter();
+  is_enabled_ = true;
+}
+void YoungGenerationEnabler::DisableForTesting() {
+  v8::base::LockGuard _(mutex_.get());
+  if (!is_enabled_) return;
+  WriteBarrier::FlagUpdater::Exit();
+  is_enabled_ = false;
+}
+bool YoungGenerationEnabler::IsEnabled() {
+  v8::base::LockGuard _(mutex_.get());
+  return is_enabled_;
+}
+#endif  // defined(CPPGC_YOUNG_GENERATION)
 }  // namespace internal
 }  // namespace cppgc
@@ -6,15 +6,36 @@
 #define V8_HEAP_CPPGC_WRITE_BARRIER_H_
 #include "include/cppgc/internal/write-barrier.h"
+#include "src/base/lazy-instance.h"
+#include "src/base/platform/mutex.h"
 namespace cppgc {
 namespace internal {
-class WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater {
+class WriteBarrier::FlagUpdater final {
  public:
-  static void Enter() { incremental_or_concurrent_marking_flag_.Enter(); }
-  static void Exit() { incremental_or_concurrent_marking_flag_.Exit(); }
+  static void Enter() { write_barrier_enabled_.Enter(); }
+  static void Exit() { write_barrier_enabled_.Exit(); }
+ private:
+  FlagUpdater() = delete;
+};
+#if defined(CPPGC_YOUNG_GENERATION)
+class V8_EXPORT_PRIVATE YoungGenerationEnabler final {
+ public:
+  static void Enable();
+  static void DisableForTesting();
+  static bool IsEnabled();
+ private:
+  YoungGenerationEnabler() = delete;
+  static bool is_enabled_;
+  static v8::base::LeakyObject<v8::base::Mutex> mutex_;
 };
+#endif  // defined(CPPGC_YOUNG_GENERATION)
 }  // namespace internal
 }  // namespace cppgc
...
@@ -1668,7 +1668,7 @@ void GCTracer::ReportYoungCycleToRecorder() {
 #if defined(CPPGC_YOUNG_GENERATION)
   // Managed C++ heap statistics:
   auto* cpp_heap = v8::internal::CppHeap::From(heap_->cpp_heap());
-  if (cpp_heap) {
+  if (cpp_heap && cpp_heap->generational_gc_supported()) {
     auto* metric_recorder = cpp_heap->GetMetricRecorder();
     const base::Optional<cppgc::internal::MetricRecorder::GCCycle>
         optional_cppgc_event = metric_recorder->ExtractLastYoungGcEvent();
...
@@ -64,11 +64,20 @@ struct OtherType<Large> {
 class MinorGCTest : public testing::TestWithHeap {
  public:
-  MinorGCTest() {
+  MinorGCTest() : testing::TestWithHeap() {
+    // Enable young generation flag and run GC. After the first run the heap
+    // will enable minor GC.
+    YoungGenerationEnabler::Enable();
     CollectMajor();
     SimpleGCedBase::destructed_objects = 0;
   }
+  ~MinorGCTest() override {
+    YoungGenerationEnabler::DisableForTesting();
+    Heap::From(GetHeap())->DisableGenerationalGCForTesting();
+  }
   static size_t DestructedObjects() {
     return SimpleGCedBase::destructed_objects;
   }
...
@@ -190,7 +190,7 @@ class NoWriteBarrierTest : public testing::TestWithHeap {};
 TEST_F(WriteBarrierTest, EnableDisableIncrementalMarking) {
   {
     IncrementalMarkingScope scope(marker());
-    EXPECT_TRUE(WriteBarrier::IsAnyIncrementalOrConcurrentMarking());
+    EXPECT_TRUE(WriteBarrier::IsEnabled());
   }
 }
...