Commit c7dfa3fa authored by Anton Bikineev, committed by V8 LUCI CQ

cppgc: young-gen: Add runtime option for young generation

The CL introduces a new runtime option --cppgc-young-generation. The
option can't be applied statically, because V8 options are parsed after
heap initialization. The CL therefore changes minor GC so that young
generation can be enabled dynamically. The mechanism is as follows (a
condensed sketch follows the list):
- the user calls YoungGenerationEnabler::Enable();
- each heap checks during its next atomic pause whether the flag was set;
- if so, the heap enables young generation for itself.
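
A condensed sketch of that handshake, using simplified stand-in types
rather than the real cppgc classes (only the names YoungGenerationEnabler,
IsEnabled(), and generational_gc_supported() come from the patch;
FinishAtomicPause() stands in for ResetRememberedSetAndEnableMinorGCIfNeeded(),
and the rest is illustrative):

```cpp
// Standalone illustration, not V8 code.
#include <cassert>
#include <mutex>

class YoungGenerationEnabler {
 public:
  static void Enable() {
    std::lock_guard<std::mutex> guard(mutex_);
    enabled_ = true;  // The real patch also enters the write-barrier flag here.
  }
  static bool IsEnabled() {
    std::lock_guard<std::mutex> guard(mutex_);
    return enabled_;
  }

 private:
  static bool enabled_;
  static std::mutex mutex_;
};
bool YoungGenerationEnabler::enabled_ = false;
std::mutex YoungGenerationEnabler::mutex_;

class Heap {
 public:
  // Stands in for ResetRememberedSetAndEnableMinorGCIfNeeded(): runs during
  // the atomic pause of a full GC.
  void FinishAtomicPause() {
    if (generational_gc_supported_) return;     // Already enabled earlier.
    if (YoungGenerationEnabler::IsEnabled()) {  // Flag set since the last GC?
      generational_gc_supported_ = true;        // Enable for this heap only.
    }
  }
  bool generational_gc_supported() const { return generational_gc_supported_; }

 private:
  bool generational_gc_supported_ = false;
};

int main() {
  Heap heap;
  heap.FinishAtomicPause();  // Flag not set yet: heap stays single-generation.
  assert(!heap.generational_gc_supported());

  YoungGenerationEnabler::Enable();  // E.g. driven by --cppgc-young-generation.
  heap.FinishAtomicPause();          // The next atomic pause picks it up.
  assert(heap.generational_gc_supported());
}
```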

To avoid write-barrier regressions when young generation is not
enabled, the CL changes the meaning of the global flag from
is-any-incremental-or-concurrent-marking to is-barrier-enabled.
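
Roughly what that rename buys, sketched with a stand-in AtomicEntryFlag
(the helper names below are illustrative, not V8 API): the barrier fast
path checks a single flag that is entered when incremental/concurrent
marking starts and, permanently, when young generation is enabled, so
the common case stays one branch.

```cpp
// Standalone illustration, not V8 code.
#include <atomic>
#include <cassert>

class AtomicEntryFlag {  // Simplified stand-in for cppgc's counter-based flag.
 public:
  void Enter() { entries_.fetch_add(1, std::memory_order_relaxed); }
  void Exit() { entries_.fetch_sub(1, std::memory_order_relaxed); }
  bool MightBeEntered() const {
    return entries_.load(std::memory_order_relaxed) != 0;
  }

 private:
  std::atomic<int> entries_{0};
};

// Previously the flag meant "incremental/concurrent marking is running";
// now it means "some write barrier (marking or generational) is required".
AtomicEntryFlag write_barrier_enabled;

bool IsEnabled() { return write_barrier_enabled.MightBeEntered(); }

void OnIncrementalMarkingStarted() { write_barrier_enabled.Enter(); }
void OnIncrementalMarkingFinished() { write_barrier_enabled.Exit(); }
void OnYoungGenerationEnabled() { write_barrier_enabled.Enter(); }  // Never exited.

void WriteBarrier(void** /*slot*/, void* /*value*/) {
  if (!IsEnabled()) return;  // Single cheap check when no barrier is needed.
  // Slow path: dispatch to the marking or generational barrier (elided).
}

int main() {
  int x = 0;
  void* px = &x;
  assert(!IsEnabled());
  WriteBarrier(&px, nullptr);  // No-op: nothing enabled.
  OnIncrementalMarkingStarted();
  assert(IsEnabled());
  OnIncrementalMarkingFinished();
  assert(!IsEnabled());
  OnYoungGenerationEnabled();  // Keeps the barrier permanently enabled.
  assert(IsEnabled());
}
```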

The runtime option will allow us to test young generation on trybots
and performance bots.

Bug: chromium:1029379
Change-Id: I664cccdcd208225ffcbf9901f1284b56d088c5c3
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3607993
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80274}
parent 69ca2bde
......@@ -59,7 +59,7 @@ class V8_EXPORT Heap {
};
/**
* Specifies supported marking types
* Specifies supported marking types.
*/
enum class MarkingType : uint8_t {
/**
......@@ -79,7 +79,7 @@ class V8_EXPORT Heap {
};
/**
* Specifies supported sweeping types
* Specifies supported sweeping types.
*/
enum class SweepingType : uint8_t {
/**
......
......@@ -75,6 +75,7 @@ struct CagedHeapLocalData final {
CagedHeapLocalData(HeapBase&, PageAllocator&);
bool is_incremental_marking_in_progress = false;
bool is_young_generation_enabled = false;
HeapBase& heap_base;
#if defined(CPPGC_YOUNG_GENERATION)
AgeTable age_table;
......
......@@ -95,12 +95,10 @@ class V8_EXPORT WriteBarrier final {
static void CheckParams(Type expected_type, const Params& params) {}
#endif // !V8_ENABLE_CHECKS
// The IncrementalOrConcurrentUpdater class allows cppgc internal to update
// |incremental_or_concurrent_marking_flag_|.
class IncrementalOrConcurrentMarkingFlagUpdater;
static bool IsAnyIncrementalOrConcurrentMarking() {
return incremental_or_concurrent_marking_flag_.MightBeEntered();
}
// The FlagUpdater class allows cppgc internal to update
// |write_barrier_enabled_|.
class FlagUpdater;
static bool IsEnabled() { return write_barrier_enabled_.MightBeEntered(); }
private:
WriteBarrier() = delete;
......@@ -130,7 +128,7 @@ class V8_EXPORT WriteBarrier final {
const CagedHeapLocalData& local_data, const void* object);
#endif // CPPGC_YOUNG_GENERATION
static AtomicEntryFlag incremental_or_concurrent_marking_flag_;
static AtomicEntryFlag write_barrier_enabled_;
};
template <WriteBarrier::Type type>
......@@ -216,17 +214,17 @@ struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
static V8_INLINE WriteBarrier::Type Get(const void* slot, const void* value,
WriteBarrier::Params& params,
HeapHandleCallback) {
#if !defined(CPPGC_YOUNG_GENERATION)
if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) {
if (V8_LIKELY(!WriteBarrier::IsEnabled()))
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
}
#endif // !CPPGC_YOUNG_GENERATION
bool within_cage = TryGetCagedHeap(slot, value, params);
if (!within_cage) {
return WriteBarrier::Type::kNone;
}
if (V8_LIKELY(!params.caged_heap().is_incremental_marking_in_progress)) {
const bool within_cage = TryGetCagedHeap(slot, value, params);
if (!within_cage) return WriteBarrier::Type::kNone;
const auto& caged_heap = params.caged_heap();
if (V8_LIKELY(!caged_heap.is_incremental_marking_in_progress)) {
#if defined(CPPGC_YOUNG_GENERATION)
if (!caged_heap.is_young_generation_enabled)
return WriteBarrier::Type::kNone;
params.heap = reinterpret_cast<HeapHandle*>(params.start);
params.slot_offset = reinterpret_cast<uintptr_t>(slot) - params.start;
params.value_offset = reinterpret_cast<uintptr_t>(value) - params.start;
......@@ -235,6 +233,8 @@ struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
#endif // !CPPGC_YOUNG_GENERATION
}
// Use marking barrier.
params.heap = reinterpret_cast<HeapHandle*>(params.start);
return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
}
......@@ -247,10 +247,15 @@ struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
static V8_INLINE WriteBarrier::Type Get(const void* slot, const void*,
WriteBarrier::Params& params,
HeapHandleCallback callback) {
if (V8_LIKELY(!WriteBarrier::IsEnabled()))
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
#if defined(CPPGC_YOUNG_GENERATION)
HeapHandle& handle = callback();
if (V8_LIKELY(!IsMarking(handle, params))) {
// params.start is populated by IsMarking().
if (!params.caged_heap().is_young_generation_enabled)
return WriteBarrier::Type::kNone;
params.heap = &handle;
params.slot_offset = reinterpret_cast<uintptr_t>(slot) - params.start;
// params.value_offset stays 0.
......@@ -260,15 +265,12 @@ struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
}
return SetAndReturnType<WriteBarrier::Type::kGenerational>(params);
}
#else // !CPPGC_YOUNG_GENERATION
if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) {
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
}
#else // !defined(CPPGC_YOUNG_GENERATION)
HeapHandle& handle = callback();
if (V8_UNLIKELY(!subtle::HeapState::IsMarking(handle))) {
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
}
#endif // !CPPGC_YOUNG_GENERATION
#endif // !defined(CPPGC_YOUNG_GENERATION)
params.heap = &handle;
return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
}
......@@ -317,7 +319,7 @@ struct WriteBarrierTypeForNonCagedHeapPolicy::ValueModeDispatch<
if (object <= static_cast<void*>(kSentinelPointer)) {
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
}
if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) {
if (V8_LIKELY(!WriteBarrier::IsEnabled())) {
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
}
if (IsMarking(object, &params.heap)) {
......@@ -334,7 +336,7 @@ struct WriteBarrierTypeForNonCagedHeapPolicy::ValueModeDispatch<
static V8_INLINE WriteBarrier::Type Get(const void*, const void*,
WriteBarrier::Params& params,
HeapHandleCallback callback) {
if (V8_UNLIKELY(WriteBarrier::IsAnyIncrementalOrConcurrentMarking())) {
if (V8_UNLIKELY(WriteBarrier::IsEnabled())) {
HeapHandle& handle = callback();
if (IsMarking(handle)) {
params.heap = &handle;
......
......@@ -1257,6 +1257,8 @@ DEFINE_INT(scavenge_task_trigger, 80,
DEFINE_BOOL(scavenge_separate_stack_scanning, false,
"use a separate phase for stack scanning in scavenge")
DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
DEFINE_BOOL(cppgc_young_generation, false,
"run young generation garbage collections in Oilpan")
DEFINE_BOOL(write_protect_code_memory, true, "write protect code memory")
#if defined(V8_ATOMIC_OBJECT_FIELD_WRITES)
#define V8_CONCURRENT_MARKING_BOOL true
......
......@@ -553,7 +553,8 @@ void CppHeap::InitializeTracing(CollectionType collection_type,
collection_type_ = collection_type;
#if defined(CPPGC_YOUNG_GENERATION)
if (*collection_type_ == CollectionType::kMajor)
if (generational_gc_supported() &&
*collection_type_ == CollectionType::kMajor)
cppgc::internal::SequentialUnmarker unmarker(raw_heap());
#endif // defined(CPPGC_YOUNG_GENERATION)
......@@ -661,7 +662,11 @@ void CppHeap::TraceEpilogue() {
USE(bytes_allocated_in_prefinalizers);
#if defined(CPPGC_YOUNG_GENERATION)
ResetRememberedSet();
// Check if the young generation was enabled via flag.
if (FLAG_cppgc_young_generation)
cppgc::internal::YoungGenerationEnabler::Enable();
ResetRememberedSetAndEnableMinorGCIfNeeded();
#endif // defined(CPPGC_YOUNG_GENERATION)
{
......@@ -679,6 +684,7 @@ void CppHeap::TraceEpilogue() {
SweepingType::kAtomic == sweeping_config.sweeping_type);
sweeper().Start(sweeping_config);
}
in_atomic_pause_ = false;
collection_type_.reset();
sweeper().NotifyDoneIfNeeded();
......@@ -687,6 +693,7 @@ void CppHeap::TraceEpilogue() {
void CppHeap::RunMinorGC(StackState stack_state) {
DCHECK(!sweeper_.IsSweepingInProgress());
if (!generational_gc_supported()) return;
if (in_no_gc_scope()) return;
// Minor GC does not support nesting in full GCs.
if (IsMarking()) return;
......
......@@ -77,5 +77,11 @@ CagedHeap::CagedHeap(HeapBase& heap_base, PageAllocator& platform_allocator)
v8::base::PageFreeingMode::kMakeInaccessible);
}
#if defined(CPPGC_YOUNG_GENERATION)
void CagedHeap::EnableGenerationalGC() {
local_data().is_young_generation_enabled = true;
}
#endif // defined(CPPGC_YOUNG_GENERATION)
} // namespace internal
} // namespace cppgc
......@@ -42,6 +42,10 @@ class CagedHeap final {
CagedHeap(const CagedHeap&) = delete;
CagedHeap& operator=(const CagedHeap&) = delete;
#if defined(CPPGC_YOUNG_GENERATION)
void EnableGenerationalGC();
#endif // defined(CPPGC_YOUNG_GENERATION)
AllocatorType& allocator() { return *bounded_allocator_; }
const AllocatorType& allocator() const { return *bounded_allocator_; }
......
......@@ -322,7 +322,13 @@ class CompactionState final {
Pages available_pages_;
};
void CompactPage(NormalPage* page, CompactionState& compaction_state) {
enum class StickyBits : uint8_t {
kDisabled,
kEnabled,
};
void CompactPage(NormalPage* page, CompactionState& compaction_state,
StickyBits sticky_bits) {
compaction_state.AddPage(page);
page->object_start_bitmap().Clear();
......@@ -360,9 +366,12 @@ void CompactPage(NormalPage* page, CompactionState& compaction_state) {
}
// Object is marked.
#if !defined(CPPGC_YOUNG_GENERATION)
#if defined(CPPGC_YOUNG_GENERATION)
if (sticky_bits == StickyBits::kDisabled) header->Unmark();
#else // !defined(CPPGC_YOUNG_GENERATION)
header->Unmark();
#endif
#endif // !defined(CPPGC_YOUNG_GENERATION)
// Potentially unpoison the live object as well as it is the source of
// the copy.
ASAN_UNPOISON_MEMORY_REGION(header->ObjectStart(), header->ObjectSize());
......@@ -373,8 +382,8 @@ void CompactPage(NormalPage* page, CompactionState& compaction_state) {
compaction_state.FinishCompactingPage(page);
}
void CompactSpace(NormalPageSpace* space,
MovableReferences& movable_references) {
void CompactSpace(NormalPageSpace* space, MovableReferences& movable_references,
StickyBits sticky_bits) {
using Pages = NormalPageSpace::Pages;
#ifdef V8_USE_ADDRESS_SANITIZER
......@@ -417,7 +426,7 @@ void CompactSpace(NormalPageSpace* space,
CompactionState compaction_state(space, movable_references);
for (BasePage* page : pages) {
// Large objects do not belong to this arena.
CompactPage(NormalPage::From(page), compaction_state);
CompactPage(NormalPage::From(page), compaction_state, sticky_bits);
}
compaction_state.FinishCompactingSpace();
......@@ -508,8 +517,12 @@ Compactor::CompactableSpaceHandling Compactor::CompactSpacesIfEnabled() {
}
compaction_worklists_.reset();
const bool young_gen_enabled = heap_.heap()->generational_gc_supported();
for (NormalPageSpace* space : compactable_spaces_) {
CompactSpace(space, movable_references);
CompactSpace(
space, movable_references,
young_gen_enabled ? StickyBits::kEnabled : StickyBits::kDisabled);
}
enable_for_next_gc_for_testing_ = false;
......
......@@ -68,10 +68,12 @@ void ExplicitManagementImpl::FreeUnreferencedObject(HeapHandle& heap_handle,
}
#if defined(CPPGC_YOUNG_GENERATION)
auto& heap_base = HeapBase::From(heap_handle);
heap_base.remembered_set().InvalidateRememberedSlotsInRange(
object, reinterpret_cast<uint8_t*>(object) + object_size);
// If this object was registered as remembered, remove it.
heap_base.remembered_set().InvalidateRememberedSourceObject(header);
if (heap_base.generational_gc_supported()) {
heap_base.remembered_set().InvalidateRememberedSlotsInRange(
object, reinterpret_cast<uint8_t*>(object) + object_size);
// If this object was registered as remembered, remove it.
heap_base.remembered_set().InvalidateRememberedSourceObject(header);
}
#endif // defined(CPPGC_YOUNG_GENERATION)
}
......@@ -122,8 +124,11 @@ bool Shrink(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
header.SetAllocatedSize(new_size);
}
#if defined(CPPGC_YOUNG_GENERATION)
base_page.heap().remembered_set().InvalidateRememberedSlotsInRange(
free_start, free_start + size_delta);
auto& heap = base_page.heap();
if (heap.generational_gc_supported()) {
heap.remembered_set().InvalidateRememberedSlotsInRange(
free_start, free_start + size_delta);
}
#endif // defined(CPPGC_YOUNG_GENERATION)
// Return success in any case, as we want to avoid that embedders start
// copying memory because of small deltas.
......
......@@ -20,6 +20,7 @@
#include "src/heap/cppgc/prefinalizer-handler.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/unmarker.h"
#include "src/heap/cppgc/write-barrier.h"
namespace cppgc {
namespace internal {
......@@ -88,7 +89,8 @@ HeapBase::HeapBase(
#endif // defined(CPPGC_YOUNG_GENERATION)
stack_support_(stack_support),
marking_support_(marking_support),
sweeping_support_(sweeping_support) {
sweeping_support_(sweeping_support),
generation_support_(GenerationSupport::kSingleGeneration) {
stats_collector_->RegisterObserver(
&allocation_observer_for_PROCESS_HEAP_STATISTICS_);
}
......@@ -120,7 +122,8 @@ size_t HeapBase::ExecutePreFinalizers() {
}
#if defined(CPPGC_YOUNG_GENERATION)
void HeapBase::ResetRememberedSet() {
void HeapBase::ResetRememberedSetAndEnableMinorGCIfNeeded() {
DCHECK(in_atomic_pause());
class AllLABsAreEmpty final : protected HeapVisitor<AllLABsAreEmpty> {
friend class HeapVisitor<AllLABsAreEmpty>;
......@@ -140,9 +143,30 @@ void HeapBase::ResetRememberedSet() {
bool some_lab_is_set_ = false;
};
DCHECK(AllLABsAreEmpty(raw_heap()).value());
caged_heap().local_data().age_table.Reset(&caged_heap().allocator());
remembered_set_.Reset();
if (generational_gc_supported()) {
caged_heap().local_data().age_table.Reset(&caged_heap().allocator());
remembered_set_.Reset();
return;
}
DCHECK(remembered_set_.IsEmpty());
// Check if the young generation was enabled since the last cycle.
if (YoungGenerationEnabler::IsEnabled()) {
// Enable young generation for the current heap.
caged_heap().EnableGenerationalGC();
generation_support_ = GenerationSupport::kYoungAndOldGenerations;
}
}
void HeapBase::DisableGenerationalGCForTesting() {
DCHECK(caged_heap().local_data().is_young_generation_enabled);
DCHECK_EQ(GenerationSupport::kYoungAndOldGenerations, generation_support_);
caged_heap().local_data().is_young_generation_enabled = false;
generation_support_ = GenerationSupport::kSingleGeneration;
}
#endif // defined(CPPGC_YOUNG_GENERATION)
void HeapBase::Terminate() {
......@@ -168,10 +192,12 @@ void HeapBase::Terminate() {
}
#if defined(CPPGC_YOUNG_GENERATION)
// Unmark the heap so that the sweeper destructs all objects.
// TODO(chromium:1029379): Merge two heap iterations (unmarking + sweeping)
// into forced finalization.
SequentialUnmarker unmarker(raw_heap());
if (generational_gc_supported()) {
// Unmark the heap so that the sweeper destructs all objects.
// TODO(chromium:1029379): Merge two heap iterations (unmarking +
// sweeping) into forced finalization.
SequentialUnmarker unmarker(raw_heap());
}
#endif // defined(CPPGC_YOUNG_GENERATION)
in_atomic_pause_ = true;
......
......@@ -24,6 +24,7 @@
#include "src/heap/cppgc/process-heap.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/sweeper.h"
#include "src/heap/cppgc/write-barrier.h"
#include "v8config.h" // NOLINT(build/include_directory)
#if defined(CPPGC_CAGED_HEAP)
......@@ -214,7 +215,25 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
MarkingType marking_support() const { return marking_support_; }
SweepingType sweeping_support() const { return sweeping_support_; }
bool generational_gc_supported() const {
const bool supported =
(generation_support_ == GenerationSupport::kYoungAndOldGenerations);
#if defined(CPPGC_YOUNG_GENERATION)
DCHECK_IMPLIES(supported, YoungGenerationEnabler::IsEnabled());
#endif // defined(CPPGC_YOUNG_GENERATION)
return supported;
}
#if defined(CPPGC_YOUNG_GENERATION)
void DisableGenerationalGCForTesting();
#endif // defined(CPPGC_YOUNG_GENERATION)
protected:
enum class GenerationSupport : uint8_t {
kSingleGeneration,
kYoungAndOldGenerations,
};
// Used by the incremental scheduler to finalize a GC if supported.
virtual void FinalizeIncrementalGarbageCollectionIfNeeded(
cppgc::Heap::StackState) = 0;
......@@ -227,7 +246,7 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
size_t ExecutePreFinalizers();
#if defined(CPPGC_YOUNG_GENERATION)
void ResetRememberedSet();
void ResetRememberedSetAndEnableMinorGCIfNeeded();
#endif // defined(CPPGC_YOUNG_GENERATION)
PageAllocator* page_allocator() const;
......@@ -286,6 +305,7 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
const MarkingType marking_support_;
const SweepingType sweeping_support_;
GenerationSupport generation_support_;
friend class MarkerBase::IncrementalMarkingTask;
friend class cppgc::subtle::DisallowGarbageCollectionScope;
......
......@@ -190,7 +190,7 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
USE(bytes_allocated_in_prefinalizers);
#if defined(CPPGC_YOUNG_GENERATION)
ResetRememberedSet();
ResetRememberedSetAndEnableMinorGCIfNeeded();
#endif // defined(CPPGC_YOUNG_GENERATION)
subtle::NoGarbageCollectionScope no_gc(*this);
......
......@@ -37,7 +37,7 @@ bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
config.marking_type ==
Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Enter();
WriteBarrier::FlagUpdater::Enter();
#if defined(CPPGC_CAGED_HEAP)
heap.caged_heap().local_data().is_incremental_marking_in_progress = true;
#endif // defined(CPPGC_CAGED_HEAP)
......@@ -51,7 +51,7 @@ bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
config.marking_type ==
Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Exit();
WriteBarrier::FlagUpdater::Exit();
#if defined(CPPGC_CAGED_HEAP)
heap.caged_heap().local_data().is_incremental_marking_in_progress = false;
#endif // defined(CPPGC_CAGED_HEAP)
......@@ -155,7 +155,11 @@ MarkerBase::MarkerBase(HeapBase& heap, cppgc::Platform* platform,
platform_(platform),
foreground_task_runner_(platform_->GetForegroundTaskRunner()),
mutator_marking_state_(heap, marking_worklists_,
heap.compactor().compaction_worklists()) {}
heap.compactor().compaction_worklists()) {
DCHECK_IMPLIES(
config_.collection_type == MarkingConfig::CollectionType::kMinor,
heap_.generational_gc_supported());
}
MarkerBase::~MarkerBase() {
// The fixed point iteration may have found not-fully-constructed objects.
......@@ -340,19 +344,21 @@ void MarkerBase::ProcessWeakness() {
// Call weak callbacks on objects that may now be pointing to dead objects.
LivenessBroker broker = LivenessBrokerFactory::Create();
#if defined(CPPGC_YOUNG_GENERATION)
auto& remembered_set = heap().remembered_set();
if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
// Custom callbacks assume that untraced pointers point to not yet freed
// objects. They must make sure that upon callback completion no
// UntracedMember points to a freed object. This may not hold true if a
// custom callback for an old object operates with a reference to a young
// object that was freed on a minor collection cycle. To maintain the
// invariant that UntracedMembers always point to valid objects, execute
// custom callbacks for old objects on each minor collection cycle.
remembered_set.ExecuteCustomCallbacks(broker);
} else {
// For major GCs, just release all the remembered weak callbacks.
remembered_set.ReleaseCustomCallbacks();
if (heap().generational_gc_supported()) {
auto& remembered_set = heap().remembered_set();
if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
// Custom callbacks assume that untraced pointers point to not yet freed
// objects. They must make sure that upon callback completion no
// UntracedMember points to a freed object. This may not hold true if a
// custom callback for an old object operates with a reference to a young
// object that was freed on a minor collection cycle. To maintain the
// invariant that UntracedMembers always point to valid objects, execute
// custom callbacks for old objects on each minor collection cycle.
remembered_set.ExecuteCustomCallbacks(broker);
} else {
// For major GCs, just release all the remembered weak callbacks.
remembered_set.ReleaseCustomCallbacks();
}
}
#endif // defined(CPPGC_YOUNG_GENERATION)
......@@ -362,7 +368,8 @@ void MarkerBase::ProcessWeakness() {
while (local.Pop(&item)) {
item.callback(broker, item.parameter);
#if defined(CPPGC_YOUNG_GENERATION)
heap().remembered_set().AddWeakCallback(item);
if (heap().generational_gc_supported())
heap().remembered_set().AddWeakCallback(item);
#endif // defined(CPPGC_YOUNG_GENERATION)
}
......
......@@ -30,6 +30,8 @@ void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
#if defined(CPPGC_YOUNG_GENERATION)
DCHECK_LT(begin, end);
if (!page->heap().generational_gc_supported()) return;
// Then, if the page is newly allocated, force the first and last cards to be
// marked as young.
const bool new_page =
......
......@@ -241,8 +241,7 @@ bool PlatformAwareObjectStartBitmap::ShouldForceNonAtomic() {
#if defined(V8_TARGET_ARCH_ARM)
// Use non-atomic accesses on ARMv7 when marking is not active.
if (mode == AccessMode::kAtomic) {
if (V8_LIKELY(!WriteBarrier::IsAnyIncrementalOrConcurrentMarking()))
return true;
if (V8_LIKELY(!WriteBarrier::IsEnabled())) return true;
}
#endif // defined(V8_TARGET_ARCH_ARM)
return false;
......
......@@ -7,6 +7,7 @@
#include <algorithm>
#include "include/cppgc/visitor.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/marking-state.h"
......@@ -76,14 +77,17 @@ void VisitRememberedSourceObjects(
} // namespace
void OldToNewRememberedSet::AddSlot(void* slot) {
DCHECK(heap_.generational_gc_supported());
remembered_slots_.insert(slot);
}
void OldToNewRememberedSet::AddSourceObject(HeapObjectHeader& hoh) {
DCHECK(heap_.generational_gc_supported());
remembered_source_objects_.insert(&hoh);
}
void OldToNewRememberedSet::AddWeakCallback(WeakCallbackItem item) {
DCHECK(heap_.generational_gc_supported());
// TODO(1029379): WeakCallbacks are also executed for weak collections.
// Consider splitting weak-callbacks in custom weak callbacks and ones for
// collections.
......@@ -92,6 +96,7 @@ void OldToNewRememberedSet::AddWeakCallback(WeakCallbackItem item) {
void OldToNewRememberedSet::InvalidateRememberedSlotsInRange(void* begin,
void* end) {
DCHECK(heap_.generational_gc_supported());
// TODO(1029379): The 2 binary walks can be optimized with a custom algorithm.
auto from = remembered_slots_.lower_bound(begin),
to = remembered_slots_.lower_bound(end);
......@@ -108,29 +113,39 @@ void OldToNewRememberedSet::InvalidateRememberedSlotsInRange(void* begin,
void OldToNewRememberedSet::InvalidateRememberedSourceObject(
HeapObjectHeader& header) {
DCHECK(heap_.generational_gc_supported());
remembered_source_objects_.erase(&header);
}
void OldToNewRememberedSet::Visit(Visitor& visitor,
MutatorMarkingState& marking_state) {
DCHECK(heap_.generational_gc_supported());
VisitRememberedSlots(remembered_slots_, heap_, marking_state);
VisitRememberedSourceObjects(remembered_source_objects_, visitor);
}
void OldToNewRememberedSet::ExecuteCustomCallbacks(LivenessBroker broker) {
DCHECK(heap_.generational_gc_supported());
for (const auto& callback : remembered_weak_callbacks_) {
callback.callback(broker, callback.parameter);
}
}
void OldToNewRememberedSet::ReleaseCustomCallbacks() {
DCHECK(heap_.generational_gc_supported());
remembered_weak_callbacks_.clear();
}
void OldToNewRememberedSet::Reset() {
DCHECK(heap_.generational_gc_supported());
remembered_slots_.clear();
remembered_source_objects_.clear();
}
bool OldToNewRememberedSet::IsEmpty() const {
return remembered_slots_.empty() && remembered_source_objects_.empty() &&
remembered_weak_callbacks_.empty();
}
} // namespace internal
} // namespace cppgc
......@@ -45,6 +45,8 @@ class V8_EXPORT_PRIVATE OldToNewRememberedSet final {
void Reset();
bool IsEmpty() const;
private:
friend class MinorGCTest;
......
......@@ -32,6 +32,11 @@ namespace {
using v8::base::Optional;
enum class StickyBits : uint8_t {
kDisabled,
kEnabled,
};
class ObjectStartBitmapVerifier
: private HeapVisitor<ObjectStartBitmapVerifier> {
friend class HeapVisitor<ObjectStartBitmapVerifier>;
......@@ -199,11 +204,14 @@ struct SpaceState {
using SpaceStates = std::vector<SpaceState>;
void StickyUnmark(HeapObjectHeader* header) {
void StickyUnmark(HeapObjectHeader* header, StickyBits sticky_bits) {
#if defined(CPPGC_YOUNG_GENERATION)
// Young generation in Oilpan uses sticky mark bits.
#if !defined(CPPGC_YOUNG_GENERATION)
if (sticky_bits == StickyBits::kDisabled)
header->Unmark<AccessMode::kAtomic>();
#else // !defined(CPPGC_YOUNG_GENERATION)
header->Unmark<AccessMode::kAtomic>();
#endif
#endif // !defined(CPPGC_YOUNG_GENERATION)
}
class InlinedFinalizationBuilderBase {
......@@ -285,13 +293,13 @@ class DeferredFinalizationBuilder final : public FreeHandler {
private:
ResultType result_;
HeapObjectHeader* current_unfinalized_ = 0;
HeapObjectHeader* current_unfinalized_ = nullptr;
bool found_finalizer_ = false;
};
template <typename FinalizationBuilder>
typename FinalizationBuilder::ResultType SweepNormalPage(
NormalPage* page, PageAllocator& page_allocator) {
NormalPage* page, PageAllocator& page_allocator, StickyBits sticky_bits) {
constexpr auto kAtomicAccess = AccessMode::kAtomic;
FinalizationBuilder builder(*page, page_allocator);
......@@ -345,7 +353,7 @@ typename FinalizationBuilder::ResultType SweepNormalPage(
largest_new_free_list_entry =
std::max(largest_new_free_list_entry, new_free_list_entry_size);
}
StickyUnmark(header);
StickyUnmark(header, sticky_bits);
begin += size;
start_of_gap = begin;
live_bytes += size;
......@@ -484,11 +492,15 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;
public:
MutatorThreadSweeper(SpaceStates* states, cppgc::Platform* platform,
MutatorThreadSweeper(HeapBase* heap, SpaceStates* states,
cppgc::Platform* platform,
FreeMemoryHandling free_memory_handling)
: states_(states),
platform_(platform),
free_memory_handling_(free_memory_handling) {}
free_memory_handling_(free_memory_handling),
sticky_bits_(heap->generational_gc_supported()
? StickyBits::kEnabled
: StickyBits::kDisabled) {}
void Sweep() {
for (SpaceState& state : *states_) {
......@@ -553,9 +565,9 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
(free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible)
? SweepNormalPage<
InlinedFinalizationBuilder<DiscardingFreeHandler>>(
&page, *platform_->GetPageAllocator())
&page, *platform_->GetPageAllocator(), sticky_bits_)
: SweepNormalPage<InlinedFinalizationBuilder<RegularFreeHandler>>(
&page, *platform_->GetPageAllocator());
&page, *platform_->GetPageAllocator(), sticky_bits_);
if (result.is_empty) {
NormalPage::Destroy(&page);
} else {
......@@ -572,7 +584,7 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
bool VisitLargePage(LargePage& page) {
HeapObjectHeader* header = page.ObjectHeader();
if (header->IsMarked()) {
StickyUnmark(header);
StickyUnmark(header, sticky_bits_);
page.space().AddPage(&page);
} else {
header->Finalize();
......@@ -585,6 +597,7 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
cppgc::Platform* platform_;
size_t largest_new_free_list_entry_ = 0;
const FreeMemoryHandling free_memory_handling_;
const StickyBits sticky_bits_;
};
class ConcurrentSweepTask final : public cppgc::JobTask,
......@@ -599,7 +612,10 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
: heap_(heap),
states_(states),
platform_(platform),
free_memory_handling_(free_memory_handling) {}
free_memory_handling_(free_memory_handling),
sticky_bits_(heap.generational_gc_supported() ? StickyBits::kEnabled
: StickyBits::kDisabled) {
}
void Run(cppgc::JobDelegate* delegate) final {
StatsCollector::EnabledConcurrentScope stats_scope(
......@@ -627,9 +643,9 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
(free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible)
? SweepNormalPage<
DeferredFinalizationBuilder<DiscardingFreeHandler>>(
&page, *platform_->GetPageAllocator())
&page, *platform_->GetPageAllocator(), sticky_bits_)
: SweepNormalPage<DeferredFinalizationBuilder<RegularFreeHandler>>(
&page, *platform_->GetPageAllocator());
&page, *platform_->GetPageAllocator(), sticky_bits_);
const size_t space_index = page.space().index();
DCHECK_GT(states_->size(), space_index);
SpaceState& space_state = (*states_)[space_index];
......@@ -640,7 +656,7 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
bool VisitLargePage(LargePage& page) {
HeapObjectHeader* header = page.ObjectHeader();
if (header->IsMarked()) {
StickyUnmark(header);
StickyUnmark(header, sticky_bits_);
page.space().AddPage(&page);
return true;
}
......@@ -668,6 +684,7 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
Platform* platform_;
std::atomic_bool is_completed_{false};
const FreeMemoryHandling free_memory_handling_;
const StickyBits sticky_bits_;
};
// This visitor:
......@@ -805,7 +822,7 @@ class Sweeper::SweeperImpl final {
{
// Then, if no matching slot is found in the unfinalized pages, search the
// unswept page. This also helps out the concurrent sweeper.
MutatorThreadSweeper sweeper(&space_states_, platform_,
MutatorThreadSweeper sweeper(heap_.heap(), &space_states_, platform_,
config_.free_memory_handling);
while (auto page = space_state.unswept_pages.Pop()) {
sweeper.SweepPage(**page);
......@@ -863,7 +880,7 @@ class Sweeper::SweeperImpl final {
finalizer.FinalizeHeap(&space_states_);
// Then, help out the concurrent thread.
MutatorThreadSweeper sweeper(&space_states_, platform_,
MutatorThreadSweeper sweeper(heap_.heap(), &space_states_, platform_,
config_.free_memory_handling);
sweeper.Sweep();
......@@ -915,7 +932,7 @@ class Sweeper::SweeperImpl final {
StatsCollector::EnabledScope stats_scope(
stats_collector_, StatsCollector::kIncrementalSweep);
MutatorThreadSweeper sweeper(&space_states_, platform_,
MutatorThreadSweeper sweeper(heap_.heap(), &space_states_, platform_,
config_.free_memory_handling);
{
StatsCollector::EnabledScope inner_stats_scope(
......
......@@ -21,7 +21,14 @@ namespace cppgc {
namespace internal {
// static
AtomicEntryFlag WriteBarrier::incremental_or_concurrent_marking_flag_;
AtomicEntryFlag WriteBarrier::write_barrier_enabled_;
#if defined(CPPGC_YOUNG_GENERATION)
// static
bool YoungGenerationEnabler::is_enabled_;
// static
v8::base::LeakyObject<v8::base::Mutex> YoungGenerationEnabler::mutex_;
#endif // defined(CPPGC_YOUNG_GENERATION)
namespace {
......@@ -199,5 +206,28 @@ bool WriteBarrierTypeForCagedHeapPolicy::IsMarking(
#endif // CPPGC_CAGED_HEAP
#if defined(CPPGC_YOUNG_GENERATION)
void YoungGenerationEnabler::Enable() {
v8::base::LockGuard _(mutex_.get());
if (is_enabled_) return;
// Enter the flag so that the check in the write barrier will always trigger
// when young generation is enabled.
WriteBarrier::FlagUpdater::Enter();
is_enabled_ = true;
}
void YoungGenerationEnabler::DisableForTesting() {
v8::base::LockGuard _(mutex_.get());
if (!is_enabled_) return;
WriteBarrier::FlagUpdater::Exit();
is_enabled_ = false;
}
bool YoungGenerationEnabler::IsEnabled() {
v8::base::LockGuard _(mutex_.get());
return is_enabled_;
}
#endif // defined(CPPGC_YOUNG_GENERATION)
} // namespace internal
} // namespace cppgc
......@@ -6,15 +6,36 @@
#define V8_HEAP_CPPGC_WRITE_BARRIER_H_
#include "include/cppgc/internal/write-barrier.h"
#include "src/base/lazy-instance.h"
#include "src/base/platform/mutex.h"
namespace cppgc {
namespace internal {
class WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater {
class WriteBarrier::FlagUpdater final {
public:
static void Enter() { incremental_or_concurrent_marking_flag_.Enter(); }
static void Exit() { incremental_or_concurrent_marking_flag_.Exit(); }
static void Enter() { write_barrier_enabled_.Enter(); }
static void Exit() { write_barrier_enabled_.Exit(); }
private:
FlagUpdater() = delete;
};
#if defined(CPPGC_YOUNG_GENERATION)
class V8_EXPORT_PRIVATE YoungGenerationEnabler final {
public:
static void Enable();
static void DisableForTesting();
static bool IsEnabled();
private:
YoungGenerationEnabler() = delete;
static bool is_enabled_;
static v8::base::LeakyObject<v8::base::Mutex> mutex_;
};
#endif // defined(CPPGC_YOUNG_GENERATION)
} // namespace internal
} // namespace cppgc
......
......@@ -1668,7 +1668,7 @@ void GCTracer::ReportYoungCycleToRecorder() {
#if defined(CPPGC_YOUNG_GENERATION)
// Managed C++ heap statistics:
auto* cpp_heap = v8::internal::CppHeap::From(heap_->cpp_heap());
if (cpp_heap) {
if (cpp_heap && cpp_heap->generational_gc_supported()) {
auto* metric_recorder = cpp_heap->GetMetricRecorder();
const base::Optional<cppgc::internal::MetricRecorder::GCCycle>
optional_cppgc_event = metric_recorder->ExtractLastYoungGcEvent();
......
......@@ -64,11 +64,20 @@ struct OtherType<Large> {
class MinorGCTest : public testing::TestWithHeap {
public:
MinorGCTest() {
MinorGCTest() : testing::TestWithHeap() {
// Enable young generation flag and run GC. After the first run the heap
// will enable minor GC.
YoungGenerationEnabler::Enable();
CollectMajor();
SimpleGCedBase::destructed_objects = 0;
}
~MinorGCTest() override {
YoungGenerationEnabler::DisableForTesting();
Heap::From(GetHeap())->DisableGenerationalGCForTesting();
}
static size_t DestructedObjects() {
return SimpleGCedBase::destructed_objects;
}
......
......@@ -190,7 +190,7 @@ class NoWriteBarrierTest : public testing::TestWithHeap {};
TEST_F(WriteBarrierTest, EnableDisableIncrementalMarking) {
{
IncrementalMarkingScope scope(marker());
EXPECT_TRUE(WriteBarrier::IsAnyIncrementalOrConcurrentMarking());
EXPECT_TRUE(WriteBarrier::IsEnabled());
}
}
......