Commit 49c406d1 authored by Michael Lippautz, committed by Commit Bot

[heap] Put minor MC behind a build time flag

gn flag: v8_enable_minor_mc

The default is 'true' until infra is updated to be able to build and
test with it using this flag.

Bug: v8:7638
Change-Id: I7946eb9bf4087c528d1a844b156a726a1c0671bf
Reviewed-on: https://chromium-review.googlesource.com/1000777
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52485}
parent 79a60792
......@@ -147,6 +147,9 @@ declare_args() {
# Enable mitigations for executing untrusted code.
v8_untrusted_code_mitigations = true
# Enable minor mark compact.
v8_enable_minor_mc = true
}
# Derived defaults.
......@@ -274,6 +277,9 @@ config("features") {
if (v8_enable_vtunejit) {
defines += [ "ENABLE_VTUNE_JIT_INTERFACE" ]
}
if (v8_enable_minor_mc) {
defines += [ "ENABLE_MINOR_MC" ]
}
if (v8_enable_object_print) {
defines += [ "OBJECT_PRINT" ]
}
......
......@@ -692,11 +692,6 @@ DEFINE_BOOL(concurrent_marking, V8_CONCURRENT_MARKING_BOOL,
DEFINE_BOOL(parallel_marking, true, "use parallel marking in atomic pause")
DEFINE_IMPLICATION(parallel_marking, concurrent_marking)
DEFINE_BOOL(trace_concurrent_marking, false, "trace concurrent marking")
DEFINE_BOOL(minor_mc_parallel_marking, true,
"use parallel marking for the young generation")
DEFINE_BOOL(trace_minor_mc_parallel_marking, false,
"trace parallel marking for the young generation")
DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
DEFINE_BOOL(black_allocation, true, "use black allocation")
DEFINE_BOOL(concurrent_store_buffer, true,
"use concurrent store buffer processing")
......@@ -1079,6 +1074,17 @@ DEFINE_STRING(startup_src, nullptr,
DEFINE_STRING(startup_blob, nullptr,
"Write V8 startup blob file. (mksnapshot only)")
//
// Minor mark compact collector flags.
//
#ifdef ENABLE_MINOR_MC
DEFINE_BOOL(minor_mc_parallel_marking, true,
"use parallel marking for the young generation")
DEFINE_BOOL(trace_minor_mc_parallel_marking, false,
"trace parallel marking for the young generation")
DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
#endif // ENABLE_MINOR_MC
//
// Dev shell flags
//
......@@ -1345,7 +1351,9 @@ DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_marking)
DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_pointer_update)
DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_scavenge)
DEFINE_NEG_IMPLICATION(single_threaded_gc, concurrent_store_buffer)
#ifdef ENABLE_MINOR_MC
DEFINE_NEG_IMPLICATION(single_threaded_gc, minor_mc_parallel_marking)
#endif // ENABLE_MINOR_MC
DEFINE_NEG_IMPLICATION(single_threaded_gc, concurrent_array_buffer_freeing)
#undef FLAG
......
......@@ -1852,6 +1852,7 @@ void Heap::MarkCompact() {
}
void Heap::MinorMarkCompact() {
#ifdef ENABLE_MINOR_MC
DCHECK(FLAG_minor_mc);
PauseAllocationObserversScope pause_observers(this);
......@@ -1869,6 +1870,9 @@ void Heap::MinorMarkCompact() {
LOG(isolate_, ResourceEvent("MinorMarkCompact", "end"));
SetGCState(NOT_IN_GC);
#else
UNREACHABLE();
#endif // ENABLE_MINOR_MC
}
void Heap::MarkCompactEpilogue() {
......@@ -5911,7 +5915,11 @@ bool Heap::SetUp() {
}
tracer_ = new GCTracer(this);
#ifdef ENABLE_MINOR_MC
minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
#else
minor_mark_compact_collector_ = nullptr;
#endif // ENABLE_MINOR_MC
array_buffer_collector_ = new ArrayBufferCollector(this);
gc_idle_time_handler_ = new GCIdleTimeHandler();
memory_reducer_ = new MemoryReducer(this);
......@@ -5928,9 +5936,11 @@ bool Heap::SetUp() {
store_buffer()->SetUp();
mark_compact_collector()->SetUp();
#ifdef ENABLE_MINOR_MC
if (minor_mark_compact_collector() != nullptr) {
minor_mark_compact_collector()->SetUp();
}
#endif // ENABLE_MINOR_MC
idle_scavenge_observer_ = new IdleScavengeObserver(
*this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
......@@ -6112,11 +6122,13 @@ void Heap::TearDown() {
mark_compact_collector_ = nullptr;
}
#ifdef ENABLE_MINOR_MC
if (minor_mark_compact_collector_ != nullptr) {
minor_mark_compact_collector_->TearDown();
delete minor_mark_compact_collector_;
minor_mark_compact_collector_ = nullptr;
}
#endif // ENABLE_MINOR_MC
if (array_buffer_collector_ != nullptr) {
delete array_buffer_collector_;
......
......@@ -716,7 +716,11 @@ class Heap {
}
// Selects the collector used for young-generation GCs: the minor
// mark-compactor when it is compiled in (ENABLE_MINOR_MC) and enabled at
// runtime via --minor-mc, otherwise the Scavenger.
static inline GarbageCollector YoungGenerationCollector() {
// Use #ifdef rather than #if: ENABLE_MINOR_MC is defined without a value in
// the build config, every other guard in this change uses #ifdef, and the
// #if form triggers -Wundef warnings when the macro is not defined.
#ifdef ENABLE_MINOR_MC
return (FLAG_minor_mc) ? MINOR_MARK_COMPACTOR : SCAVENGER;
#else
return SCAVENGER;
#endif  // ENABLE_MINOR_MC
}
static inline const char* CollectorName(GarbageCollector collector) {
......
......@@ -588,8 +588,12 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
Map* filler_map = heap_->one_pointer_filler_map();
#ifdef ENABLE_MINOR_MC
MinorMarkCompactCollector::MarkingState* minor_marking_state =
heap()->minor_mark_compact_collector()->marking_state();
#else
void* minor_marking_state = nullptr;
#endif // ENABLE_MINOR_MC
marking_worklist()->Update([this, filler_map, minor_marking_state](
HeapObject* obj, HeapObject** out) -> bool {
......@@ -613,19 +617,24 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
// The object may be on a page that was moved in new space.
DCHECK(
Page::FromAddress(obj->address())->IsFlagSet(Page::SWEEP_TO_ITERATE));
#ifdef ENABLE_MINOR_MC
if (minor_marking_state->IsGrey(obj)) {
*out = obj;
return true;
}
#endif // ENABLE_MINOR_MC
return false;
} else {
// The object may be on a page that was moved from new to old space.
// The object may be on a page that was moved from new to old space. Only
// applicable during minor MC garbage collections.
if (Page::FromAddress(obj->address())
->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
#ifdef ENABLE_MINOR_MC
if (minor_marking_state->IsGrey(obj)) {
*out = obj;
return true;
}
#endif // ENABLE_MINOR_MC
return false;
}
DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
......
This diff is collapsed.
......@@ -354,76 +354,6 @@ class MinorNonAtomicMarkingState final
}
};
// Collector for young-generation only.
class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
public:
using MarkingState = MinorMarkingState;
using NonAtomicMarkingState = MinorNonAtomicMarkingState;
explicit MinorMarkCompactCollector(Heap* heap);
~MinorMarkCompactCollector();
MarkingState* marking_state() { return &marking_state_; }
NonAtomicMarkingState* non_atomic_marking_state() {
return &non_atomic_marking_state_;
}
void SetUp() override;
void TearDown() override;
void CollectGarbage() override;
void MakeIterable(Page* page, MarkingTreatmentMode marking_mode,
FreeSpaceTreatmentMode free_space_mode);
void CleanupSweepToIteratePages();
private:
using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
class RootMarkingVisitor;
static const int kNumMarkers = 8;
static const int kMainMarker = 0;
inline MarkingWorklist* worklist() { return worklist_; }
inline YoungGenerationMarkingVisitor* main_marking_visitor() {
return main_marking_visitor_;
}
void MarkLiveObjects() override;
void MarkRootSetInParallel();
void ProcessMarkingWorklist() override;
void ClearNonLiveReferences() override;
void EvacuatePrologue() override;
void EvacuateEpilogue() override;
void Evacuate() override;
void EvacuatePagesInParallel() override;
void UpdatePointersAfterEvacuation() override;
UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
Address end) override;
UpdatingItem* CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
int NumberOfParallelMarkingTasks(int pages);
MarkingWorklist* worklist_;
YoungGenerationMarkingVisitor* main_marking_visitor_;
base::Semaphore page_parallel_job_semaphore_;
std::vector<Page*> new_space_evacuation_pages_;
std::vector<Page*> sweep_to_iterate_pages_;
MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
friend class YoungGenerationMarkingTask;
friend class YoungGenerationMarkingVisitor;
};
// This marking state is used when concurrent marking is running.
class IncrementalMarkingState final
: public MarkingStateBase<IncrementalMarkingState, AccessMode::ATOMIC> {
......@@ -984,6 +914,80 @@ class EvacuationScope {
MarkCompactCollector* collector_;
};
#ifdef ENABLE_MINOR_MC
// Collector for young-generation only.
//
// Only compiled in when the build-time flag v8_enable_minor_mc is set (which
// defines ENABLE_MINOR_MC); callers must null-check
// Heap::minor_mark_compact_collector() or guard uses with the same macro.
class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
public:
// Marking states specific to the young generation (see MinorMarkingState /
// MinorNonAtomicMarkingState declared earlier in this header).
using MarkingState = MinorMarkingState;
using NonAtomicMarkingState = MinorNonAtomicMarkingState;
explicit MinorMarkCompactCollector(Heap* heap);
~MinorMarkCompactCollector();
// Accessors for the collector's marking state; the atomic variant is used
// while marking tasks may run concurrently, the non-atomic one otherwise.
MarkingState* marking_state() { return &marking_state_; }
NonAtomicMarkingState* non_atomic_marking_state() {
return &non_atomic_marking_state_;
}
// Lifecycle and main entry point, overriding MarkCompactCollectorBase.
void SetUp() override;
void TearDown() override;
void CollectGarbage() override;
// Makes |page| iterable again after a minor MC left it in SWEEP_TO_ITERATE
// state; |marking_mode| / |free_space_mode| control whether marking bits are
// cleared and free space is zapped — TODO confirm exact semantics in the
// implementation.
void MakeIterable(Page* page, MarkingTreatmentMode marking_mode,
FreeSpaceTreatmentMode free_space_mode);
void CleanupSweepToIteratePages();
private:
// Worklist of grey objects shared between the marking tasks.
using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
class RootMarkingVisitor;
// Maximum number of parallel marking tasks; task 0 is the main thread.
static const int kNumMarkers = 8;
static const int kMainMarker = 0;
inline MarkingWorklist* worklist() { return worklist_; }
inline YoungGenerationMarkingVisitor* main_marking_visitor() {
return main_marking_visitor_;
}
// Phase implementations (see MarkCompactCollectorBase for the contract).
void MarkLiveObjects() override;
void MarkRootSetInParallel();
void ProcessMarkingWorklist() override;
void ClearNonLiveReferences() override;
void EvacuatePrologue() override;
void EvacuateEpilogue() override;
void Evacuate() override;
void EvacuatePagesInParallel() override;
void UpdatePointersAfterEvacuation() override;
UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
Address end) override;
UpdatingItem* CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
int NumberOfParallelMarkingTasks(int pages);
MarkingWorklist* worklist_;
YoungGenerationMarkingVisitor* main_marking_visitor_;
base::Semaphore page_parallel_job_semaphore_;
std::vector<Page*> new_space_evacuation_pages_;
// Pages left in SWEEP_TO_ITERATE state, cleaned up by
// CleanupSweepToIteratePages().
std::vector<Page*> sweep_to_iterate_pages_;
MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
friend class YoungGenerationMarkingTask;
friend class YoungGenerationMarkingVisitor;
};
#endif  // ENABLE_MINOR_MC
} // namespace internal
} // namespace v8
......
......@@ -61,10 +61,14 @@ bool HeapObjectIterator::AdvanceToNextPage() {
Heap* heap = space_->heap();
heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
#ifdef ENABLE_MINOR_MC
if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
heap->minor_mark_compact_collector()->MakeIterable(
cur_page, MarkingTreatmentMode::CLEAR,
FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
#else
DCHECK(!cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE));
#endif // ENABLE_MINOR_MC
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
DCHECK(cur_page->SweepingDone());
......@@ -687,6 +691,7 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
Page* page = static_cast<Page*>(chunk);
heap()->incremental_marking()->SetNewSpacePageFlags(page);
page->AllocateLocalTracker();
#ifdef ENABLE_MINOR_MC
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
heap()
......@@ -694,6 +699,7 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
->non_atomic_marking_state()
->ClearLiveness(page);
}
#endif // ENABLE_MINOR_MC
page->InitializationMemoryFence();
return page;
}
......
......@@ -1191,9 +1191,11 @@ class OneByteVectorResource : public v8::String::ExternalOneByteStringResource {
};
TEST(InternalizeExternal) {
#ifdef ENABLE_MINOR_MC
// TODO(mlippautz): Remove once we add support for forwarding ThinStrings in
// minor MC.
// minor MC
if (FLAG_minor_mc) return;
#endif // ENABLE_MINOR_MC
FLAG_stress_incremental_marking = false;
FLAG_thin_strings = true;
CcTest::InitializeVM();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment