Commit 4e815bd6 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Remove COMPLETE state from IncrementalMarking

This CL removes the COMPLETE state from incremental marking. With
COMPLETE gone, the only remaining states are STOPPED and MARKING, so
the state enum can be replaced with an is_marking_ boolean field.

Previously the state could flip back and forth between MARKING and
COMPLETE. IsMarking() already returned true for COMPLETE as well, so
most code treated the two states the same. IsComplete() now checks
whether marking is running and the transitive closure has already
been reached.

IncrementalMarking::Step() did not process the marking queue while in
COMPLETE. That case should be relatively rare, though: marking only
transitioned into COMPLETE when the stack guard was armed and an
allocation observer ran again before the next stack guard check.
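
The completion handling that used to live in TryMarkingComplete() now
runs after the step in AdvanceOnAllocation(), as the hunk below shows:

  Step(kMaxStepSizeInMs, StepOrigin::kV8);
  if (IsComplete()) {
    // Marking cannot be finalized here. Schedule a completion task instead.
    if (!ShouldWaitForTask()) {
      // When task isn't run soon enough, fall back to stack guard to force
      // completion.
      collection_requested_via_stack_guard_ = true;
      heap_->isolate()->stack_guard()->RequestGC();
    }
  }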

Bug: v8:12775
Change-Id: Ied48d8c512ad3d1b3d2e29393d43b434b5fda8fe
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3835689
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82581}
parent cb9bf85b
@@ -3770,9 +3770,7 @@ size_t Heap::NewSpaceCapacity() {
 void Heap::FinalizeIncrementalMarkingIfComplete(
     GarbageCollectionReason gc_reason) {
-  if (incremental_marking()->IsComplete() ||
-      (incremental_marking()->IsMarking() &&
-       incremental_marking()->ShouldFinalize())) {
+  if (incremental_marking()->IsComplete()) {
     CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
   }
 }

@@ -5403,7 +5401,7 @@ bool Heap::ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap) {
   if (ShouldOptimizeForLoadTime()) return true;

-  if (incremental_marking()->IsComplete()) {
+  if (IsMarkingComplete(local_heap)) {
     return !AllocationLimitOvershotByLargeMargin();
   }

@@ -5425,6 +5423,11 @@ bool Heap::IsMainThreadParked(LocalHeap* local_heap) {
   return local_heap->main_thread_parked_;
 }

+bool Heap::IsMarkingComplete(LocalHeap* local_heap) {
+  if (!local_heap || !local_heap->is_main_thread()) return false;
+  return incremental_marking()->IsComplete();
+}
+
 Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
   if (ShouldReduceMemory() || FLAG_stress_compaction) {
     return Heap::HeapGrowingMode::kMinimal;

@@ -2029,6 +2029,7 @@ class Heap {
                                       LocalHeap* local_heap = nullptr);
   bool IsRetryOfFailedAllocation(LocalHeap* local_heap);
   bool IsMainThreadParked(LocalHeap* local_heap);
+  bool IsMarkingComplete(LocalHeap* local_heap);

   HeapGrowingMode CurrentHeapGrowingMode();

@@ -30,16 +30,6 @@ void IncrementalMarking::TransferColor(HeapObject from, HeapObject to) {
   }
 }

-void IncrementalMarking::RestartIfNotMarking() {
-  if (state_ == COMPLETE) {
-    state_ = MARKING;
-    if (FLAG_trace_incremental_marking) {
-      heap()->isolate()->PrintWithTimestamp(
-          "[IncrementalMarking] Restarting (new grey objects)\n");
-    }
-  }
-}
-
 }  // namespace internal
 }  // namespace v8

@@ -57,9 +57,7 @@ IncrementalMarking::IncrementalMarking(Heap* heap, WeakObjects* weak_objects)
       old_generation_observer_(this, kOldGenerationAllocatedThreshold),
       marking_state_(heap->isolate()),
       atomic_marking_state_(heap->isolate()),
-      non_atomic_marking_state_(heap->isolate()) {
-  SetState(STOPPED);
-}
+      non_atomic_marking_state_(heap->isolate()) {}

 void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
     HeapObject obj) {

@@ -146,7 +144,7 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
                                    : global_limit_mb - global_size_mb);
   }

   DCHECK(FLAG_incremental_marking);
-  DCHECK_EQ(state_, STOPPED);
+  DCHECK(IsStopped());
   DCHECK_EQ(heap_->gc_state(), Heap::NOT_IN_GC);
   DCHECK(!heap_->isolate()->serializer_enabled());

@@ -277,7 +275,8 @@ void IncrementalMarking::StartMarking() {
   collector_->StartMarking();

-  SetState(MARKING);
+  is_marking_ = true;
+  heap_->SetIsMarkingFlag(true);

   MarkingBarrier::ActivateAll(heap(), is_compacting_);
   GlobalHandles::EnableMarkingBarrier(heap()->isolate());

@@ -523,7 +522,8 @@ bool IncrementalMarking::Stop() {
   collection_requested_via_stack_guard_ = false;
   heap_->isolate()->stack_guard()->ClearGC();

-  SetState(STOPPED);
+  is_marking_ = false;
+  heap_->SetIsMarkingFlag(false);
   is_compacting_ = false;
   FinishBlackAllocation();

@@ -549,36 +549,6 @@ double IncrementalMarking::CurrentTimeToMarkingTask() const {
   return std::max(recorded_time_to_marking_task, current_time_to_marking_task);
 }

-void IncrementalMarking::TryMarkingComplete(StepOrigin step_origin) {
-  switch (step_origin) {
-    case StepOrigin::kTask:
-      MarkingComplete();
-      break;
-    case StepOrigin::kV8:
-      // The caller (e.g. allocation observer) cannot finalize marking. Schedule
-      // a completion task instead.
-      if (ShouldWaitForTask()) {
-        // Stay in MARKING state while waiting for the completion task. This
-        // ensures that subsequent Step() invocations will still perform
-        // marking.
-      } else {
-        // When task isn't run soon enough, fall back to stack guard to force
-        // completion.
-        MarkingComplete();
-        collection_requested_via_stack_guard_ = true;
-        heap_->isolate()->stack_guard()->RequestGC();
-      }
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
 bool IncrementalMarking::ShouldWaitForTask() {
   if (!completion_task_scheduled_) {
     incremental_marking_job_.ScheduleTask(

@@ -641,17 +611,8 @@ bool IncrementalMarking::TryInitializeTaskTimeout() {
   }
 }

-void IncrementalMarking::MarkingComplete() {
-  SetState(COMPLETE);
-
-  if (FLAG_trace_incremental_marking) {
-    heap()->isolate()->PrintWithTimestamp(
-        "[IncrementalMarking] Complete (normal).\n");
-  }
-}
-
 bool IncrementalMarking::ShouldDoEmbedderStep() {
-  return state_ == MARKING && FLAG_incremental_marking_wrappers &&
+  return IsRunning() && FLAG_incremental_marking_wrappers &&
          heap_->local_embedder_heap_tracer()->InUse();
 }

@@ -707,12 +668,14 @@ void IncrementalMarking::AdvanceForTesting(double max_step_size_in_ms) {
 void IncrementalMarking::AdvanceOnAllocation() {
   DCHECK_EQ(heap_->gc_state(), Heap::NOT_IN_GC);
   DCHECK(FLAG_incremental_marking);
+  DCHECK(IsRunning());

   // Code using an AlwaysAllocateScope assumes that the GC state does not
   // change; that implies that no marking steps must be performed.
-  if (state_ != MARKING || heap_->always_allocate()) {
+  if (heap_->always_allocate()) {
     return;
   }

   NestedTimedHistogramScope incremental_marking_scope(
       heap_->isolate()->counters()->gc_incremental_marking());
   TRACE_EVENT0("v8", "V8.GCIncrementalMarking");

@@ -720,6 +683,17 @@ void IncrementalMarking::AdvanceOnAllocation() {
                          ThreadKind::kMain);
   ScheduleBytesToMarkBasedOnAllocation();
   Step(kMaxStepSizeInMs, StepOrigin::kV8);
+
+  if (IsComplete()) {
+    // Marking cannot be finalized here. Schedule a completion task instead.
+    if (!ShouldWaitForTask()) {
+      // When task isn't run soon enough, fall back to stack guard to force
+      // completion.
+      collection_requested_via_stack_guard_ = true;
+      heap_->isolate()->stack_guard()->RequestGC();
+    }
+  }
 }

 void IncrementalMarking::AdvanceWithDeadline(StepOrigin step_origin) {

@@ -836,13 +810,14 @@ size_t IncrementalMarking::ComputeStepSizeInBytes(StepOrigin step_origin) {
 void IncrementalMarking::Step(double max_step_size_in_ms,
                               StepOrigin step_origin) {
+  DCHECK(IsRunning());
   double start = heap_->MonotonicallyIncreasingTimeInMs();
   size_t bytes_to_process = 0;
   size_t v8_bytes_processed = 0;
   double embedder_duration = 0.0;
   double embedder_deadline = 0.0;
-  if (state_ == MARKING) {
   if (FLAG_concurrent_marking) {
     // It is safe to merge back all objects that were on hold to the shared
     // work list at Step because we are at a safepoint where all objects

@@ -899,8 +874,6 @@ void IncrementalMarking::Step(double max_step_size_in_ms,
       embedder_result == StepResult::kNoImmediateWork) {
     // TODO(v8:12775): Try to remove.
     FastForwardSchedule();
-    TryMarkingComplete(step_origin);
   }

   if (FLAG_concurrent_marking) {

@@ -908,6 +881,10 @@ void IncrementalMarking::Step(double max_step_size_in_ms,
     heap_->concurrent_marking()->RescheduleJobIfNeeded();
   }

+  const double current_time = heap_->MonotonicallyIncreasingTimeInMs();
+  const double v8_duration = current_time - start - embedder_duration;
+  heap_->tracer()->AddIncrementalMarkingStep(v8_duration, v8_bytes_processed);
+
   if (FLAG_trace_incremental_marking) {
     heap_->isolate()->PrintWithTimestamp(
         "[IncrementalMarking] Step %s V8: %zuKB (%zuKB), embedder: %fms "

@@ -915,14 +892,7 @@ void IncrementalMarking::Step(double max_step_size_in_ms,
         "in %.1f\n",
         step_origin == StepOrigin::kV8 ? "in v8" : "in task",
         v8_bytes_processed / KB, bytes_to_process / KB, embedder_duration,
-        embedder_deadline, heap_->MonotonicallyIncreasingTimeInMs() - start);
-    }
-  }
-
-  if (state_ == MARKING) {
-    const double v8_duration =
-        heap_->MonotonicallyIncreasingTimeInMs() - start - embedder_duration;
-    heap_->tracer()->AddIncrementalMarkingStep(v8_duration, v8_bytes_processed);
+        embedder_deadline, current_time - start);
   }
 }

@@ -42,8 +42,6 @@ enum class StepResult {
 class V8_EXPORT_PRIVATE IncrementalMarking final {
  public:
-  enum State : uint8_t { STOPPED, MARKING, COMPLETE };
-
   class V8_NODISCARD PauseBlackAllocationScope {
    public:
     explicit PauseBlackAllocationScope(IncrementalMarking* marking)

@@ -88,8 +86,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   V8_INLINE void TransferColor(HeapObject from, HeapObject to);

-  V8_INLINE void RestartIfNotMarking();
-
   IncrementalMarking(Heap* heap, WeakObjects* weak_objects);

   MarkingState* marking_state() { return &marking_state_; }

@@ -100,10 +96,10 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   void NotifyLeftTrimming(HeapObject from, HeapObject to);

-  bool IsStopped() const { return state() == STOPPED; }
-  bool IsRunning() const { return !IsStopped(); }
-  bool IsMarking() const { return state() >= MARKING; }
-  bool IsComplete() const { return state() == COMPLETE; }
+  bool IsStopped() const { return !IsRunning(); }
+  bool IsRunning() const { return is_marking_; }
+  bool IsMarking() const { return IsRunning(); }
+  bool IsComplete() const { return IsMarking() && ShouldFinalize(); }

   bool CollectionRequested() const {
     return collection_requested_via_stack_guard_;

@@ -209,9 +205,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   // bytes and already marked bytes.
   size_t ComputeStepSizeInBytes(StepOrigin step_origin);

-  void TryMarkingComplete(StepOrigin step_origin);
-  void MarkingComplete();
-
   bool ShouldWaitForTask();
   bool TryInitializeTaskTimeout();

@@ -228,16 +221,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   // from white to grey.
   bool WhiteToGreyAndPush(HeapObject obj);

-  State state() const {
-    DCHECK_IMPLIES(state_ != STOPPED, FLAG_incremental_marking);
-    return state_;
-  }
-
-  void SetState(State s) {
-    state_ = s;
-    heap_->SetIsMarkingFlag(s >= MARKING);
-  }
-
   double CurrentTimeToMarkingTask() const;

   Heap* const heap_;

@@ -255,11 +238,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   // bytes_marked_ahead_of_schedule_ with contribution of concurrent marking.
   size_t bytes_marked_concurrently_ = 0;

-  // Must use `SetState()` above to update `state_`.
-  // Atomic since main thread can complete marking while a background thread's
-  // slow allocation path will check whether incremental marking is currently
-  // running.
-  std::atomic<State> state_;
+  bool is_marking_ = false;
   bool is_compacting_ = false;
   bool black_allocation_ = false;

@@ -136,7 +136,8 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
   // Check if we want to force a GC before growing the old space further.
   // If so, fail the allocation.
   if (!heap()->CanExpandOldGeneration(object_size) ||
-      !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
+      !heap()->ShouldExpandOldGenerationOnSlowAllocation(
+          heap()->main_thread_local_heap())) {
     return AllocationResult::Failure();
   }

@@ -31,10 +31,6 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
   BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(value);
   if (is_shared_heap_ != target_page->InSharedHeap()) return false;

   if (WhiteToGreyAndPush(value)) {
-    if (is_main_thread_barrier_) {
-      incremental_marking_->RestartIfNotMarking();
-    }
-
     if (V8_UNLIKELY(FLAG_track_retaining_path)) {
       heap_->AddRetainingRoot(Root::kWriteBarrier, value);
     }

@@ -46,8 +46,6 @@ void MarkingBarrier::Write(HeapObject host, HeapObjectSlot slot,
 void MarkingBarrier::WriteWithoutHost(HeapObject value) {
   DCHECK(is_main_thread_barrier_);
   if (WhiteToGreyAndPush(value)) {
-    incremental_marking_->RestartIfNotMarking();
-
     if (V8_UNLIKELY(FLAG_track_retaining_path)) {
       heap_->AddRetainingRoot(Root::kWriteBarrier, value);
     }

@@ -1013,7 +1013,8 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
     }
   }

-  if (heap()->ShouldExpandOldGenerationOnSlowAllocation() &&
+  if (heap()->ShouldExpandOldGenerationOnSlowAllocation(
+          heap()->main_thread_local_heap()) &&
       heap()->CanExpandOldGeneration(AreaSize())) {
     if (TryExpand(size_in_bytes, origin)) {
       return true;

@@ -1777,12 +1777,13 @@ TEST(TestInternalWeakLists) {
   // and hence are incompatible with this test case.
   if (FLAG_gc_global || FLAG_stress_compaction ||
       FLAG_stress_incremental_marking || FLAG_single_generation ||
-      FLAG_separate_gc_phases)
+      FLAG_separate_gc_phases || FLAG_stress_concurrent_allocation)
     return;
   FLAG_retain_maps_for_n_gc = 0;
   static const int kNumTestContexts = 10;

+  ManualGCScope manual_gc_scope;
   Isolate* isolate = CcTest::i_isolate();
   HandleScope scope(isolate);
   v8::Local<v8::Context> ctx[kNumTestContexts];