Commit 4e815bd6 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Remove COMPLETE state from IncrementalMarking

This CL removes the COMPLETE state from incremental marking. Since the
only remaining states are STOPPED and MARKING, the state enum can be
replaced with a simple is_marking_ boolean field.

The state could change back and forth between MARKING and COMPLETE, and
IsMarking() already returned true for COMPLETE, so most code treated both
states the same. IsComplete() now checks whether marking is running and
the transitive closure has already been reached.
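
In essence, the predicates collapse as sketched below. This is a minimal
standalone model of the change, not V8's actual class; only the predicate
names and the IsMarking()/ShouldFinalize() combination mirror the diff,
and the worklists_empty_ stand-in is illustrative.

  // Before: enum State { STOPPED, MARKING, COMPLETE } with back-and-forth
  // transitions between MARKING and COMPLETE.
  // After: a single boolean plus a derived completion check.
  class IncrementalMarkingModel {
   public:
    bool IsRunning() const { return is_marking_; }
    bool IsStopped() const { return !IsRunning(); }
    bool IsMarking() const { return IsRunning(); }
    // "Complete" now means: marking is running and the transitive closure
    // has been reached.
    bool IsComplete() const { return IsMarking() && ShouldFinalize(); }

   private:
    // Stand-in for the real finalization check (worklists drained etc.).
    bool ShouldFinalize() const { return worklists_empty_; }
    bool is_marking_ = false;
    bool worklists_empty_ = false;
  };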

IncrementalMarking::Step() did not process the marking queue while in
COMPLETE. This case should be relatively rare, though, since marking only
transitioned into COMPLETE when the stack guard was armed and the
allocation observer ran again before the next stack guard check.
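
For the allocation-observer path, completion is now requested as sketched
below. This is a standalone model with invented names; only the control
flow (wait for the scheduled completion task, otherwise arm the stack
guard) follows the AdvanceOnAllocation() change in the diff.

  struct CompletionRequestModel {
    bool is_complete = false;                // stands in for IsComplete()
    bool completion_task_scheduled = false;  // stands in for ShouldWaitForTask()
    bool stack_guard_armed = false;

    void AfterAllocationStep() {
      if (!is_complete) return;
      if (!completion_task_scheduled) {
        // The completion task won't run soon enough: arm the stack guard so
        // the next stack guard check finalizes marking.
        stack_guard_armed = true;
      }
    }
  };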

Bug: v8:12775
Change-Id: Ied48d8c512ad3d1b3d2e29393d43b434b5fda8fe
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3835689
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82581}
parent cb9bf85b
@@ -3770,9 +3770,7 @@ size_t Heap::NewSpaceCapacity() {
void Heap::FinalizeIncrementalMarkingIfComplete(
GarbageCollectionReason gc_reason) {
if (incremental_marking()->IsComplete() ||
(incremental_marking()->IsMarking() &&
incremental_marking()->ShouldFinalize())) {
if (incremental_marking()->IsComplete()) {
CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
}
}
@@ -5403,7 +5401,7 @@ bool Heap::ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap) {
if (ShouldOptimizeForLoadTime()) return true;
if (incremental_marking()->IsComplete()) {
if (IsMarkingComplete(local_heap)) {
return !AllocationLimitOvershotByLargeMargin();
}
@@ -5425,6 +5423,11 @@ bool Heap::IsMainThreadParked(LocalHeap* local_heap) {
return local_heap->main_thread_parked_;
}
bool Heap::IsMarkingComplete(LocalHeap* local_heap) {
if (!local_heap || !local_heap->is_main_thread()) return false;
return incremental_marking()->IsComplete();
}
Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
if (ShouldReduceMemory() || FLAG_stress_compaction) {
return Heap::HeapGrowingMode::kMinimal;
@@ -2029,6 +2029,7 @@ class Heap {
LocalHeap* local_heap = nullptr);
bool IsRetryOfFailedAllocation(LocalHeap* local_heap);
bool IsMainThreadParked(LocalHeap* local_heap);
bool IsMarkingComplete(LocalHeap* local_heap);
HeapGrowingMode CurrentHeapGrowingMode();
@@ -30,16 +30,6 @@ void IncrementalMarking::TransferColor(HeapObject from, HeapObject to) {
}
}
void IncrementalMarking::RestartIfNotMarking() {
if (state_ == COMPLETE) {
state_ = MARKING;
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Restarting (new grey objects)\n");
}
}
}
} // namespace internal
} // namespace v8
@@ -57,9 +57,7 @@ IncrementalMarking::IncrementalMarking(Heap* heap, WeakObjects* weak_objects)
old_generation_observer_(this, kOldGenerationAllocatedThreshold),
marking_state_(heap->isolate()),
atomic_marking_state_(heap->isolate()),
non_atomic_marking_state_(heap->isolate()) {
SetState(STOPPED);
}
non_atomic_marking_state_(heap->isolate()) {}
void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
HeapObject obj) {
@@ -146,7 +144,7 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
: global_limit_mb - global_size_mb);
}
DCHECK(FLAG_incremental_marking);
DCHECK_EQ(state_, STOPPED);
DCHECK(IsStopped());
DCHECK_EQ(heap_->gc_state(), Heap::NOT_IN_GC);
DCHECK(!heap_->isolate()->serializer_enabled());
@@ -277,7 +275,8 @@ void IncrementalMarking::StartMarking() {
collector_->StartMarking();
SetState(MARKING);
is_marking_ = true;
heap_->SetIsMarkingFlag(true);
MarkingBarrier::ActivateAll(heap(), is_compacting_);
GlobalHandles::EnableMarkingBarrier(heap()->isolate());
@@ -523,7 +522,8 @@ bool IncrementalMarking::Stop() {
collection_requested_via_stack_guard_ = false;
heap_->isolate()->stack_guard()->ClearGC();
SetState(STOPPED);
is_marking_ = false;
heap_->SetIsMarkingFlag(false);
is_compacting_ = false;
FinishBlackAllocation();
@@ -549,36 +549,6 @@ double IncrementalMarking::CurrentTimeToMarkingTask() const {
return std::max(recorded_time_to_marking_task, current_time_to_marking_task);
}
void IncrementalMarking::TryMarkingComplete(StepOrigin step_origin) {
switch (step_origin) {
case StepOrigin::kTask:
MarkingComplete();
break;
case StepOrigin::kV8:
// The caller (e.g. allocation observer) cannot finalize marking. Schedule
// a completion task instead.
if (ShouldWaitForTask()) {
// Stay in MARKING state while waiting for the completion task. This
// ensures that subsequent Step() invocations will still perform
// marking.
} else {
// When task isn't run soon enough, fall back to stack guard to force
// completion.
MarkingComplete();
collection_requested_via_stack_guard_ = true;
heap_->isolate()->stack_guard()->RequestGC();
}
break;
default:
UNREACHABLE();
}
}
bool IncrementalMarking::ShouldWaitForTask() {
if (!completion_task_scheduled_) {
incremental_marking_job_.ScheduleTask(
@@ -641,17 +611,8 @@ bool IncrementalMarking::TryInitializeTaskTimeout() {
}
}
void IncrementalMarking::MarkingComplete() {
SetState(COMPLETE);
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Complete (normal).\n");
}
}
bool IncrementalMarking::ShouldDoEmbedderStep() {
return state_ == MARKING && FLAG_incremental_marking_wrappers &&
return IsRunning() && FLAG_incremental_marking_wrappers &&
heap_->local_embedder_heap_tracer()->InUse();
}
@@ -707,12 +668,14 @@ void IncrementalMarking::AdvanceForTesting(double max_step_size_in_ms) {
void IncrementalMarking::AdvanceOnAllocation() {
DCHECK_EQ(heap_->gc_state(), Heap::NOT_IN_GC);
DCHECK(FLAG_incremental_marking);
DCHECK(IsRunning());
// Code using an AlwaysAllocateScope assumes that the GC state does not
// change; that implies that no marking steps must be performed.
if (state_ != MARKING || heap_->always_allocate()) {
if (heap_->always_allocate()) {
return;
}
NestedTimedHistogramScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking());
TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
@@ -720,6 +683,17 @@ void IncrementalMarking::AdvanceOnAllocation() {
ThreadKind::kMain);
ScheduleBytesToMarkBasedOnAllocation();
Step(kMaxStepSizeInMs, StepOrigin::kV8);
if (IsComplete()) {
// Marking cannot be finalized here. Schedule a completion task instead.
if (!ShouldWaitForTask()) {
// When task isn't run soon enough, fall back to stack guard to force
// completion.
collection_requested_via_stack_guard_ = true;
heap_->isolate()->stack_guard()->RequestGC();
}
}
}
void IncrementalMarking::AdvanceWithDeadline(StepOrigin step_origin) {
@@ -836,93 +810,89 @@ size_t IncrementalMarking::ComputeStepSizeInBytes(StepOrigin step_origin) {
void IncrementalMarking::Step(double max_step_size_in_ms,
StepOrigin step_origin) {
DCHECK(IsRunning());
double start = heap_->MonotonicallyIncreasingTimeInMs();
size_t bytes_to_process = 0;
size_t v8_bytes_processed = 0;
double embedder_duration = 0.0;
double embedder_deadline = 0.0;
if (state_ == MARKING) {
if (FLAG_concurrent_marking) {
// It is safe to merge back all objects that were on hold to the shared
// work list at Step because we are at a safepoint where all objects
// are properly initialized.
local_marking_worklists()->MergeOnHold();
}
if (FLAG_concurrent_marking) {
// It is safe to merge back all objects that were on hold to the shared
// work list at Step because we are at a safepoint where all objects
// are properly initialized.
local_marking_worklists()->MergeOnHold();
}
// Only print marking worklist in debug mode to save ~40KB of code size.
#ifdef DEBUG
if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
FLAG_trace_gc_verbose) {
collector_->marking_worklists()->Print();
}
if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
FLAG_trace_gc_verbose) {
collector_->marking_worklists()->Print();
}
#endif
if (FLAG_trace_incremental_marking) {
heap_->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Marking speed %.fKB/ms\n",
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
}
// The first step after Scavenge will see many allocated bytes.
// Cap the step size to distribute the marking work more uniformly.
const double marking_speed =
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond();
size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
max_step_size_in_ms, marking_speed);
bytes_to_process =
std::min(ComputeStepSizeInBytes(step_origin), max_step_size);
bytes_to_process = std::max({bytes_to_process, kMinStepSizeInBytes});
// Perform a single V8 and a single embedder step. In case both have been
// observed as empty back to back, we can finalize.
//
// This ignores that case where the embedder finds new V8-side objects. The
// assumption is that large graphs are well connected and can mostly be
// processed on their own. For small graphs, helping is not necessary.
std::tie(v8_bytes_processed, std::ignore) =
collector_->ProcessMarkingWorklist(bytes_to_process);
StepResult v8_result = local_marking_worklists()->IsEmpty()
? StepResult::kNoImmediateWork
: StepResult::kMoreWorkRemaining;
StepResult embedder_result = StepResult::kNoImmediateWork;
if (heap_->local_embedder_heap_tracer()->InUse()) {
embedder_deadline =
std::min(max_step_size_in_ms,
static_cast<double>(bytes_to_process) / marking_speed);
// TODO(chromium:1056170): Replace embedder_deadline with bytes_to_process
// after migrating blink to the cppgc library and after v8 can directly
// push objects to Oilpan.
embedder_result = EmbedderStep(embedder_deadline, &embedder_duration);
}
bytes_marked_ += v8_bytes_processed;
if (v8_result == StepResult::kNoImmediateWork &&
embedder_result == StepResult::kNoImmediateWork) {
// TODO(v8:12775): Try to remove.
FastForwardSchedule();
TryMarkingComplete(step_origin);
}
if (FLAG_concurrent_marking) {
local_marking_worklists()->ShareWork();
heap_->concurrent_marking()->RescheduleJobIfNeeded();
}
if (FLAG_trace_incremental_marking) {
heap_->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Marking speed %.fKB/ms\n",
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
}
// The first step after Scavenge will see many allocated bytes.
// Cap the step size to distribute the marking work more uniformly.
const double marking_speed =
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond();
size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
max_step_size_in_ms, marking_speed);
bytes_to_process =
std::min(ComputeStepSizeInBytes(step_origin), max_step_size);
bytes_to_process = std::max({bytes_to_process, kMinStepSizeInBytes});
// Perform a single V8 and a single embedder step. In case both have been
// observed as empty back to back, we can finalize.
//
// This ignores that case where the embedder finds new V8-side objects. The
// assumption is that large graphs are well connected and can mostly be
// processed on their own. For small graphs, helping is not necessary.
std::tie(v8_bytes_processed, std::ignore) =
collector_->ProcessMarkingWorklist(bytes_to_process);
StepResult v8_result = local_marking_worklists()->IsEmpty()
? StepResult::kNoImmediateWork
: StepResult::kMoreWorkRemaining;
StepResult embedder_result = StepResult::kNoImmediateWork;
if (heap_->local_embedder_heap_tracer()->InUse()) {
embedder_deadline =
std::min(max_step_size_in_ms,
static_cast<double>(bytes_to_process) / marking_speed);
// TODO(chromium:1056170): Replace embedder_deadline with bytes_to_process
// after migrating blink to the cppgc library and after v8 can directly
// push objects to Oilpan.
embedder_result = EmbedderStep(embedder_deadline, &embedder_duration);
}
bytes_marked_ += v8_bytes_processed;
if (v8_result == StepResult::kNoImmediateWork &&
embedder_result == StepResult::kNoImmediateWork) {
// TODO(v8:12775): Try to remove.
FastForwardSchedule();
}
if (FLAG_trace_incremental_marking) {
heap_->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Step %s V8: %zuKB (%zuKB), embedder: %fms "
"(%fms) "
"in %.1f\n",
step_origin == StepOrigin::kV8 ? "in v8" : "in task",
v8_bytes_processed / KB, bytes_to_process / KB, embedder_duration,
embedder_deadline, heap_->MonotonicallyIncreasingTimeInMs() - start);
}
if (FLAG_concurrent_marking) {
local_marking_worklists()->ShareWork();
heap_->concurrent_marking()->RescheduleJobIfNeeded();
}
if (state_ == MARKING) {
const double v8_duration =
heap_->MonotonicallyIncreasingTimeInMs() - start - embedder_duration;
heap_->tracer()->AddIncrementalMarkingStep(v8_duration, v8_bytes_processed);
const double current_time = heap_->MonotonicallyIncreasingTimeInMs();
const double v8_duration = current_time - start - embedder_duration;
heap_->tracer()->AddIncrementalMarkingStep(v8_duration, v8_bytes_processed);
if (FLAG_trace_incremental_marking) {
heap_->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Step %s V8: %zuKB (%zuKB), embedder: %fms "
"(%fms) "
"in %.1f\n",
step_origin == StepOrigin::kV8 ? "in v8" : "in task",
v8_bytes_processed / KB, bytes_to_process / KB, embedder_duration,
embedder_deadline, current_time - start);
}
}
@@ -42,8 +42,6 @@ enum class StepResult {
class V8_EXPORT_PRIVATE IncrementalMarking final {
public:
enum State : uint8_t { STOPPED, MARKING, COMPLETE };
class V8_NODISCARD PauseBlackAllocationScope {
public:
explicit PauseBlackAllocationScope(IncrementalMarking* marking)
@@ -88,8 +86,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
V8_INLINE void TransferColor(HeapObject from, HeapObject to);
V8_INLINE void RestartIfNotMarking();
IncrementalMarking(Heap* heap, WeakObjects* weak_objects);
MarkingState* marking_state() { return &marking_state_; }
@@ -100,10 +96,10 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
void NotifyLeftTrimming(HeapObject from, HeapObject to);
bool IsStopped() const { return state() == STOPPED; }
bool IsRunning() const { return !IsStopped(); }
bool IsMarking() const { return state() >= MARKING; }
bool IsComplete() const { return state() == COMPLETE; }
bool IsStopped() const { return !IsRunning(); }
bool IsRunning() const { return is_marking_; }
bool IsMarking() const { return IsRunning(); }
bool IsComplete() const { return IsMarking() && ShouldFinalize(); }
bool CollectionRequested() const {
return collection_requested_via_stack_guard_;
@@ -209,9 +205,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
// bytes and already marked bytes.
size_t ComputeStepSizeInBytes(StepOrigin step_origin);
void TryMarkingComplete(StepOrigin step_origin);
void MarkingComplete();
bool ShouldWaitForTask();
bool TryInitializeTaskTimeout();
@@ -228,16 +221,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
// from white to grey.
bool WhiteToGreyAndPush(HeapObject obj);
State state() const {
DCHECK_IMPLIES(state_ != STOPPED, FLAG_incremental_marking);
return state_;
}
void SetState(State s) {
state_ = s;
heap_->SetIsMarkingFlag(s >= MARKING);
}
double CurrentTimeToMarkingTask() const;
Heap* const heap_;
@@ -255,11 +238,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
// bytes_marked_ahead_of_schedule_ with contribution of concurrent marking.
size_t bytes_marked_concurrently_ = 0;
// Must use `SetState()` above to update `state_`.
// Atomic since main thread can complete marking while a background thread's
// slow allocation path will check whether incremental marking is currently
// running.
std::atomic<State> state_;
bool is_marking_ = false;
bool is_compacting_ = false;
bool black_allocation_ = false;
@@ -136,7 +136,8 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!heap()->CanExpandOldGeneration(object_size) ||
!heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
!heap()->ShouldExpandOldGenerationOnSlowAllocation(
heap()->main_thread_local_heap())) {
return AllocationResult::Failure();
}
@@ -31,10 +31,6 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(value);
if (is_shared_heap_ != target_page->InSharedHeap()) return false;
if (WhiteToGreyAndPush(value)) {
if (is_main_thread_barrier_) {
incremental_marking_->RestartIfNotMarking();
}
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainingRoot(Root::kWriteBarrier, value);
}
@@ -46,8 +46,6 @@ void MarkingBarrier::Write(HeapObject host, HeapObjectSlot slot,
void MarkingBarrier::WriteWithoutHost(HeapObject value) {
DCHECK(is_main_thread_barrier_);
if (WhiteToGreyAndPush(value)) {
incremental_marking_->RestartIfNotMarking();
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainingRoot(Root::kWriteBarrier, value);
}
@@ -1013,7 +1013,8 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
}
}
if (heap()->ShouldExpandOldGenerationOnSlowAllocation() &&
if (heap()->ShouldExpandOldGenerationOnSlowAllocation(
heap()->main_thread_local_heap()) &&
heap()->CanExpandOldGeneration(AreaSize())) {
if (TryExpand(size_in_bytes, origin)) {
return true;
@@ -1777,12 +1777,13 @@ TEST(TestInternalWeakLists) {
// and hence are incompatible with this test case.
if (FLAG_gc_global || FLAG_stress_compaction ||
FLAG_stress_incremental_marking || FLAG_single_generation ||
FLAG_separate_gc_phases)
FLAG_separate_gc_phases || FLAG_stress_concurrent_allocation)
return;
FLAG_retain_maps_for_n_gc = 0;
static const int kNumTestContexts = 10;
ManualGCScope manual_gc_scope;
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::Local<v8::Context> ctx[kNumTestContexts];