Commit 4e815bd6 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Remove COMPLETE state from IncrementalMarking

This CL removes the COMPLETE state from incremental marking. With
COMPLETE gone, the only remaining states are STOPPED and MARKING, so
the state enum can be replaced with a single boolean field
(is_marking_).

The state could change back and forth between MARKING and COMPLETE.
IsMarking() already covered COMPLETE as well, so most code treated
both states the same. IsComplete() now checks whether marking is
running and the transitive closure has already been reached.
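
As a minimal, self-contained sketch of the new predicate layering
(mirroring the incremental-marking.h hunk further down; the
ShouldFinalize() stub here is a placeholder for illustration, not V8's
real implementation):

class IncrementalMarkingSketch {
 public:
  // With COMPLETE gone, "running" and "marking" are the same thing.
  bool IsStopped() const { return !IsRunning(); }
  bool IsRunning() const { return is_marking_; }
  bool IsMarking() const { return IsRunning(); }
  // "Complete" is now a derived property: marking is running and the
  // transitive closure has been reached.
  bool IsComplete() const { return IsMarking() && ShouldFinalize(); }

 private:
  // Placeholder: in V8 this asks whether a transitive closure was reached.
  bool ShouldFinalize() const { return closure_reached_; }

  bool is_marking_ = false;
  bool closure_reached_ = false;
};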

IncrementalMarking::Step() didn't process the marking queue while in
COMPLETE. This should be relatively rare, though, since marking only
transitioned into COMPLETE when the stack guard was armed and the
allocation observer ran again before the next stack guard check.
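
Since the incremental-marking.cc changes themselves are collapsed in
the diff below, here is a rough, hypothetical sketch of the behavioral
difference in Step(); names and structure are simplified and are not
the actual V8 code:

// Hypothetical simplification of Step(), not the actual V8 code.
// Before: a step taken while state_ == COMPLETE skipped the worklist.
// After: any step taken while marking is running drains the worklist.
#include <cstddef>

struct WorklistSketch {
  size_t pending_bytes = 0;
  void Drain(size_t max_bytes) {
    pending_bytes = pending_bytes > max_bytes ? pending_bytes - max_bytes : 0;
  }
};

void StepSketch(bool is_marking, WorklistSketch& worklist, size_t max_bytes) {
  if (!is_marking) return;    // the only remaining "off" state is STOPPED
  worklist.Drain(max_bytes);  // no COMPLETE early-out anymore
}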

Bug: v8:12775
Change-Id: Ied48d8c512ad3d1b3d2e29393d43b434b5fda8fe
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3835689
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82581}
parent cb9bf85b
@@ -3770,9 +3770,7 @@ size_t Heap::NewSpaceCapacity() {
 
 void Heap::FinalizeIncrementalMarkingIfComplete(
     GarbageCollectionReason gc_reason) {
-  if (incremental_marking()->IsComplete() ||
-      (incremental_marking()->IsMarking() &&
-       incremental_marking()->ShouldFinalize())) {
+  if (incremental_marking()->IsComplete()) {
     CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
   }
 }
@@ -5403,7 +5401,7 @@ bool Heap::ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap) {
   if (ShouldOptimizeForLoadTime()) return true;
 
-  if (incremental_marking()->IsComplete()) {
+  if (IsMarkingComplete(local_heap)) {
     return !AllocationLimitOvershotByLargeMargin();
   }
@@ -5425,6 +5423,11 @@ bool Heap::IsMainThreadParked(LocalHeap* local_heap) {
   return local_heap->main_thread_parked_;
 }
 
+bool Heap::IsMarkingComplete(LocalHeap* local_heap) {
+  if (!local_heap || !local_heap->is_main_thread()) return false;
+  return incremental_marking()->IsComplete();
+}
+
 Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
   if (ShouldReduceMemory() || FLAG_stress_compaction) {
     return Heap::HeapGrowingMode::kMinimal;
...
@@ -2029,6 +2029,7 @@ class Heap {
       LocalHeap* local_heap = nullptr);
   bool IsRetryOfFailedAllocation(LocalHeap* local_heap);
   bool IsMainThreadParked(LocalHeap* local_heap);
+  bool IsMarkingComplete(LocalHeap* local_heap);
 
   HeapGrowingMode CurrentHeapGrowingMode();
...
@@ -30,16 +30,6 @@ void IncrementalMarking::TransferColor(HeapObject from, HeapObject to) {
   }
 }
 
-void IncrementalMarking::RestartIfNotMarking() {
-  if (state_ == COMPLETE) {
-    state_ = MARKING;
-    if (FLAG_trace_incremental_marking) {
-      heap()->isolate()->PrintWithTimestamp(
-          "[IncrementalMarking] Restarting (new grey objects)\n");
-    }
-  }
-}
-
 }  // namespace internal
 }  // namespace v8
...
This diff is collapsed.
@@ -42,8 +42,6 @@ enum class StepResult {
 class V8_EXPORT_PRIVATE IncrementalMarking final {
  public:
-  enum State : uint8_t { STOPPED, MARKING, COMPLETE };
-
   class V8_NODISCARD PauseBlackAllocationScope {
    public:
     explicit PauseBlackAllocationScope(IncrementalMarking* marking)
@@ -88,8 +86,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   V8_INLINE void TransferColor(HeapObject from, HeapObject to);
 
-  V8_INLINE void RestartIfNotMarking();
-
   IncrementalMarking(Heap* heap, WeakObjects* weak_objects);
 
   MarkingState* marking_state() { return &marking_state_; }
@@ -100,10 +96,10 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   void NotifyLeftTrimming(HeapObject from, HeapObject to);
 
-  bool IsStopped() const { return state() == STOPPED; }
-  bool IsRunning() const { return !IsStopped(); }
-  bool IsMarking() const { return state() >= MARKING; }
-  bool IsComplete() const { return state() == COMPLETE; }
+  bool IsStopped() const { return !IsRunning(); }
+  bool IsRunning() const { return is_marking_; }
+  bool IsMarking() const { return IsRunning(); }
+  bool IsComplete() const { return IsMarking() && ShouldFinalize(); }
 
   bool CollectionRequested() const {
     return collection_requested_via_stack_guard_;
@@ -209,9 +205,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   // bytes and already marked bytes.
   size_t ComputeStepSizeInBytes(StepOrigin step_origin);
 
-  void TryMarkingComplete(StepOrigin step_origin);
-  void MarkingComplete();
-
   bool ShouldWaitForTask();
   bool TryInitializeTaskTimeout();
@@ -228,16 +221,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   // from white to grey.
   bool WhiteToGreyAndPush(HeapObject obj);
 
-  State state() const {
-    DCHECK_IMPLIES(state_ != STOPPED, FLAG_incremental_marking);
-    return state_;
-  }
-
-  void SetState(State s) {
-    state_ = s;
-    heap_->SetIsMarkingFlag(s >= MARKING);
-  }
-
   double CurrentTimeToMarkingTask() const;
 
   Heap* const heap_;
@@ -255,11 +238,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   // bytes_marked_ahead_of_schedule_ with contribution of concurrent marking.
   size_t bytes_marked_concurrently_ = 0;
 
-  // Must use `SetState()` above to update `state_`.
-  // Atomic since main thread can complete marking while a background thread's
-  // slow allocation path will check whether incremental marking is currently
-  // running.
-  std::atomic<State> state_;
+  bool is_marking_ = false;
 
   bool is_compacting_ = false;
   bool black_allocation_ = false;
...
@@ -136,7 +136,8 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
   // Check if we want to force a GC before growing the old space further.
   // If so, fail the allocation.
   if (!heap()->CanExpandOldGeneration(object_size) ||
-      !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
+      !heap()->ShouldExpandOldGenerationOnSlowAllocation(
+          heap()->main_thread_local_heap())) {
     return AllocationResult::Failure();
   }
...
@@ -31,10 +31,6 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
   BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(value);
   if (is_shared_heap_ != target_page->InSharedHeap()) return false;
   if (WhiteToGreyAndPush(value)) {
-    if (is_main_thread_barrier_) {
-      incremental_marking_->RestartIfNotMarking();
-    }
-
     if (V8_UNLIKELY(FLAG_track_retaining_path)) {
       heap_->AddRetainingRoot(Root::kWriteBarrier, value);
     }
...
@@ -46,8 +46,6 @@ void MarkingBarrier::Write(HeapObject host, HeapObjectSlot slot,
 void MarkingBarrier::WriteWithoutHost(HeapObject value) {
   DCHECK(is_main_thread_barrier_);
   if (WhiteToGreyAndPush(value)) {
-    incremental_marking_->RestartIfNotMarking();
-
     if (V8_UNLIKELY(FLAG_track_retaining_path)) {
       heap_->AddRetainingRoot(Root::kWriteBarrier, value);
     }
...
@@ -1013,7 +1013,8 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
     }
   }
 
-  if (heap()->ShouldExpandOldGenerationOnSlowAllocation() &&
+  if (heap()->ShouldExpandOldGenerationOnSlowAllocation(
+          heap()->main_thread_local_heap()) &&
       heap()->CanExpandOldGeneration(AreaSize())) {
     if (TryExpand(size_in_bytes, origin)) {
       return true;
...
@@ -1777,12 +1777,13 @@ TEST(TestInternalWeakLists) {
   // and hence are incompatible with this test case.
   if (FLAG_gc_global || FLAG_stress_compaction ||
       FLAG_stress_incremental_marking || FLAG_single_generation ||
-      FLAG_separate_gc_phases)
+      FLAG_separate_gc_phases || FLAG_stress_concurrent_allocation)
     return;
   FLAG_retain_maps_for_n_gc = 0;
 
   static const int kNumTestContexts = 10;
 
+  ManualGCScope manual_gc_scope;
   Isolate* isolate = CcTest::i_isolate();
   HandleScope scope(isolate);
   v8::Local<v8::Context> ctx[kNumTestContexts];
...