Commit 89f8435e authored by Michael Lippautz, committed by V8 LUCI CQ

[heap] Simplify finalization of incremental marking

IM::Finalize() was merely finishing marking through the incremental
marking in the atomic pause. Avoid the Hurry() call, since the marking
worklists would be drained anyway with parallel marking.

Bug: v8:12775
Change-Id: Ice72a8bb5f900368eadec7f62bf18e03d568454b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3574547
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79831}
parent e36e6a88
...@@ -574,31 +574,9 @@ StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms, ...@@ -574,31 +574,9 @@ StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
: StepResult::kMoreWorkRemaining; : StepResult::kMoreWorkRemaining;
} }
void IncrementalMarking::Hurry() { bool IncrementalMarking::Stop() {
if (!local_marking_worklists()->IsEmpty()) { if (IsStopped()) return false;
double start = 0.0;
if (FLAG_trace_incremental_marking) {
start = heap_->MonotonicallyIncreasingTimeInMs();
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Hurry\n");
}
}
collector_->ProcessMarkingWorklist(0);
SetState(COMPLETE);
if (FLAG_trace_incremental_marking) {
double end = heap_->MonotonicallyIncreasingTimeInMs();
double delta = end - start;
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Complete (hurry), spent %d ms.\n",
static_cast<int>(delta));
}
}
}
}
void IncrementalMarking::Stop() {
if (IsStopped()) return;
if (FLAG_trace_incremental_marking) { if (FLAG_trace_incremental_marking) {
int old_generation_size_mb = int old_generation_size_mb =
static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB); static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
...@@ -626,21 +604,16 @@ void IncrementalMarking::Stop() { ...@@ -626,21 +604,16 @@ void IncrementalMarking::Stop() {
FinishBlackAllocation(); FinishBlackAllocation();
// Merge live bytes counters of background threads // Merge live bytes counters of background threads
for (auto pair : background_live_bytes_) { for (const auto& pair : background_live_bytes_) {
MemoryChunk* memory_chunk = pair.first; MemoryChunk* memory_chunk = pair.first;
intptr_t live_bytes = pair.second; intptr_t live_bytes = pair.second;
if (live_bytes) { if (live_bytes) {
marking_state()->IncrementLiveBytes(memory_chunk, live_bytes); marking_state()->IncrementLiveBytes(memory_chunk, live_bytes);
} }
} }
background_live_bytes_.clear(); background_live_bytes_.clear();
}
void IncrementalMarking::Finalize() { return true;
Hurry();
Stop();
} }
void IncrementalMarking::FinalizeMarking(CompletionAction action) { void IncrementalMarking::FinalizeMarking(CompletionAction action) {
......
...@@ -135,18 +135,14 @@ class V8_EXPORT_PRIVATE IncrementalMarking final { ...@@ -135,18 +135,14 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
bool WasActivated(); bool WasActivated();
void Start(GarbageCollectionReason gc_reason); void Start(GarbageCollectionReason gc_reason);
// Returns true if incremental marking was running and false otherwise.
bool Stop();
void FinalizeIncrementally(); void FinalizeIncrementally();
void UpdateMarkingWorklistAfterYoungGenGC(); void UpdateMarkingWorklistAfterYoungGenGC();
void UpdateMarkedBytesAfterScavenge(size_t dead_bytes_in_new_space); void UpdateMarkedBytesAfterScavenge(size_t dead_bytes_in_new_space);
void Hurry();
void Finalize();
void Stop();
void FinalizeMarking(CompletionAction action); void FinalizeMarking(CompletionAction action);
void MarkingComplete(CompletionAction action); void MarkingComplete(CompletionAction action);
......
...@@ -966,8 +966,6 @@ void MarkCompactCollector::AbortCompaction() { ...@@ -966,8 +966,6 @@ void MarkCompactCollector::AbortCompaction() {
} }
void MarkCompactCollector::Prepare() { void MarkCompactCollector::Prepare() {
was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
#ifdef DEBUG #ifdef DEBUG
DCHECK(state_ == IDLE); DCHECK(state_ == IDLE);
state_ = PREPARE_GC; state_ = PREPARE_GC;
...@@ -975,7 +973,7 @@ void MarkCompactCollector::Prepare() { ...@@ -975,7 +973,7 @@ void MarkCompactCollector::Prepare() {
DCHECK(!sweeping_in_progress()); DCHECK(!sweeping_in_progress());
if (!was_marked_incrementally_) { if (!heap()->incremental_marking()->IsMarking()) {
{ {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE); TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
auto embedder_flags = heap_->flags_for_embedder_tracer(); auto embedder_flags = heap_->flags_for_embedder_tracer();
...@@ -2358,14 +2356,12 @@ void MarkCompactCollector::MarkLiveObjects() { ...@@ -2358,14 +2356,12 @@ void MarkCompactCollector::MarkLiveObjects() {
// with the C stack limit check. // with the C stack limit check.
PostponeInterruptsScope postpone(isolate()); PostponeInterruptsScope postpone(isolate());
bool was_marked_incrementally = false;
{ {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL); TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
IncrementalMarking* incremental_marking = heap_->incremental_marking(); if (heap_->incremental_marking()->Stop()) {
if (was_marked_incrementally_) {
incremental_marking->Finalize();
MarkingBarrier::PublishAll(heap()); MarkingBarrier::PublishAll(heap());
} else { was_marked_incrementally = true;
CHECK(incremental_marking->IsStopped());
} }
} }
...@@ -2475,7 +2471,11 @@ void MarkCompactCollector::MarkLiveObjects() { ...@@ -2475,7 +2471,11 @@ void MarkCompactCollector::MarkLiveObjects() {
&IsUnmarkedHeapObject); &IsUnmarkedHeapObject);
} }
} }
if (was_marked_incrementally_) {
if (was_marked_incrementally) {
// Disable the marking barrier after concurrent/parallel marking has
// finished as it will reset page flags that share the same bitmap as
// the evacuation candidate bit.
MarkingBarrier::DeactivateAll(heap()); MarkingBarrier::DeactivateAll(heap());
GlobalHandles::DisableMarkingBarrier(heap()->isolate()); GlobalHandles::DisableMarkingBarrier(heap()->isolate());
} }
......
...@@ -785,7 +785,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase { ...@@ -785,7 +785,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
const bool is_shared_heap_; const bool is_shared_heap_;
bool was_marked_incrementally_ = false;
bool evacuation_ = false; bool evacuation_ = false;
// True if we are collecting slots to perform evacuation from evacuation // True if we are collecting slots to perform evacuation from evacuation
// candidates. // candidates.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment