Commit dc5c3ee5 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Add IncrementalMarking::AdvanceForTesting bottleneck

Introducing IncrementalMarking::AdvanceForTesting as the last
bottleneck for driving incremental marking, in addition to
AdvanceFromTask and AdvanceOnAllocation.

Now that we have those three bottlenecks, Step() and AdvanceWithDeadline()
can become private methods of IncrementalMarking. We also no longer need
the StepResult return value of Step(), which allows us to remove
CombineStepResults.

Bug: v8:12775
Change-Id: I702714439ef7ea4b9abf2156387503d4d00a7a48
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3823131
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82552}
parent 772f6c8b
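
For reference, the reshaped interface boils down to the following declaration-only sketch. It is condensed from the class-declaration hunks further down in this commit; comments and unrelated members are omitted, so it is not a verbatim excerpt and does not compile on its own.

class V8_EXPORT_PRIVATE IncrementalMarking final {
 public:
  // The three bottlenecks that drive incremental marking.
  void AdvanceFromTask();      // from a marking task; finalizes marking if complete
  void AdvanceOnAllocation();  // on allocation while marking is running
  void AdvanceForTesting(double max_step_size_in_ms);  // from unit tests

 private:
  // Only reachable through the Advance* entry points above; Step() no longer
  // returns a StepResult.
  void AdvanceWithDeadline(StepOrigin step_origin);
  void Step(double max_step_size_in_ms, StepOrigin step_origin);
};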
@@ -3772,8 +3772,7 @@ void Heap::FinalizeIncrementalMarkingIfComplete(
     GarbageCollectionReason gc_reason) {
   if (incremental_marking()->IsComplete() ||
       (incremental_marking()->IsMarking() &&
-       mark_compact_collector()->local_marking_worklists()->IsEmpty() &&
-       local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking())) {
+       incremental_marking()->ShouldFinalize())) {
     CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
   }
 }
@@ -694,34 +694,58 @@ void IncrementalMarking::ScheduleBytesToMarkBasedOnTime(double time_ms) {
   }
 }
 
-namespace {
-StepResult CombineStepResults(StepResult a, StepResult b) {
-  if (a == StepResult::kMoreWorkRemaining ||
-      b == StepResult::kMoreWorkRemaining)
-    return StepResult::kMoreWorkRemaining;
-  return StepResult::kNoImmediateWork;
+void IncrementalMarking::AdvanceFromTask() {
+  AdvanceWithDeadline(StepOrigin::kTask);
+  heap()->FinalizeIncrementalMarkingIfComplete(
+      GarbageCollectionReason::kFinalizeMarkingViaTask);
 }
+
+void IncrementalMarking::AdvanceForTesting(double max_step_size_in_ms) {
+  Step(max_step_size_in_ms, StepOrigin::kV8);
+}
+
+void IncrementalMarking::AdvanceOnAllocation() {
+  DCHECK_EQ(heap_->gc_state(), Heap::NOT_IN_GC);
+  DCHECK(FLAG_incremental_marking);
+  // Code using an AlwaysAllocateScope assumes that the GC state does not
+  // change; that implies that no marking steps must be performed.
+  if (state_ != MARKING || heap_->always_allocate()) {
+    return;
+  }
+
+  NestedTimedHistogramScope incremental_marking_scope(
+      heap_->isolate()->counters()->gc_incremental_marking());
+  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
+  TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL,
+                 ThreadKind::kMain);
+  ScheduleBytesToMarkBasedOnAllocation();
+  Step(kMaxStepSizeInMs, StepOrigin::kV8);
+}
-}  // anonymous namespace
 
-StepResult IncrementalMarking::AdvanceWithDeadline(double deadline_in_ms,
-                                                   StepOrigin step_origin) {
+void IncrementalMarking::AdvanceWithDeadline(StepOrigin step_origin) {
   NestedTimedHistogramScope incremental_marking_scope(
       heap_->isolate()->counters()->gc_incremental_marking());
   TRACE_EVENT1("v8", "V8.GCIncrementalMarking", "epoch",
                heap_->tracer()->CurrentEpoch(GCTracer::Scope::MC_INCREMENTAL));
   TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL,
                  ThreadKind::kMain);
-  DCHECK(!IsStopped());
+  DCHECK(IsRunning());
 
   ScheduleBytesToMarkBasedOnTime(heap()->MonotonicallyIncreasingTimeInMs());
   FastForwardScheduleIfCloseToFinalization();
-  return Step(kStepSizeInMs, step_origin);
+  Step(kStepSizeInMs, step_origin);
 }
 
-void IncrementalMarking::AdvanceFromTask() {
-  AdvanceWithDeadline(0, StepOrigin::kTask);
-  heap()->FinalizeIncrementalMarkingIfComplete(
-      GarbageCollectionReason::kFinalizeMarkingViaTask);
+bool IncrementalMarking::ShouldFinalize() const {
+  DCHECK(IsRunning());
+
+  return heap()
+             ->mark_compact_collector()
+             ->local_marking_worklists()
+             ->IsEmpty() &&
+         heap()
+             ->local_embedder_heap_tracer()
+             ->ShouldFinalizeIncrementalMarking();
 }
 
 size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
@@ -810,27 +834,10 @@ size_t IncrementalMarking::ComputeStepSizeInBytes(StepOrigin step_origin) {
   return scheduled_bytes_to_mark_ - bytes_marked_ - kScheduleMarginInBytes;
 }
 
-void IncrementalMarking::AdvanceOnAllocation() {
-  // Code using an AlwaysAllocateScope assumes that the GC state does not
-  // change; that implies that no marking steps must be performed.
-  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
-      state_ != MARKING || heap_->always_allocate()) {
-    return;
-  }
-
-  NestedTimedHistogramScope incremental_marking_scope(
-      heap_->isolate()->counters()->gc_incremental_marking());
-  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
-  TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL,
-                 ThreadKind::kMain);
-  ScheduleBytesToMarkBasedOnAllocation();
-  Step(kMaxStepSizeInMs, StepOrigin::kV8);
-}
-
-StepResult IncrementalMarking::Step(double max_step_size_in_ms,
-                                    StepOrigin step_origin) {
+void IncrementalMarking::Step(double max_step_size_in_ms,
+                              StepOrigin step_origin) {
   double start = heap_->MonotonicallyIncreasingTimeInMs();
-  StepResult combined_result = StepResult::kMoreWorkRemaining;
   size_t bytes_to_process = 0;
   size_t v8_bytes_processed = 0;
   double embedder_duration = 0.0;
@@ -887,9 +894,9 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
       embedder_result = EmbedderStep(embedder_deadline, &embedder_duration);
     }
     bytes_marked_ += v8_bytes_processed;
 
-    combined_result = CombineStepResults(v8_result, embedder_result);
-    if (combined_result == StepResult::kNoImmediateWork) {
+    if (v8_result == StepResult::kNoImmediateWork &&
+        embedder_result == StepResult::kNoImmediateWork) {
       // TODO(v8:12775): Try to remove.
       FastForwardSchedule();
@@ -900,21 +907,23 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
       local_marking_worklists()->ShareWork();
       heap_->concurrent_marking()->RescheduleJobIfNeeded();
     }
+
+    if (FLAG_trace_incremental_marking) {
+      heap_->isolate()->PrintWithTimestamp(
+          "[IncrementalMarking] Step %s V8: %zuKB (%zuKB), embedder: %fms "
+          "(%fms) "
+          "in %.1f\n",
+          step_origin == StepOrigin::kV8 ? "in v8" : "in task",
+          v8_bytes_processed / KB, bytes_to_process / KB, embedder_duration,
+          embedder_deadline, heap_->MonotonicallyIncreasingTimeInMs() - start);
+    }
   }
 
   if (state_ == MARKING) {
     const double v8_duration =
         heap_->MonotonicallyIncreasingTimeInMs() - start - embedder_duration;
     heap_->tracer()->AddIncrementalMarkingStep(v8_duration, v8_bytes_processed);
   }
-  if (FLAG_trace_incremental_marking) {
-    heap_->isolate()->PrintWithTimestamp(
-        "[IncrementalMarking] Step %s V8: %zuKB (%zuKB), embedder: %fms (%fms) "
-        "in %.1f\n",
-        step_origin == StepOrigin::kV8 ? "in v8" : "in task",
-        v8_bytes_processed / KB, bytes_to_process / KB, embedder_duration,
-        embedder_deadline, heap_->MonotonicallyIncreasingTimeInMs() - start);
-  }
-  return combined_result;
 }
 
 }  // namespace internal
@@ -109,6 +109,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
     return collection_requested_via_stack_guard_;
   }
 
+  bool ShouldFinalize() const;
+
   bool CanBeStarted() const;
 
   void Start(GarbageCollectionReason gc_reason);
@@ -118,11 +120,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   void UpdateMarkingWorklistAfterYoungGenGC();
   void UpdateMarkedBytesAfterScavenge(size_t dead_bytes_in_new_space);
 
-  // Performs incremental marking steps and returns before the deadline_in_ms is
-  // reached. It may return earlier if the marker is already ahead of the
-  // marking schedule, which is indicated with StepResult::kDone.
-  StepResult AdvanceWithDeadline(double deadline_in_ms, StepOrigin step_origin);
-
   // Performs incremental marking step and finalizes marking if complete.
   void AdvanceFromTask();
@@ -130,8 +127,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   // marking completes.
   void AdvanceOnAllocation();
 
-  StepResult Step(double max_step_size_in_ms, StepOrigin step_origin);
-
   // This function is used to color the object black before it undergoes an
   // unsafe layout change. This is a part of synchronization protocol with
   // the concurrent marker.
@@ -162,6 +157,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   void MarkRootsForTesting();
 
+  // Performs incremental marking step for unit tests.
+  void AdvanceForTesting(double max_step_size_in_ms);
+
  private:
   class IncrementalMarkingRootMarkingVisitor;
@@ -219,6 +217,13 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   void MarkRoots();
 
+  // Performs incremental marking steps and returns before the deadline_in_ms is
+  // reached. It may return earlier if the marker is already ahead of the
+  // marking schedule, which is indicated with StepResult::kDone.
+  void AdvanceWithDeadline(StepOrigin step_origin);
+
+  void Step(double max_step_size_in_ms, StepOrigin step_origin);
+
   // Returns true if the function succeeds in transitioning the object
   // from white to grey.
   bool WhiteToGreyAndPush(HeapObject obj);
@@ -190,8 +190,9 @@ void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
   marking->MarkRootsForTesting();
 
   while (!marking->IsComplete()) {
-    marking->Step(kStepSizeInMs, i::StepOrigin::kV8);
+    marking->AdvanceForTesting(kStepSizeInMs);
   }
   CHECK(marking->IsComplete());
 }
@@ -2480,10 +2480,11 @@ TEST(InstanceOfStubWriteBarrier) {
   MarkingState* marking_state = marking->marking_state();
 
   const double kStepSizeInMs = 100;
-  while (!marking_state->IsBlack(f->code()) && !marking->IsStopped()) {
+  while (!marking_state->IsBlack(f->code())) {
     // Discard any pending GC requests otherwise we will get GC when we enter
     // code below.
-    marking->Step(kStepSizeInMs, StepOrigin::kV8);
+    CHECK(!marking->IsComplete());
+    marking->AdvanceForTesting(kStepSizeInMs);
   }
 
   CHECK(marking->IsMarking());
@@ -2576,12 +2577,9 @@ TEST(IdleNotificationFinishMarking) {
   CHECK_EQ(CcTest::heap()->gc_count(), initial_gc_count);
 
   const double kStepSizeInMs = 100;
-  do {
-    marking->Step(kStepSizeInMs, StepOrigin::kV8);
-  } while (!CcTest::heap()
-                ->mark_compact_collector()
-                ->local_marking_worklists()
-                ->IsEmpty());
+  while (!marking->IsComplete()) {
+    marking->AdvanceForTesting(kStepSizeInMs);
+  }
 
   // The next idle notification has to finish incremental marking.
   const double kLongIdleTime = 1000.0;
@@ -5775,7 +5773,7 @@ TEST(Regress598319) {
   // only partially marked the large object.
   const double kSmallStepSizeInMs = 0.1;
   while (!marking->IsComplete()) {
-    marking->Step(kSmallStepSizeInMs, StepOrigin::kV8);
+    marking->AdvanceForTesting(kSmallStepSizeInMs);
     ProgressBar& progress_bar = page->ProgressBar();
     if (progress_bar.IsEnabled() && progress_bar.Value() > 0) {
       CHECK_NE(progress_bar.Value(), arr.get().Size());
@@ -5797,7 +5795,7 @@ TEST(Regress598319) {
   // Finish marking with bigger steps to speed up test.
   const double kLargeStepSizeInMs = 1000;
   while (!marking->IsComplete()) {
-    marking->Step(kLargeStepSizeInMs, StepOrigin::kV8);
+    marking->AdvanceForTesting(kLargeStepSizeInMs);
   }
   CHECK(marking->IsComplete());
@@ -5885,7 +5883,7 @@ TEST(Regress615489) {
   }
   const double kStepSizeInMs = 100;
   while (!marking->IsComplete()) {
-    marking->Step(kStepSizeInMs, StepOrigin::kV8);
+    marking->AdvanceForTesting(kStepSizeInMs);
   }
   CHECK(marking->IsComplete());
   intptr_t size_before = heap->SizeOfObjects();
@@ -5943,7 +5941,7 @@ TEST(Regress631969) {
   const double kStepSizeInMs = 100;
   IncrementalMarking* marking = heap->incremental_marking();
   while (!marking->IsComplete()) {
-    marking->Step(kStepSizeInMs, StepOrigin::kV8);
+    marking->AdvanceForTesting(kStepSizeInMs);
   }
   {
@@ -6520,8 +6518,8 @@ HEAP_TEST(Regress670675) {
                                       AllocationType::kOld);
     }
     if (marking->IsStopped()) break;
-    double deadline = heap->MonotonicallyIncreasingTimeInMs() + 1;
-    marking->AdvanceWithDeadline(deadline, StepOrigin::kV8);
+    const double kStepSizeInMs = 1.0;
+    marking->AdvanceForTesting(kStepSizeInMs);
   }
   DCHECK(marking->IsStopped());
 }
@@ -31,7 +31,7 @@ void HeapInternalsBase::SimulateIncrementalMarking(Heap* heap,
   if (!force_completion) return;
   while (!marking->IsComplete()) {
-    marking->Step(kStepSizeInMs, i::StepOrigin::kV8);
+    marking->AdvanceForTesting(kStepSizeInMs);
   }
   CHECK(marking->IsComplete());
 }