Commit 12fb997a authored by Michael Lippautz, committed by Commit Bot

heap: Simplify helping embedder tracing

Instead of alternating V8 and embedder steps and trying to find a
fixed point where both are empty, just run two phases and check
whether both have been observed as empty individually.

Bug: chromium:1044630, chromium:1049084
Change-Id: I7ffdca3086f1e7f772303272e370c4a6c1f83f24
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2041437
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66153}
parent 700665b6
...@@ -34,10 +34,6 @@ ...@@ -34,10 +34,6 @@
namespace v8 { namespace v8 {
namespace internal { namespace internal {
namespace {
constexpr double kMinSharableStepSizeInBytes = 4 * KB;
}
void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr, void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
size_t size) { size_t size) {
Heap* heap = incremental_marking_->heap(); Heap* heap = incremental_marking_->heap();
...@@ -684,14 +680,15 @@ StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms, ...@@ -684,14 +680,15 @@ StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
constexpr size_t kObjectsToProcessBeforeInterrupt = 500; constexpr size_t kObjectsToProcessBeforeInterrupt = 500;
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING); TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING);
LocalEmbedderHeapTracer* local_tracer = heap_->local_embedder_heap_tracer();
const double start = heap_->MonotonicallyIncreasingTimeInMs(); const double start = heap_->MonotonicallyIncreasingTimeInMs();
double deadline = start + expected_duration_ms; const double deadline = start + expected_duration_ms;
double current; double current;
bool empty_worklist; bool empty_worklist;
bool remote_tracing_done = false;
do { do {
{ {
LocalEmbedderHeapTracer::ProcessingScope scope( LocalEmbedderHeapTracer::ProcessingScope scope(local_tracer);
heap_->local_embedder_heap_tracer());
HeapObject object; HeapObject object;
size_t cnt = 0; size_t cnt = 0;
empty_worklist = true; empty_worklist = true;
...@@ -704,12 +701,13 @@ StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms, ...@@ -704,12 +701,13 @@ StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
} }
} }
} }
heap_->local_embedder_heap_tracer()->Trace(deadline); remote_tracing_done = local_tracer->Trace(deadline);
current = heap_->MonotonicallyIncreasingTimeInMs(); current = heap_->MonotonicallyIncreasingTimeInMs();
} while (!empty_worklist && (current < deadline)); } while (!empty_worklist && !remote_tracing_done && (current < deadline));
heap_->local_embedder_heap_tracer()->SetEmbedderWorklistEmpty(empty_worklist); local_tracer->SetEmbedderWorklistEmpty(empty_worklist);
*duration_ms = current - start; *duration_ms = current - start;
return empty_worklist ? StepResult::kNoImmediateWork return (empty_worklist && remote_tracing_done)
? StepResult::kNoImmediateWork
: StepResult::kMoreWorkRemaining; : StepResult::kMoreWorkRemaining;
} }
...@@ -1086,61 +1084,32 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms, ...@@ -1086,61 +1084,32 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
} }
// The first step after Scavenge will see many allocated bytes. // The first step after Scavenge will see many allocated bytes.
// Cap the step size to distribute the marking work more uniformly. // Cap the step size to distribute the marking work more uniformly.
const double marking_speed =
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond();
size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize( size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
max_step_size_in_ms, max_step_size_in_ms, marking_speed);
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
bytes_to_process = Min(ComputeStepSizeInBytes(step_origin), max_step_size); bytes_to_process = Min(ComputeStepSizeInBytes(step_origin), max_step_size);
const bool only_single_step = bytes_to_process == 0;
bytes_to_process = Max(bytes_to_process, kMinStepSizeInBytes); bytes_to_process = Max(bytes_to_process, kMinStepSizeInBytes);
size_t remaining_bytes = bytes_to_process;
const bool using_embedder_tracer = // Perform a single V8 and a single embedder step. In case both have been
heap_->local_embedder_heap_tracer()->InUse(); // observed as empty back to back, we can finalize.
StepResult embedder_result, v8_result; //
do { // This ignores that case where the embedder finds new V8-side objects. The
v8_result = StepResult::kMoreWorkRemaining; // assumption is that large graphs are well connected and can mostly be
embedder_result = using_embedder_tracer ? StepResult::kMoreWorkRemaining // processed on their own. For small graphs, helping is not necessary.
: StepResult::kNoImmediateWork; v8_bytes_processed = collector_->ProcessMarkingWorklist(bytes_to_process);
const size_t v8_bytes_to_process = StepResult v8_result = marking_worklists()->IsEmpty()
using_embedder_tracer && remaining_bytes > kMinSharableStepSizeInBytes
? remaining_bytes / 2
: remaining_bytes;
size_t v8_bytes_processed_step =
collector_->ProcessMarkingWorklist(v8_bytes_to_process);
v8_bytes_processed += v8_bytes_processed_step;
remaining_bytes = remaining_bytes > v8_bytes_processed_step
? remaining_bytes - v8_bytes_processed_step
: 0;
v8_result = marking_worklists()->IsEmpty()
? StepResult::kNoImmediateWork ? StepResult::kNoImmediateWork
: StepResult::kMoreWorkRemaining; : StepResult::kMoreWorkRemaining;
// Allow the embedder to make marking progress, assuming it gets a share StepResult embedder_result = StepResult::kNoImmediateWork;
// of the time for handling |bytes_to_process|. if (heap_->local_embedder_heap_tracer()->InUse()) {
if (using_embedder_tracer && remaining_bytes > 0) { embedder_deadline = static_cast<double>(bytes_to_process) / marking_speed;
const double marking_speed = embedder_result = EmbedderStep(embedder_deadline, &embedder_duration);
heap_->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond();
double step_duration;
double step_expected_duration = remaining_bytes / marking_speed;
embedder_result = EmbedderStep(step_expected_duration, &step_duration);
embedder_duration += step_duration;
embedder_deadline += step_expected_duration;
size_t embedder_bytes_processed_step =
Max(static_cast<size_t>(step_duration * marking_speed), size_t{1});
remaining_bytes = remaining_bytes > embedder_bytes_processed_step
? remaining_bytes - embedder_bytes_processed_step
: 0;
} else {
break;
} }
} while (!only_single_step &&
(v8_result == StepResult::kMoreWorkRemaining ||
embedder_result == StepResult::kMoreWorkRemaining) &&
(remaining_bytes > 0));
bytes_marked_ += v8_bytes_processed; bytes_marked_ += v8_bytes_processed;
combined_result = CombineStepResults(v8_result, embedder_result); combined_result = CombineStepResults(v8_result, embedder_result);
if (marking_worklists()->IsEmpty()) { if (combined_result == StepResult::kNoImmediateWork) {
if (heap_->local_embedder_heap_tracer()
->ShouldFinalizeIncrementalMarking()) {
if (!finalize_marking_completed_) { if (!finalize_marking_completed_) {
FinalizeMarking(action); FinalizeMarking(action);
FastForwardSchedule(); FastForwardSchedule();
...@@ -1151,7 +1120,6 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms, ...@@ -1151,7 +1120,6 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
combined_result = StepResult::kWaitingForFinalization; combined_result = StepResult::kWaitingForFinalization;
} }
} }
}
if (FLAG_concurrent_marking) { if (FLAG_concurrent_marking) {
marking_worklists()->ShareWorkIfGlobalPoolIsEmpty(); marking_worklists()->ShareWorkIfGlobalPoolIsEmpty();
heap_->concurrent_marking()->RescheduleTasksIfNeeded(); heap_->concurrent_marking()->RescheduleTasksIfNeeded();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment