Commit 02e57873 authored by Michael Lippautz, committed by Commit Bot

heap: Delay completing marking

Delay completing marking (and thus the atomic GC pause) during JS
executions, increasing the chance to finalize the garbage collection
from a task. This is beneficial as it avoids stack scanning which is
expensive and can keep alive outdated objects in case of unified heap.

Completing will be delayed at most by some overshoot factor (10%).

In addition, the GC keeps the weighted average of previously recorded
time to incremental marking task invocations and bails out if the
task is expected to arrive too late.

Bug: chromium:1044630
Change-Id: I10e63e6aaa88d8488d4415f311016dce2b4e62a2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2030906
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66107}
parent 94723c19
...@@ -931,6 +931,19 @@ void GCTracer::RecordIncrementalMarkingSpeed(size_t bytes, double duration) { ...@@ -931,6 +931,19 @@ void GCTracer::RecordIncrementalMarkingSpeed(size_t bytes, double duration) {
} }
} }
// Folds a new scheduling-to-invocation latency sample (ms) into the running
// average. The first sample seeds the average directly; subsequent samples are
// blended 50/50 with the previous value, so recent samples dominate.
void GCTracer::RecordTimeToIncrementalMarkingTask(double time_to_task) {
  const bool is_first_sample = average_time_to_incremental_marking_task_ == 0.0;
  average_time_to_incremental_marking_task_ =
      is_first_sample
          ? time_to_task
          : (average_time_to_incremental_marking_task_ + time_to_task) / 2;
}
// Returns the weighted average delay (ms) between scheduling an incremental
// marking task and its invocation, as recorded via
// RecordTimeToIncrementalMarkingTask(). 0.0 until the first sample arrives.
double GCTracer::AverageTimeToIncrementalMarkingTask() const {
  return average_time_to_incremental_marking_task_;
}
void GCTracer::RecordEmbedderSpeed(size_t bytes, double duration) { void GCTracer::RecordEmbedderSpeed(size_t bytes, double duration) {
if (duration == 0 || bytes == 0) return; if (duration == 0 || bytes == 0) return;
double current_speed = bytes / duration; double current_speed = bytes / duration;
......
...@@ -353,6 +353,11 @@ class V8_EXPORT_PRIVATE GCTracer { ...@@ -353,6 +353,11 @@ class V8_EXPORT_PRIVATE GCTracer {
void RecordEmbedderSpeed(size_t bytes, double duration); void RecordEmbedderSpeed(size_t bytes, double duration);
// Returns the average time between scheduling and invocation of an
// incremental marking task.
double AverageTimeToIncrementalMarkingTask() const;
void RecordTimeToIncrementalMarkingTask(double time_to_task);
WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats(); WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats();
private: private:
...@@ -446,6 +451,8 @@ class V8_EXPORT_PRIVATE GCTracer { ...@@ -446,6 +451,8 @@ class V8_EXPORT_PRIVATE GCTracer {
double recorded_incremental_marking_speed_; double recorded_incremental_marking_speed_;
double average_time_to_incremental_marking_task_ = 0.0;
double recorded_embedder_speed_ = 0.0; double recorded_embedder_speed_ = 0.0;
// Incremental scopes carry more information than just the duration. The infos // Incremental scopes carry more information than just the duration. The infos
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include "src/execution/isolate.h" #include "src/execution/isolate.h"
#include "src/execution/vm-state-inl.h" #include "src/execution/vm-state-inl.h"
#include "src/heap/embedder-tracing.h" #include "src/heap/embedder-tracing.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h" #include "src/heap/heap-inl.h"
#include "src/heap/heap.h" #include "src/heap/heap.h"
#include "src/heap/incremental-marking.h" #include "src/heap/incremental-marking.h"
...@@ -51,29 +52,25 @@ void IncrementalMarkingJob::ScheduleTask(Heap* heap, TaskType task_type) { ...@@ -51,29 +52,25 @@ void IncrementalMarkingJob::ScheduleTask(Heap* heap, TaskType task_type) {
SetTaskPending(task_type, true); SetTaskPending(task_type, true);
auto taskrunner = auto taskrunner =
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate); V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
const EmbedderHeapTracer::EmbedderStackState stack_state =
taskrunner->NonNestableTasksEnabled()
? EmbedderHeapTracer::EmbedderStackState::kEmpty
: EmbedderHeapTracer::EmbedderStackState::kUnknown;
auto task =
std::make_unique<Task>(heap->isolate(), this, stack_state, task_type);
if (task_type == TaskType::kNormal) { if (task_type == TaskType::kNormal) {
scheduled_time_ = heap->MonotonicallyIncreasingTimeInMs();
if (taskrunner->NonNestableTasksEnabled()) { if (taskrunner->NonNestableTasksEnabled()) {
taskrunner->PostNonNestableTask(std::make_unique<Task>( taskrunner->PostNonNestableTask(std::move(task));
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kEmpty, task_type));
} else { } else {
taskrunner->PostTask(std::make_unique<Task>( taskrunner->PostTask(std::move(task));
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kUnknown, task_type));
} }
} else { } else {
if (taskrunner->NonNestableDelayedTasksEnabled()) { if (taskrunner->NonNestableDelayedTasksEnabled()) {
taskrunner->PostNonNestableDelayedTask( taskrunner->PostNonNestableDelayedTask(std::move(task),
std::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kEmpty, task_type),
kDelayInSeconds); kDelayInSeconds);
} else { } else {
taskrunner->PostDelayedTask( taskrunner->PostDelayedTask(std::move(task), kDelayInSeconds);
std::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kUnknown, task_type),
kDelayInSeconds);
} }
} }
} }
...@@ -98,6 +95,11 @@ void IncrementalMarkingJob::Task::RunInternal() { ...@@ -98,6 +95,11 @@ void IncrementalMarkingJob::Task::RunInternal() {
Heap* heap = isolate()->heap(); Heap* heap = isolate()->heap();
EmbedderStackStateScope scope(heap->local_embedder_heap_tracer(), EmbedderStackStateScope scope(heap->local_embedder_heap_tracer(),
stack_state_); stack_state_);
if (task_type_ == TaskType::kNormal) {
heap->tracer()->RecordTimeToIncrementalMarkingTask(
heap->MonotonicallyIncreasingTimeInMs() - job_->scheduled_time_);
job_->scheduled_time_ = 0.0;
}
IncrementalMarking* incremental_marking = heap->incremental_marking(); IncrementalMarking* incremental_marking = heap->incremental_marking();
if (incremental_marking->IsStopped()) { if (incremental_marking->IsStopped()) {
if (heap->IncrementalMarkingLimitReached() != if (heap->IncrementalMarkingLimitReached() !=
...@@ -122,5 +124,11 @@ void IncrementalMarkingJob::Task::RunInternal() { ...@@ -122,5 +124,11 @@ void IncrementalMarkingJob::Task::RunInternal() {
} }
} }
// Returns how long (ms) the currently pending normal task has been waiting to
// run, or 0.0 when no such task is in flight (scheduled_time_ is cleared when
// the task executes).
double IncrementalMarkingJob::CurrentTimeToTask(Heap* heap) const {
  const bool task_pending = scheduled_time_ != 0.0;
  return task_pending
             ? heap->MonotonicallyIncreasingTimeInMs() - scheduled_time_
             : 0.0;
}
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
...@@ -16,7 +16,7 @@ class Isolate; ...@@ -16,7 +16,7 @@ class Isolate;
// The incremental marking job uses platform tasks to perform incremental // The incremental marking job uses platform tasks to perform incremental
// marking steps. The job posts a foreground task that makes a small (~1ms) // marking steps. The job posts a foreground task that makes a small (~1ms)
// step and posts another task until the marking is completed. // step and posts another task until the marking is completed.
class IncrementalMarkingJob { class IncrementalMarkingJob final {
public: public:
enum class TaskType { kNormal, kDelayed }; enum class TaskType { kNormal, kDelayed };
...@@ -26,14 +26,17 @@ class IncrementalMarkingJob { ...@@ -26,14 +26,17 @@ class IncrementalMarkingJob {
void ScheduleTask(Heap* heap, TaskType task_type = TaskType::kNormal); void ScheduleTask(Heap* heap, TaskType task_type = TaskType::kNormal);
private: double CurrentTimeToTask(Heap* heap) const;
class Task;
static constexpr double kDelayInSeconds = 10.0 / 1000.0;
bool IsTaskPending(TaskType task_type) { bool IsTaskPending(TaskType task_type) const {
return task_type == TaskType::kNormal ? normal_task_pending_ return task_type == TaskType::kNormal ? normal_task_pending_
: delayed_task_pending_; : delayed_task_pending_;
} }
private:
class Task;
static constexpr double kDelayInSeconds = 10.0 / 1000.0;
void SetTaskPending(TaskType task_type, bool value) { void SetTaskPending(TaskType task_type, bool value) {
if (task_type == TaskType::kNormal) { if (task_type == TaskType::kNormal) {
normal_task_pending_ = value; normal_task_pending_ = value;
...@@ -42,6 +45,7 @@ class IncrementalMarkingJob { ...@@ -42,6 +45,7 @@ class IncrementalMarkingJob {
} }
} }
double scheduled_time_ = 0.0;
bool normal_task_pending_ = false; bool normal_task_pending_ = false;
bool delayed_task_pending_ = false; bool delayed_task_pending_ = false;
}; };
......
...@@ -51,16 +51,6 @@ IncrementalMarking::IncrementalMarking(Heap* heap, ...@@ -51,16 +51,6 @@ IncrementalMarking::IncrementalMarking(Heap* heap,
: heap_(heap), : heap_(heap),
collector_(heap->mark_compact_collector()), collector_(heap->mark_compact_collector()),
weak_objects_(weak_objects), weak_objects_(weak_objects),
initial_old_generation_size_(0),
bytes_marked_(0),
scheduled_bytes_to_mark_(0),
schedule_update_time_ms_(0),
bytes_marked_concurrently_(0),
is_compacting_(false),
was_activated_(false),
black_allocation_(false),
finalize_marking_completed_(false),
request_type_(NONE),
new_generation_observer_(this, kYoungGenerationAllocatedThreshold), new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
old_generation_observer_(this, kOldGenerationAllocatedThreshold) { old_generation_observer_(this, kOldGenerationAllocatedThreshold) {
SetState(STOPPED); SetState(STOPPED);
...@@ -285,6 +275,7 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) { ...@@ -285,6 +275,7 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
heap_->tracer()->NotifyIncrementalMarkingStart(); heap_->tracer()->NotifyIncrementalMarkingStart();
start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs(); start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
time_to_force_completion_ = 0.0;
initial_old_generation_size_ = heap_->OldGenerationSizeOfObjects(); initial_old_generation_size_ = heap_->OldGenerationSizeOfObjects();
old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter(); old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
bytes_marked_ = 0; bytes_marked_ = 0;
...@@ -792,8 +783,61 @@ void IncrementalMarking::FinalizeMarking(CompletionAction action) { ...@@ -792,8 +783,61 @@ void IncrementalMarking::FinalizeMarking(CompletionAction action) {
} }
} }
// Estimates how long (ms) until the incremental marking task runs:
// the larger of the historical average task latency and the wait time of the
// task currently in flight. Conservative by taking the maximum of the two.
double IncrementalMarking::CurrentTimeToMarkingTask() const {
  return Max(heap_->tracer()->AverageTimeToIncrementalMarkingTask(),
             incremental_marking_job_.CurrentTimeToTask(heap_));
}
void IncrementalMarking::MarkingComplete(CompletionAction action) { void IncrementalMarking::MarkingComplete(CompletionAction action) {
  // Allowed overshoot percentage of incremental marking walltime.
constexpr double kAllowedOvershoot = 0.1;
// Minimum overshoot in ms. This is used to allow moving away from stack when
// marking was fast.
constexpr double kMinOvershootMs = 50;
if (action == GC_VIA_STACK_GUARD) {
if (time_to_force_completion_ == 0.0) {
const double now = heap_->MonotonicallyIncreasingTimeInMs();
const double overshoot_ms =
Max(kMinOvershootMs, (now - start_time_ms_) * kAllowedOvershoot);
const double time_to_marking_task = CurrentTimeToMarkingTask();
if (time_to_marking_task == 0.0 || time_to_marking_task > overshoot_ms) {
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Not delaying marking completion. time to "
"task: %fms allowed overshoot: %fms\n",
time_to_marking_task, overshoot_ms);
}
} else {
time_to_force_completion_ = now + overshoot_ms;
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Delaying GC via stack guard. time to task: "
"%fms "
"allowed overshoot: %fms\n",
time_to_marking_task, overshoot_ms);
}
// Assuming kAllowedOvershoot > 0.
DCHECK(incremental_marking_job_.IsTaskPending(
IncrementalMarkingJob::TaskType::kNormal));
return;
}
}
if (heap()->MonotonicallyIncreasingTimeInMs() < time_to_force_completion_) {
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Delaying GC via stack guard. time left: "
"%fms\n",
time_to_force_completion_ -
heap_->MonotonicallyIncreasingTimeInMs());
}
return;
}
}
SetState(COMPLETE); SetState(COMPLETE);
// We will set the stack guard to request a GC now. This will mean the rest // We will set the stack guard to request a GC now. This will mean the rest
// of the GC gets performed as soon as possible (we can't do a GC here in a // of the GC gets performed as soon as possible (we can't do a GC here in a
...@@ -809,7 +853,6 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) { ...@@ -809,7 +853,6 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
} }
} }
void IncrementalMarking::Epilogue() { void IncrementalMarking::Epilogue() {
was_activated_ = false; was_activated_ = false;
finalize_marking_completed_ = false; finalize_marking_completed_ = false;
......
...@@ -26,7 +26,7 @@ enum class StepResult { ...@@ -26,7 +26,7 @@ enum class StepResult {
kWaitingForFinalization kWaitingForFinalization
}; };
class V8_EXPORT_PRIVATE IncrementalMarking { class V8_EXPORT_PRIVATE IncrementalMarking final {
public: public:
enum State { STOPPED, SWEEPING, MARKING, COMPLETE }; enum State { STOPPED, SWEEPING, MARKING, COMPLETE };
...@@ -298,31 +298,34 @@ class V8_EXPORT_PRIVATE IncrementalMarking { ...@@ -298,31 +298,34 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
heap_->SetIsMarkingFlag(s >= MARKING); heap_->SetIsMarkingFlag(s >= MARKING);
} }
double CurrentTimeToMarkingTask() const;
Heap* const heap_; Heap* const heap_;
MarkCompactCollector* const collector_; MarkCompactCollector* const collector_;
WeakObjects* weak_objects_; WeakObjects* weak_objects_;
double start_time_ms_; double start_time_ms_ = 0.0;
size_t initial_old_generation_size_; double time_to_force_completion_ = 0.0;
size_t old_generation_allocation_counter_; size_t initial_old_generation_size_ = 0;
size_t bytes_marked_; size_t old_generation_allocation_counter_ = 0;
size_t scheduled_bytes_to_mark_; size_t bytes_marked_ = 0;
double schedule_update_time_ms_; size_t scheduled_bytes_to_mark_ = 0;
double schedule_update_time_ms_ = 0.0;
// A sample of concurrent_marking()->TotalMarkedBytes() at the last // A sample of concurrent_marking()->TotalMarkedBytes() at the last
// incremental marking step. It is used for updating // incremental marking step. It is used for updating
// bytes_marked_ahead_of_schedule_ with contribution of concurrent marking. // bytes_marked_ahead_of_schedule_ with contribution of concurrent marking.
size_t bytes_marked_concurrently_; size_t bytes_marked_concurrently_ = 0;
// Must use SetState() above to update state_ // Must use SetState() above to update state_
State state_; State state_;
bool is_compacting_; bool is_compacting_ = false;
bool was_activated_; bool was_activated_ = false;
bool black_allocation_; bool black_allocation_ = false;
bool finalize_marking_completed_; bool finalize_marking_completed_ = false;
IncrementalMarkingJob incremental_marking_job_; IncrementalMarkingJob incremental_marking_job_;
GCRequestType request_type_; GCRequestType request_type_ = NONE;
Observer new_generation_observer_; Observer new_generation_observer_;
Observer old_generation_observer_; Observer old_generation_observer_;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment