Commit 562c15d6 authored by Michael Lippautz, committed by Commit Bot

Reland "heap: Delay completing marking"

Delay completing marking (and thus the atomic GC pause) during JS
execution, increasing the chance to finalize the garbage collection
from a task. This is beneficial because it avoids stack scanning,
which is expensive and can keep outdated objects alive in the case of
the unified heap.

Completion is delayed by at most an overshoot factor (10% of the
incremental marking walltime).

In addition, the GC keeps a weighted average of the previously
recorded times from scheduling to invocation of the incremental
marking task, and bails out of delaying if the next task is expected
to arrive too late.
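
To make the heuristic concrete, the two rules can be condensed into a
small standalone sketch. This is an editorial illustration, not part of
the patch: TimeToTaskAverage and CanDelayFinalization are invented
names, but the smoothing rule mirrors
GCTracer::RecordTimeToIncrementalMarkingTask and the decision mirrors
IncrementalMarking::MarkingComplete in the diff below.

#include <algorithm>
#include <cstdio>

// Running average of how long a posted marking task waited before it
// ran. New samples get weight 1/2, so older samples decay geometrically.
struct TimeToTaskAverage {
  double average_ms = 0.0;
  void Record(double sample_ms) {
    average_ms =
        (average_ms == 0.0) ? sample_ms : (average_ms + sample_ms) / 2;
  }
};

// Delay finalization only if the next task is expected within the
// allowed overshoot window: 10% of marking walltime, but at least 50ms.
bool CanDelayFinalization(double expected_time_to_task_ms,
                          double marking_walltime_ms) {
  constexpr double kAllowedOvershoot = 0.1;
  constexpr double kMinOvershootMs = 50;
  const double overshoot_ms =
      std::max(kMinOvershootMs, marking_walltime_ms * kAllowedOvershoot);
  // 0.0 means "no task pending and no samples recorded yet": bail out
  // and complete via the stack guard instead.
  return expected_time_to_task_ms != 0.0 &&
         expected_time_to_task_ms <= overshoot_ms;
}

int main() {
  TimeToTaskAverage average;
  average.Record(4.0);
  average.Record(8.0);  // (4 + 8) / 2 = 6ms expected time to task.
  // Marking ran 700ms, so the window is max(50, 700 * 0.1) = 70ms; a
  // task expected in ~6ms easily makes it, so completion is left to
  // the task.
  std::printf("delay: %s\n",
              CanDelayFinalization(average.average_ms, 700.0) ? "yes" : "no");
  return 0;
}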

Reland: Do not schedule a delayed task when marking has already been
finalized.

This reverts commit 1775684e.

Bug: chromium:1044630, v8:10178
Change-Id: Ic46e9504ce7c200bd1d37ee8d0190fb8727a3f96
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2037436
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66137}
parent b2733d75
--- a/src/heap/gc-tracer.cc
+++ b/src/heap/gc-tracer.cc
@@ -931,6 +931,19 @@ void GCTracer::RecordIncrementalMarkingSpeed(size_t bytes, double duration) {
   }
 }
 
+void GCTracer::RecordTimeToIncrementalMarkingTask(double time_to_task) {
+  if (average_time_to_incremental_marking_task_ == 0.0) {
+    average_time_to_incremental_marking_task_ = time_to_task;
+  } else {
+    average_time_to_incremental_marking_task_ =
+        (average_time_to_incremental_marking_task_ + time_to_task) / 2;
+  }
+}
+
+double GCTracer::AverageTimeToIncrementalMarkingTask() const {
+  return average_time_to_incremental_marking_task_;
+}
+
 void GCTracer::RecordEmbedderSpeed(size_t bytes, double duration) {
   if (duration == 0 || bytes == 0) return;
   double current_speed = bytes / duration;
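A note on the averaging above: the recurrence halves the previous
average and adds half of the new sample, i.e. an exponential moving
average in which the most recent sample carries weight 1/2, the one
before 1/4, and so on. A tiny standalone demonstration (editorial, not
part of the patch):

#include <cstdio>

int main() {
  // Same smoothing rule as RecordTimeToIncrementalMarkingTask.
  double average = 0.0;
  const double samples[] = {16.0, 4.0, 4.0, 4.0};
  for (double sample : samples) {
    average = (average == 0.0) ? sample : (average + sample) / 2;
    std::printf("sample=%4.1fms -> average=%5.2fms\n", sample, average);
  }
  // Prints 16.00, 10.00, 7.00, 5.50: after three 4ms samples the
  // initial 16ms outlier contributes only 16/8 = 2ms, so the estimate
  // tracks recent task latency rather than stale history.
  return 0;
}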
--- a/src/heap/gc-tracer.h
+++ b/src/heap/gc-tracer.h
@@ -353,6 +353,11 @@ class V8_EXPORT_PRIVATE GCTracer {
   void RecordEmbedderSpeed(size_t bytes, double duration);
 
+  // Returns the average time between scheduling and invocation of an
+  // incremental marking task.
+  double AverageTimeToIncrementalMarkingTask() const;
+  void RecordTimeToIncrementalMarkingTask(double time_to_task);
+
   WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats();
 
  private:
@@ -446,6 +451,8 @@ class V8_EXPORT_PRIVATE GCTracer {
   double recorded_incremental_marking_speed_;
 
+  double average_time_to_incremental_marking_task_ = 0.0;
+
   double recorded_embedder_speed_ = 0.0;
 
   // Incremental scopes carry more information than just the duration. The infos
--- a/src/heap/incremental-marking-job.cc
+++ b/src/heap/incremental-marking-job.cc
@@ -8,6 +8,7 @@
 #include "src/execution/isolate.h"
 #include "src/execution/vm-state-inl.h"
 #include "src/heap/embedder-tracing.h"
+#include "src/heap/gc-tracer.h"
 #include "src/heap/heap-inl.h"
 #include "src/heap/heap.h"
 #include "src/heap/incremental-marking.h"
@@ -51,29 +52,25 @@ void IncrementalMarkingJob::ScheduleTask(Heap* heap, TaskType task_type) {
   SetTaskPending(task_type, true);
   auto taskrunner =
       V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
+  const EmbedderHeapTracer::EmbedderStackState stack_state =
+      taskrunner->NonNestableTasksEnabled()
+          ? EmbedderHeapTracer::EmbedderStackState::kEmpty
+          : EmbedderHeapTracer::EmbedderStackState::kUnknown;
+  auto task =
+      std::make_unique<Task>(heap->isolate(), this, stack_state, task_type);
   if (task_type == TaskType::kNormal) {
+    scheduled_time_ = heap->MonotonicallyIncreasingTimeInMs();
     if (taskrunner->NonNestableTasksEnabled()) {
-      taskrunner->PostNonNestableTask(std::make_unique<Task>(
-          heap->isolate(), this,
-          EmbedderHeapTracer::EmbedderStackState::kEmpty, task_type));
+      taskrunner->PostNonNestableTask(std::move(task));
     } else {
-      taskrunner->PostTask(std::make_unique<Task>(
-          heap->isolate(), this,
-          EmbedderHeapTracer::EmbedderStackState::kUnknown, task_type));
+      taskrunner->PostTask(std::move(task));
     }
   } else {
     if (taskrunner->NonNestableDelayedTasksEnabled()) {
-      taskrunner->PostNonNestableDelayedTask(
-          std::make_unique<Task>(
-              heap->isolate(), this,
-              EmbedderHeapTracer::EmbedderStackState::kEmpty, task_type),
-          kDelayInSeconds);
+      taskrunner->PostNonNestableDelayedTask(std::move(task),
+                                             kDelayInSeconds);
     } else {
-      taskrunner->PostDelayedTask(
-          std::make_unique<Task>(
-              heap->isolate(), this,
-              EmbedderHeapTracer::EmbedderStackState::kUnknown, task_type),
-          kDelayInSeconds);
+      taskrunner->PostDelayedTask(std::move(task), kDelayInSeconds);
     }
   }
 }
@@ -98,6 +95,11 @@ void IncrementalMarkingJob::Task::RunInternal() {
   Heap* heap = isolate()->heap();
   EmbedderStackStateScope scope(heap->local_embedder_heap_tracer(),
                                 stack_state_);
+  if (task_type_ == TaskType::kNormal) {
+    heap->tracer()->RecordTimeToIncrementalMarkingTask(
+        heap->MonotonicallyIncreasingTimeInMs() - job_->scheduled_time_);
+    job_->scheduled_time_ = 0.0;
+  }
   IncrementalMarking* incremental_marking = heap->incremental_marking();
   if (incremental_marking->IsStopped()) {
     if (heap->IncrementalMarkingLimitReached() !=
@@ -115,12 +117,21 @@ void IncrementalMarkingJob::Task::RunInternal() {
   if (!incremental_marking->IsStopped()) {
     StepResult step_result = Step(heap);
     if (!incremental_marking->IsStopped()) {
-      job_->ScheduleTask(heap, step_result == StepResult::kNoImmediateWork
-                                   ? TaskType::kDelayed
-                                   : TaskType::kNormal);
+      const TaskType task_type =
+          incremental_marking->finalize_marking_completed() ||
+                  step_result != StepResult::kNoImmediateWork
+              ? TaskType::kNormal
+              : TaskType::kDelayed;
+      job_->ScheduleTask(heap, task_type);
     }
   }
 }
+
+double IncrementalMarkingJob::CurrentTimeToTask(Heap* heap) const {
+  if (scheduled_time_ == 0.0) return 0.0;
+  return heap->MonotonicallyIncreasingTimeInMs() - scheduled_time_;
+}
 
 }  // namespace internal
 }  // namespace v8
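Two details of this file deserve a note. First, the stack state is
fixed when the task is posted: a non-nestable task never runs nested
inside JS execution, so the embedder stack can be declared kEmpty,
which is what allows finalizing without expensive stack scanning; plain
nestable tasks must assume kUnknown. Second, the new scheduled_time_
field only tracks kNormal tasks; how long such a task stays queued is
what feeds the tracer's average. A simplified model of that bookkeeping
(editorial sketch; MarkingJobModel and NowMs() are invented stand-ins,
with NowMs() playing the role of Heap::MonotonicallyIncreasingTimeInMs()):

#include <chrono>
#include <cstdio>

static double NowMs() {
  using namespace std::chrono;
  return duration<double, std::milli>(steady_clock::now().time_since_epoch())
      .count();
}

struct MarkingJobModel {
  double scheduled_time = 0.0;

  // ScheduleTask stamps the post time for normal tasks.
  void OnNormalTaskPosted() { scheduled_time = NowMs(); }

  // Task::RunInternal reports the queueing delay and clears the stamp.
  double OnNormalTaskStarted() {
    const double time_to_task = NowMs() - scheduled_time;
    scheduled_time = 0.0;
    return time_to_task;
  }

  // CurrentTimeToTask: how long the pending task has been in flight so
  // far, 0.0 meaning "nothing pending". This is a lower bound on the
  // real time-to-task, which is why the heap code combines it with the
  // recorded average via Max().
  double CurrentTimeToTask() const {
    return scheduled_time == 0.0 ? 0.0 : NowMs() - scheduled_time;
  }
};

int main() {
  MarkingJobModel job;
  job.OnNormalTaskPosted();
  // ... the platform runs the task a few milliseconds later ...
  std::printf("time to task: %.3fms\n", job.OnNormalTaskStarted());
  return 0;
}

Delayed tasks (kDelayed, posted with kDelayInSeconds = 10ms) skip this
bookkeeping: they are a polling fallback used when marking has no
immediate work, so their latency says nothing about how quickly a
normal task would arrive.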
--- a/src/heap/incremental-marking-job.h
+++ b/src/heap/incremental-marking-job.h
@@ -16,7 +16,7 @@ class Isolate;
 // The incremental marking job uses platform tasks to perform incremental
 // marking steps. The job posts a foreground task that makes a small (~1ms)
 // step and posts another task until the marking is completed.
-class IncrementalMarkingJob {
+class IncrementalMarkingJob final {
  public:
   enum class TaskType { kNormal, kDelayed };
 
@@ -26,14 +26,17 @@ class IncrementalMarkingJob {
   void ScheduleTask(Heap* heap, TaskType task_type = TaskType::kNormal);
 
- private:
-  class Task;
-  static constexpr double kDelayInSeconds = 10.0 / 1000.0;
+  double CurrentTimeToTask(Heap* heap) const;
 
-  bool IsTaskPending(TaskType task_type) {
+  bool IsTaskPending(TaskType task_type) const {
     return task_type == TaskType::kNormal ? normal_task_pending_
                                           : delayed_task_pending_;
   }
 
+ private:
+  class Task;
+  static constexpr double kDelayInSeconds = 10.0 / 1000.0;
+
   void SetTaskPending(TaskType task_type, bool value) {
     if (task_type == TaskType::kNormal) {
       normal_task_pending_ = value;
@@ -42,6 +45,7 @@ class IncrementalMarkingJob {
     }
   }
 
+  double scheduled_time_ = 0.0;
   bool normal_task_pending_ = false;
   bool delayed_task_pending_ = false;
 };
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -55,16 +55,6 @@ IncrementalMarking::IncrementalMarking(Heap* heap,
     : heap_(heap),
      collector_(heap->mark_compact_collector()),
      weak_objects_(weak_objects),
-      initial_old_generation_size_(0),
-      bytes_marked_(0),
-      scheduled_bytes_to_mark_(0),
-      schedule_update_time_ms_(0),
-      bytes_marked_concurrently_(0),
-      is_compacting_(false),
-      was_activated_(false),
-      black_allocation_(false),
-      finalize_marking_completed_(false),
-      request_type_(NONE),
      new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
      old_generation_observer_(this, kOldGenerationAllocatedThreshold) {
   SetState(STOPPED);
@@ -289,6 +279,7 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
   heap_->tracer()->NotifyIncrementalMarkingStart();
 
   start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
+  time_to_force_completion_ = 0.0;
   initial_old_generation_size_ = heap_->OldGenerationSizeOfObjects();
   old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
   bytes_marked_ = 0;
@@ -796,8 +787,61 @@ void IncrementalMarking::FinalizeMarking(CompletionAction action) {
   }
 }
 
+double IncrementalMarking::CurrentTimeToMarkingTask() const {
+  const double recorded_time_to_marking_task =
+      heap_->tracer()->AverageTimeToIncrementalMarkingTask();
+  const double current_time_to_marking_task =
+      incremental_marking_job_.CurrentTimeToTask(heap_);
+  return Max(recorded_time_to_marking_task, current_time_to_marking_task);
+}
+
 void IncrementalMarking::MarkingComplete(CompletionAction action) {
+  // Allowed overshoot percentage of incremental marking walltime.
+  constexpr double kAllowedOvershoot = 0.1;
+  // Minimum overshoot in ms. This is used to allow moving away from stack when
+  // marking was fast.
+  constexpr double kMinOvershootMs = 50;
+
+  if (action == GC_VIA_STACK_GUARD) {
+    if (time_to_force_completion_ == 0.0) {
+      const double now = heap_->MonotonicallyIncreasingTimeInMs();
+      const double overshoot_ms =
+          Max(kMinOvershootMs, (now - start_time_ms_) * kAllowedOvershoot);
+      const double time_to_marking_task = CurrentTimeToMarkingTask();
+      if (time_to_marking_task == 0.0 || time_to_marking_task > overshoot_ms) {
+        if (FLAG_trace_incremental_marking) {
+          heap()->isolate()->PrintWithTimestamp(
+              "[IncrementalMarking] Not delaying marking completion. time to "
+              "task: %fms allowed overshoot: %fms\n",
+              time_to_marking_task, overshoot_ms);
+        }
+      } else {
+        time_to_force_completion_ = now + overshoot_ms;
+        if (FLAG_trace_incremental_marking) {
+          heap()->isolate()->PrintWithTimestamp(
+              "[IncrementalMarking] Delaying GC via stack guard. time to task: "
+              "%fms "
+              "allowed overshoot: %fms\n",
+              time_to_marking_task, overshoot_ms);
+        }
+        // Assuming kAllowedOvershoot > 0.
+        DCHECK(incremental_marking_job_.IsTaskPending(
+            IncrementalMarkingJob::TaskType::kNormal));
+        return;
+      }
+    }
+    if (heap()->MonotonicallyIncreasingTimeInMs() < time_to_force_completion_) {
+      if (FLAG_trace_incremental_marking) {
+        heap()->isolate()->PrintWithTimestamp(
+            "[IncrementalMarking] Delaying GC via stack guard. time left: "
+            "%fms\n",
+            time_to_force_completion_ -
+                heap_->MonotonicallyIncreasingTimeInMs());
+      }
+      return;
+    }
+  }
+
   SetState(COMPLETE);
   // We will set the stack guard to request a GC now. This will mean the rest
   // of the GC gets performed as soon as possible (we can't do a GC here in a
@@ -813,7 +857,6 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
   }
 }
 
 void IncrementalMarking::Epilogue() {
   was_activated_ = false;
   finalize_marking_completed_ = false;
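For a concrete walk through MarkingComplete's two phases, assume
hypothetical timings: marking started at t = 0ms, the stack guard fires
at t = 400ms, the recorded average time-to-task is 5ms, and the pending
task was posted 3ms ago. The numbers below are illustrative only.

#include <algorithm>
#include <cassert>

int main() {
  constexpr double kAllowedOvershoot = 0.1;
  constexpr double kMinOvershootMs = 50;

  // Phase 1: the first GC_VIA_STACK_GUARD request sizes the overshoot
  // window from the marking walltime so far.
  const double now_ms = 400.0;
  const double overshoot_ms =
      std::max(kMinOvershootMs, now_ms * kAllowedOvershoot);  // max(50, 40)
  assert(overshoot_ms == 50.0);

  // Expected time to the marking task: Max(recorded average, elapsed wait).
  const double time_to_marking_task = std::max(5.0, 3.0);
  assert(time_to_marking_task <= overshoot_ms);  // Within budget: delay.
  const double time_to_force_completion = now_ms + overshoot_ms;  // 450ms.

  // Phase 2: later stack-guard requests keep bailing out until the
  // deadline, after which completion proceeds on the stack guard anyway.
  assert(430.0 < time_to_force_completion);  // At t=430ms, still delaying.
  return 0;
}

If the task had instead been expected in, say, 60ms, or no normal task
were pending at all, phase 1 would fall through immediately and the GC
would complete via the stack guard without any delay.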
--- a/src/heap/incremental-marking.h
+++ b/src/heap/incremental-marking.h
@@ -26,7 +26,7 @@ enum class StepResult {
   kWaitingForFinalization
 };
 
-class V8_EXPORT_PRIVATE IncrementalMarking {
+class V8_EXPORT_PRIVATE IncrementalMarking final {
  public:
   enum State { STOPPED, SWEEPING, MARKING, COMPLETE };
 
@@ -298,31 +298,34 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
     heap_->SetIsMarkingFlag(s >= MARKING);
   }
 
+  double CurrentTimeToMarkingTask() const;
+
   Heap* const heap_;
   MarkCompactCollector* const collector_;
   WeakObjects* weak_objects_;
 
-  double start_time_ms_;
-  size_t initial_old_generation_size_;
-  size_t old_generation_allocation_counter_;
-  size_t bytes_marked_;
-  size_t scheduled_bytes_to_mark_;
-  double schedule_update_time_ms_;
+  double start_time_ms_ = 0.0;
+  double time_to_force_completion_ = 0.0;
+  size_t initial_old_generation_size_ = 0;
+  size_t old_generation_allocation_counter_ = 0;
+  size_t bytes_marked_ = 0;
+  size_t scheduled_bytes_to_mark_ = 0;
+  double schedule_update_time_ms_ = 0.0;
   // A sample of concurrent_marking()->TotalMarkedBytes() at the last
   // incremental marking step. It is used for updating
   // bytes_marked_ahead_of_schedule_ with contribution of concurrent marking.
-  size_t bytes_marked_concurrently_;
+  size_t bytes_marked_concurrently_ = 0;
 
   // Must use SetState() above to update state_
   State state_;
 
-  bool is_compacting_;
-  bool was_activated_;
-  bool black_allocation_;
-  bool finalize_marking_completed_;
+  bool is_compacting_ = false;
+  bool was_activated_ = false;
+  bool black_allocation_ = false;
+  bool finalize_marking_completed_ = false;
+
   IncrementalMarkingJob incremental_marking_job_;
-  GCRequestType request_type_;
+  GCRequestType request_type_ = NONE;
 
   Observer new_generation_observer_;
   Observer old_generation_observer_;