Commit 4c65986a authored by Ulan Degenbaev, committed by Commit Bot

[heap] Rework incremental marking scheduling

The new scheduling reduces the main thread marking performed in
tasks and on allocation. It is based on two counters:
- bytes_marked,
- scheduled_bytes_to_mark.

The bytes_marked counter accounts for marking done on both the main
thread and the concurrent threads. The scheduled_bytes_to_mark counter
increases based on allocated bytes and also based on time passed since
the start of marking. The main thread steps are allowed to mark only
the minimal amount if bytes_marked is greater than
scheduled_bytes_to_mark.

This also changes the tasks posted for marking. Previously, only normal
tasks were posted. Now delayed tasks are posted if the marker is
ahead of schedule.

Bug: 926189

Change-Id: I5bc9c33a5ecfc9f8d09f78d08ae277d16a2779ca
Reviewed-on: https://chromium-review.googlesource.com/c/1443056
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59433}
parent 54a18895
......@@ -328,7 +328,7 @@ void GCTracer::Stop(GarbageCollector collector) {
RecordIncrementalMarkingSpeed(current_.incremental_marking_bytes,
current_.incremental_marking_duration);
recorded_incremental_mark_compacts_.Push(
MakeBytesAndDuration(current_.start_object_size, duration));
MakeBytesAndDuration(current_.end_object_size, duration));
RecordGCSumCounters(duration);
ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
......@@ -340,7 +340,7 @@ void GCTracer::Stop(GarbageCollector collector) {
RecordMutatorUtilization(
current_.end_time, duration + current_.incremental_marking_duration);
recorded_mark_compacts_.Push(
MakeBytesAndDuration(current_.start_object_size, duration));
MakeBytesAndDuration(current_.end_object_size, duration));
RecordGCSumCounters(duration);
ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
......@@ -959,9 +959,15 @@ double GCTracer::FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const {
}
double GCTracer::CombinedMarkCompactSpeedInBytesPerMillisecond() {
const double kMinimumMarkingSpeed = 0.5;
if (combined_mark_compact_speed_cache_ > 0)
return combined_mark_compact_speed_cache_;
// MarkCompact speed is more stable than incremental marking speed, because
// there might not be many incremental marking steps because of concurrent
// marking.
combined_mark_compact_speed_cache_ = MarkCompactSpeedInBytesPerMillisecond();
if (combined_mark_compact_speed_cache_ > 0)
return combined_mark_compact_speed_cache_;
const double kMinimumMarkingSpeed = 0.5;
double speed1 = IncrementalMarkingSpeedInBytesPerMillisecond();
double speed2 = FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
if (speed1 < kMinimumMarkingSpeed || speed2 < kMinimumMarkingSpeed) {
......
......@@ -1251,7 +1251,7 @@ void Heap::ReportExternalMemoryPressure() {
// Extend the gc callback flags with external memory flags.
current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
incremental_marking()->AdvanceIncrementalMarking(
incremental_marking()->AdvanceWithDeadline(
deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
}
}
......@@ -3129,14 +3129,11 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
result = true;
break;
case DO_INCREMENTAL_STEP: {
const double remaining_idle_time_in_ms =
incremental_marking()->AdvanceIncrementalMarking(
deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kTask);
if (remaining_idle_time_in_ms > 0.0) {
FinalizeIncrementalMarkingIfComplete(
GarbageCollectionReason::kFinalizeMarkingViaTask);
}
incremental_marking()->AdvanceWithDeadline(
deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kTask);
FinalizeIncrementalMarkingIfComplete(
GarbageCollectionReason::kFinalizeMarkingViaTask);
result = incremental_marking()->IsStopped();
break;
}
......
......@@ -18,15 +18,16 @@ namespace internal {
class IncrementalMarkingJob::Task : public CancelableTask {
public:
static void Step(Heap* heap,
EmbedderHeapTracer::EmbedderStackState stack_state);
static StepResult Step(Heap* heap,
EmbedderHeapTracer::EmbedderStackState stack_state);
Task(Isolate* isolate, IncrementalMarkingJob* job,
EmbedderHeapTracer::EmbedderStackState stack_state)
EmbedderHeapTracer::EmbedderStackState stack_state, TaskType task_type)
: CancelableTask(isolate),
isolate_(isolate),
job_(job),
stack_state_(stack_state) {}
stack_state_(stack_state),
task_type_(task_type) {}
// CancelableTask overrides.
void RunInternal() override;
......@@ -37,6 +38,7 @@ class IncrementalMarkingJob::Task : public CancelableTask {
Isolate* const isolate_;
IncrementalMarkingJob* const job_;
const EmbedderHeapTracer::EmbedderStackState stack_state_;
const TaskType task_type_;
};
void IncrementalMarkingJob::Start(Heap* heap) {
......@@ -44,30 +46,38 @@ void IncrementalMarkingJob::Start(Heap* heap) {
ScheduleTask(heap);
}
void IncrementalMarkingJob::ScheduleTask(Heap* heap) {
if (!task_pending_ && !heap->IsTearingDown()) {
void IncrementalMarkingJob::ScheduleTask(Heap* heap, TaskType task_type) {
if (!IsTaskPending(task_type) && !heap->IsTearingDown()) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
task_pending_ = true;
SetTaskPending(task_type, true);
auto taskrunner =
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
if (taskrunner->NonNestableTasksEnabled()) {
taskrunner->PostNonNestableTask(base::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kEmpty));
if (task_type == TaskType::kNormal) {
if (taskrunner->NonNestableTasksEnabled()) {
taskrunner->PostNonNestableTask(base::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kEmpty, task_type));
} else {
taskrunner->PostTask(base::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kUnknown, task_type));
}
} else {
taskrunner->PostTask(base::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kUnknown));
taskrunner->PostDelayedTask(
base::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kUnknown, task_type),
kDelayInSeconds);
}
}
}
void IncrementalMarkingJob::Task::Step(
StepResult IncrementalMarkingJob::Task::Step(
Heap* heap, EmbedderHeapTracer::EmbedderStackState stack_state) {
const int kIncrementalMarkingDelayMs = 1;
double deadline =
heap->MonotonicallyIncreasingTimeInMs() + kIncrementalMarkingDelayMs;
heap->incremental_marking()->AdvanceIncrementalMarking(
StepResult result = heap->incremental_marking()->AdvanceWithDeadline(
deadline, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
i::StepOrigin::kTask);
{
......@@ -76,6 +86,7 @@ void IncrementalMarkingJob::Task::Step(
heap->FinalizeIncrementalMarkingIfComplete(
GarbageCollectionReason::kFinalizeMarkingViaTask);
}
return result;
}
void IncrementalMarkingJob::Task::RunInternal() {
......@@ -95,12 +106,14 @@ void IncrementalMarkingJob::Task::RunInternal() {
// Clear this flag after StartIncrementalMarking call to avoid
// scheduling a new task when starting incremental marking.
job_->task_pending_ = false;
job_->SetTaskPending(task_type_, false);
if (!incremental_marking->IsStopped()) {
Step(heap, stack_state_);
StepResult step_result = Step(heap, stack_state_);
if (!incremental_marking->IsStopped()) {
job_->ScheduleTask(heap);
job_->ScheduleTask(heap, step_result == StepResult::kDone
? TaskType::kDelayed
: TaskType::kNormal);
}
}
}
......
......@@ -18,18 +18,32 @@ class Isolate;
// step and posts another task until the marking is completed.
class IncrementalMarkingJob {
public:
IncrementalMarkingJob() = default;
enum class TaskType { kNormal, kDelayed };
bool TaskPending() const { return task_pending_; }
IncrementalMarkingJob() V8_NOEXCEPT = default;
void Start(Heap* heap);
void ScheduleTask(Heap* heap);
void ScheduleTask(Heap* heap, TaskType task_type = TaskType::kNormal);
private:
class Task;
bool task_pending_ = false;
static constexpr double kDelayInSeconds = 10.0 / 1000.0;
bool IsTaskPending(TaskType task_type) {
return task_type == TaskType::kNormal ? normal_task_pending_
: delayed_task_pending_;
}
void SetTaskPending(TaskType task_type, bool value) {
if (task_type == TaskType::kNormal) {
normal_task_pending_ = value;
} else {
delayed_task_pending_ = value;
}
}
bool normal_task_pending_ = false;
bool delayed_task_pending_ = false;
};
} // namespace internal
} // namespace v8
......
This diff is collapsed.
......@@ -20,6 +20,7 @@ class Object;
class PagedSpace;
enum class StepOrigin { kV8, kTask };
enum class StepResult { kDone, kMoreWorkRemaining };
class V8_EXPORT_PRIVATE IncrementalMarking {
public:
......@@ -70,8 +71,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
static const size_t kOldGenerationAllocatedThreshold = 256 * KB;
static const size_t kMinStepSizeInBytes = 64 * KB;
static const int kStepSizeInMs = 1;
static const int kMaxStepSizeInMs = 5;
static constexpr double kStepSizeInMs = 1;
static constexpr double kMaxStepSizeInMs = 5;
#ifndef DEBUG
static const intptr_t kActivationThreshold = 8 * MB;
......@@ -164,21 +165,20 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void Epilogue();
// Performs incremental marking steps until deadline_in_ms is reached. It
// returns the remaining time that cannot be used for incremental marking
// anymore because a single step would exceed the deadline.
double AdvanceIncrementalMarking(double deadline_in_ms,
CompletionAction completion_action,
StepOrigin step_origin);
// Performs incremental marking steps and returns before the deadline_in_ms is
// reached. It may return earlier if the marker is already ahead of the
// marking schedule, which is indicated with StepResult::kDone.
StepResult AdvanceWithDeadline(double deadline_in_ms,
CompletionAction completion_action,
StepOrigin step_origin);
void FinalizeSweeping();
size_t Step(size_t bytes_to_process, CompletionAction action,
StepOrigin step_origin);
void StepOnAllocation(size_t bytes_to_process, double max_step_size);
StepResult V8Step(double max_step_size_in_ms, CompletionAction action,
StepOrigin step_origin);
bool ShouldDoEmbedderStep();
void EmbedderStep(double duration);
StepResult EmbedderStep(double duration);
inline void RestartIfNotMarking();
......@@ -290,12 +290,30 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// Visits the object and returns its size.
V8_INLINE int VisitObject(Map map, HeapObject obj);
void IncrementIdleMarkingDelayCounter();
void AdvanceIncrementalMarkingOnAllocation();
// Updates scheduled_bytes_to_mark_ to ensure marking progress based on
// time.
void ScheduleBytesToMarkBasedOnTime(double time_ms);
// Updates scheduled_bytes_to_mark_ to ensure marking progress based on
// allocations.
void ScheduleBytesToMarkBasedOnAllocation();
// Helper functions for ScheduleBytesToMarkBasedOnAllocation.
size_t StepSizeToKeepUpWithAllocations();
size_t StepSizeToMakeProgress();
void AddScheduledBytesToMark(size_t bytes_to_mark);
// Schedules more bytes to mark so that the marker is no longer ahead
// of schedule.
void FastForwardSchedule();
void FastForwardScheduleIfCloseToFinalization();
// Fetches marked byte counters from the concurrent marker.
void FetchBytesMarkedConcurrently();
// Returns the bytes to mark in the current step based on the scheduled
// bytes and already marked bytes.
size_t ComputeStepSizeInBytes(StepOrigin step_origin);
void AdvanceOnAllocation();
void SetState(State s) {
state_ = s;
......@@ -309,8 +327,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
double start_time_ms_;
size_t initial_old_generation_size_;
size_t old_generation_allocation_counter_;
size_t bytes_allocated_;
size_t bytes_marked_ahead_of_schedule_;
size_t bytes_marked_;
size_t scheduled_bytes_to_mark_;
double schedule_update_time_ms_;
// A sample of concurrent_marking()->TotalMarkedBytes() at the last
// incremental marking step. It is used for updating
// bytes_marked_ahead_of_schedule_ with contribution of concurrent marking.
......@@ -325,7 +344,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
bool was_activated_;
bool black_allocation_;
bool finalize_marking_completed_;
bool trace_wrappers_toggle_;
IncrementalMarkingJob incremental_marking_job_;
GCRequestType request_type_;
......
......@@ -86,7 +86,7 @@ void MemoryReducer::NotifyTimer(const Event& event) {
const int kIncrementalMarkingDelayMs = 500;
double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
kIncrementalMarkingDelayMs;
heap()->incremental_marking()->AdvanceIncrementalMarking(
heap()->incremental_marking()->AdvanceWithDeadline(
deadline, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kTask);
heap()->FinalizeIncrementalMarkingIfComplete(
......
......@@ -153,6 +153,7 @@ void SimulateFullSpace(v8::internal::NewSpace* space,
}
void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
const double kStepSizeInMs = 100;
CHECK(FLAG_incremental_marking);
i::IncrementalMarking* marking = heap->incremental_marking();
i::MarkCompactCollector* collector = heap->mark_compact_collector();
......@@ -171,8 +172,8 @@ void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
if (!force_completion) return;
while (!marking->IsComplete()) {
marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
i::StepOrigin::kV8);
marking->V8Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
i::StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
......
......@@ -2269,11 +2269,12 @@ TEST(InstanceOfStubWriteBarrier) {
IncrementalMarking::MarkingState* marking_state = marking->marking_state();
const double kStepSizeInMs = 100;
while (!marking_state->IsBlack(f->code()) && !marking->IsStopped()) {
// Discard any pending GC requests otherwise we will get GC when we enter
// code below.
marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
marking->V8Step(kStepSizeInMs, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
}
CHECK(marking->IsMarking());
......@@ -2364,9 +2365,10 @@ TEST(IdleNotificationFinishMarking) {
CHECK_EQ(CcTest::heap()->gc_count(), initial_gc_count);
const double kStepSizeInMs = 100;
do {
marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
marking->V8Step(kStepSizeInMs, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
} while (
!CcTest::heap()->mark_compact_collector()->marking_worklist()->IsEmpty());
......@@ -3577,8 +3579,6 @@ TEST(LargeObjectSlotRecording) {
// Start incremental marking to active write barrier.
heap::SimulateIncrementalMarking(heap, false);
heap->incremental_marking()->AdvanceIncrementalMarking(
10000000, IncrementalMarking::NO_GC_VIA_STACK_GUARD, StepOrigin::kV8);
// Create references from the large object to the object on the evacuation
// candidate.
......@@ -3588,6 +3588,8 @@ TEST(LargeObjectSlotRecording) {
CHECK(lo->get(i) == old_location);
}
heap::SimulateIncrementalMarking(heap, true);
// Move the evacuation candidate object.
CcTest::CollectAllGarbage();
......@@ -3641,9 +3643,7 @@ TEST(IncrementalMarkingStepMakesBigProgressWithLargeObjects) {
CcTest::heap()->StartIncrementalMarking(
i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting);
}
// This big step should be sufficient to mark the whole array.
marking->Step(100 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
heap::SimulateIncrementalMarking(CcTest::heap());
CHECK(marking->IsComplete() ||
marking->IsReadyToOverApproximateWeakClosure());
}
......@@ -4808,12 +4808,7 @@ TEST(Regress3631) {
Handle<JSReceiver> obj =
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result));
Handle<JSWeakCollection> weak_map(JSWeakCollection::cast(*obj), isolate);
HeapObject weak_map_table = HeapObject::cast(weak_map->table());
IncrementalMarking::MarkingState* marking_state = marking->marking_state();
while (!marking_state->IsBlack(weak_map_table) && !marking->IsStopped()) {
marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
}
SimulateIncrementalMarking(heap);
// Stash the backing store in a handle.
Handle<Object> save(weak_map->table(), isolate);
// The following line will update the backing store.
......@@ -5391,9 +5386,11 @@ TEST(Regress598319) {
// Now we search for a state where we are in incremental marking and have
// only partially marked the large object.
const double kSmallStepSizeInMs = 0.1;
while (!marking->IsComplete()) {
marking->Step(i::KB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
marking->V8Step(kSmallStepSizeInMs,
i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
if (page->IsFlagSet(Page::HAS_PROGRESS_BAR) && page->progress_bar() > 0) {
CHECK_NE(page->progress_bar(), arr.get()->Size());
{
......@@ -5409,9 +5406,11 @@ TEST(Regress598319) {
}
// Finish marking with bigger steps to speed up test.
const double kLargeStepSizeInMs = 1000;
while (!marking->IsComplete()) {
marking->Step(10 * i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
marking->V8Step(kLargeStepSizeInMs,
i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
......@@ -5491,9 +5490,10 @@ TEST(Regress615489) {
v8::HandleScope inner(CcTest::isolate());
isolate->factory()->NewFixedArray(500, TENURED)->Size();
}
const double kStepSizeInMs = 100;
while (!marking->IsComplete()) {
marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
marking->V8Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
......@@ -5550,10 +5550,11 @@ TEST(Regress631969) {
CcTest::CollectGarbage(NEW_SPACE);
// Finish incremental marking.
const double kStepSizeInMs = 100;
IncrementalMarking* marking = heap->incremental_marking();
while (!marking->IsComplete()) {
marking->Step(MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
marking->V8Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
......@@ -5969,7 +5970,7 @@ HEAP_TEST(Regress670675) {
}
if (marking->IsStopped()) break;
double deadline = heap->MonotonicallyIncreasingTimeInMs() + 1;
marking->AdvanceIncrementalMarking(
marking->AdvanceWithDeadline(
deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
}
DCHECK(marking->IsStopped());
......
......@@ -71,7 +71,7 @@ class MockPlatform : public TestPlatform {
void PostDelayedTask(std::unique_ptr<Task> task,
double delay_in_seconds) override {
UNREACHABLE();
task_ = std::move(task);
};
void PostIdleTask(std::unique_ptr<IdleTask> task) override {
......
......@@ -59,7 +59,7 @@ class MockPlatform final : public TestPlatform {
void PostDelayedTask(std::unique_ptr<Task> task,
double delay_in_seconds) override {
UNREACHABLE();
tasks_.push(std::move(task));
};
void PostIdleTask(std::unique_ptr<IdleTask> task) override {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment