Commit eb201d6f authored by Hannes Payer, committed by Commit Bot

[heap] Remove idle Scavenge task and schedule regular foreground tasks.


Bug: chromium:1054771
Change-Id: I0773d60e5cd3d3b181446c11943eb19d5336b420
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2066982
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66446}
parent 645ca255
......@@ -1017,8 +1017,6 @@ DEFINE_BOOL(young_generation_large_objects, true,
"allocates large objects by default in the young generation large "
"object space")
DEFINE_BOOL(idle_time_scavenge, true, "Perform scavenges in idle time.")
// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
DEFINE_BOOL(debug_code, DEBUG_BOOL,
"generate extra code (assertions) for debugging")
......@@ -1687,7 +1685,6 @@ DEFINE_BOOL(predictable_gc_schedule, false,
DEFINE_VALUE_IMPLICATION(predictable_gc_schedule, min_semi_space_size, 4)
DEFINE_VALUE_IMPLICATION(predictable_gc_schedule, max_semi_space_size, 4)
DEFINE_VALUE_IMPLICATION(predictable_gc_schedule, heap_growing_percent, 30)
DEFINE_NEG_IMPLICATION(predictable_gc_schedule, idle_time_scavenge)
DEFINE_NEG_IMPLICATION(predictable_gc_schedule, memory_reducer)
//
......
......@@ -183,13 +183,13 @@ struct Heap::StrongRootsList {
StrongRootsList* next;
};
class IdleScavengeObserver : public AllocationObserver {
class ScavengeTaskObserver : public AllocationObserver {
public:
IdleScavengeObserver(Heap* heap, intptr_t step_size)
ScavengeTaskObserver(Heap* heap, intptr_t step_size)
: AllocationObserver(step_size), heap_(heap) {}
void Step(int bytes_allocated, Address, size_t) override {
heap_->ScheduleIdleScavengeIfNeeded(bytes_allocated);
heap_->ScheduleScavengeTaskIfNeeded();
}
private:
......@@ -1247,11 +1247,9 @@ void Heap::HandleGCRequest() {
}
}
void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
DCHECK(FLAG_idle_time_scavenge);
void Heap::ScheduleScavengeTaskIfNeeded() {
DCHECK_NOT_NULL(scavenge_job_);
scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
scavenge_job_->ScheduleTaskIfNeeded(this);
}
TimedHistogram* Heap::GCTypePriorityTimer(GarbageCollector collector) {
......@@ -3966,8 +3964,8 @@ const char* Heap::GarbageCollectionReasonToString(
return "full hash-table";
case GarbageCollectionReason::kHeapProfiler:
return "heap profiler";
case GarbageCollectionReason::kIdleTask:
return "idle task";
case GarbageCollectionReason::kTask:
return "task";
case GarbageCollectionReason::kLastResort:
return "last resort";
case GarbageCollectionReason::kLowMemoryNotification:
......@@ -5209,12 +5207,10 @@ void Heap::SetUpSpaces() {
}
#endif // ENABLE_MINOR_MC
if (FLAG_idle_time_scavenge) {
scavenge_job_.reset(new ScavengeJob());
idle_scavenge_observer_.reset(new IdleScavengeObserver(
this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask));
new_space()->AddAllocationObserver(idle_scavenge_observer_.get());
}
scavenge_job_.reset(new ScavengeJob());
scavenge_task_observer_.reset(new ScavengeTaskObserver(
this, ScavengeJob::YoungGenerationTaskTriggerSize(this)));
new_space()->AddAllocationObserver(scavenge_task_observer_.get());
SetGetExternallyAllocatedMemoryInBytesCallback(
DefaultGetExternallyAllocatedMemoryInBytesCallback);
......@@ -5394,11 +5390,9 @@ void Heap::TearDown() {
}
}
if (FLAG_idle_time_scavenge) {
new_space()->RemoveAllocationObserver(idle_scavenge_observer_.get());
idle_scavenge_observer_.reset();
scavenge_job_.reset();
}
new_space()->RemoveAllocationObserver(scavenge_task_observer_.get());
scavenge_task_observer_.reset();
scavenge_job_.reset();
if (FLAG_stress_marking > 0) {
RemoveAllocationObserversFromAllSpaces(stress_marking_observer_,
......
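Note: the new ScavengeTaskObserver hooks into new space through V8's AllocationObserver mechanism, with the task trigger size used as the observer's step size, so Step() fires once roughly that many bytes have been allocated in the young generation. Below is a minimal standalone sketch of that pattern, in plain C++ rather than V8's actual AllocationObserver API; the 1 MB trigger is a hypothetical number chosen for illustration.

#include <cstddef>
#include <functional>
#include <iostream>
#include <utility>

class SimpleAllocationObserver {
 public:
  SimpleAllocationObserver(size_t step_size, std::function<void()> on_step)
      : step_size_(step_size), on_step_(std::move(on_step)) {}

  // Called by the space on every allocation; fires the callback each time
  // another step_size_ bytes have been allocated since the last step.
  void AllocationPerformed(size_t bytes) {
    allocated_since_step_ += bytes;
    while (allocated_since_step_ >= step_size_) {
      allocated_since_step_ -= step_size_;
      on_step_();  // In the diff above, this is where ScheduleScavengeTaskIfNeeded() runs.
    }
  }

 private:
  size_t step_size_;
  size_t allocated_since_step_ = 0;
  std::function<void()> on_step_;
};

int main() {
  // Hypothetical trigger of 1 MB; V8 derives it from new-space capacity.
  SimpleAllocationObserver observer(
      1024 * 1024, [] { std::cout << "schedule scavenge task\n"; });
  for (int i = 0; i < 5; ++i) observer.AllocationPerformed(300 * 1024);
  return 0;
}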
......@@ -127,7 +127,7 @@ enum class GarbageCollectionReason {
kFinalizeMarkingViaTask = 9,
kFullHashtable = 10,
kHeapProfiler = 11,
kIdleTask = 12,
kTask = 12,
kLastResort = 13,
kLowMemoryNotification = 14,
kMakeHeapIterable = 15,
......@@ -1557,7 +1557,6 @@ class Heap {
static const int kOldSurvivalRateLowThreshold = 10;
static const int kMaxMarkCompactsInIdleRound = 7;
static const int kIdleScavengeThreshold = 5;
static const int kInitialFeedbackCapacity = 256;
......@@ -1823,7 +1822,12 @@ class Heap {
// ===========================================================================
bool RecentIdleNotificationHappened();
void ScheduleIdleScavengeIfNeeded(int bytes_allocated);
// ===========================================================================
// GC Tasks. =================================================================
// ===========================================================================
void ScheduleScavengeTaskIfNeeded();
// ===========================================================================
// Allocation methods. =======================================================
......@@ -2105,7 +2109,7 @@ class Heap {
std::unique_ptr<ObjectStats> live_object_stats_;
std::unique_ptr<ObjectStats> dead_object_stats_;
std::unique_ptr<ScavengeJob> scavenge_job_;
std::unique_ptr<AllocationObserver> idle_scavenge_observer_;
std::unique_ptr<AllocationObserver> scavenge_task_observer_;
std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
StrongRootsList* strong_roots_list_ = nullptr;
......@@ -2204,7 +2208,7 @@ class Heap {
friend class GCCallbacksScope;
friend class GCTracer;
friend class HeapObjectIterator;
friend class IdleScavengeObserver;
friend class ScavengeTaskObserver;
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
friend class OldLargeObjectSpace;
......
......@@ -105,7 +105,7 @@ void IncrementalMarkingJob::Task::RunInternal() {
if (heap->IncrementalMarkingLimitReached() !=
Heap::IncrementalMarkingLimit::kNoLimit) {
heap->StartIncrementalMarking(heap->GCFlagsForIncrementalMarking(),
GarbageCollectionReason::kIdleTask,
GarbageCollectionReason::kTask,
kGCCallbackScheduleIdleGarbageCollection);
}
}
......
......@@ -7,112 +7,59 @@
#include "src/base/platform/time.h"
#include "src/execution/isolate.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
#include "src/init/v8.h"
namespace v8 {
namespace internal {
class ScavengeJob::Task : public CancelableTask {
public:
Task(Isolate* isolate, ScavengeJob* job)
: CancelableTask(isolate), isolate_(isolate), job_(job) {}
const double ScavengeJob::kMaxAllocationLimitAsFractionOfNewSpace = 0.8;
// CancelableTask overrides.
void RunInternal() override;
void ScavengeJob::IdleTask::RunInternal(double deadline_in_seconds) {
VMState<GC> state(isolate());
TRACE_EVENT_CALL_STATS_SCOPED(isolate(), "v8", "V8.Task");
Heap* heap = isolate()->heap();
double deadline_in_ms =
deadline_in_seconds *
static_cast<double>(base::Time::kMillisecondsPerSecond);
double start_ms = heap->MonotonicallyIncreasingTimeInMs();
double idle_time_in_ms = deadline_in_ms - start_ms;
double scavenge_speed_in_bytes_per_ms =
heap->tracer()->ScavengeSpeedInBytesPerMillisecond();
size_t new_space_size = heap->new_space()->Size();
size_t new_space_capacity = heap->new_space()->Capacity();
Isolate* isolate() const { return isolate_; }
job_->NotifyIdleTask();
private:
Isolate* const isolate_;
ScavengeJob* const job_;
};
if (ReachedIdleAllocationLimit(scavenge_speed_in_bytes_per_ms, new_space_size,
new_space_capacity)) {
if (EnoughIdleTimeForScavenge(
idle_time_in_ms, scavenge_speed_in_bytes_per_ms, new_space_size)) {
heap->CollectGarbage(NEW_SPACE, GarbageCollectionReason::kIdleTask);
} else {
// Immediately request another idle task that can get larger idle time.
job_->RescheduleIdleTask(heap);
}
}
size_t ScavengeJob::YoungGenerationTaskTriggerSize(Heap* heap) {
static constexpr double kTaskTriggerFactor = 0.618;
return heap->new_space()->Capacity() * kTaskTriggerFactor;
}
bool ScavengeJob::ReachedIdleAllocationLimit(
double scavenge_speed_in_bytes_per_ms, size_t new_space_size,
size_t new_space_capacity) {
if (scavenge_speed_in_bytes_per_ms == 0) {
scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
}
// Set the allocation limit to the number of bytes we can scavenge in an
// average idle task.
double allocation_limit = kAverageIdleTimeMs * scavenge_speed_in_bytes_per_ms;
// Keep the limit smaller than the new space capacity.
allocation_limit =
Min<double>(allocation_limit,
new_space_capacity * kMaxAllocationLimitAsFractionOfNewSpace);
// Adjust the limit to take into account bytes that will be allocated until
// the next check and keep the limit large enough to avoid scavenges in tiny
// new space.
allocation_limit =
Max<double>(allocation_limit - kBytesAllocatedBeforeNextIdleTask,
kMinAllocationLimit);
return allocation_limit <= new_space_size;
bool ScavengeJob::YoungGenerationSizeTaskTriggerReached(Heap* heap) {
return heap->new_space()->Size() >= YoungGenerationTaskTriggerSize(heap);
}
bool ScavengeJob::EnoughIdleTimeForScavenge(
double idle_time_in_ms, double scavenge_speed_in_bytes_per_ms,
size_t new_space_size) {
if (scavenge_speed_in_bytes_per_ms == 0) {
scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
void ScavengeJob::ScheduleTaskIfNeeded(Heap* heap) {
if (!task_pending_ && !heap->IsTearingDown() &&
YoungGenerationSizeTaskTriggerReached(heap)) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
auto taskrunner =
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
taskrunner->PostTask(std::make_unique<Task>(heap->isolate(), this));
task_pending_ = true;
}
return new_space_size <= idle_time_in_ms * scavenge_speed_in_bytes_per_ms;
}
void ScavengeJob::Task::RunInternal() {
VMState<GC> state(isolate());
TRACE_EVENT_CALL_STATS_SCOPED(isolate(), "v8", "V8.Task");
void ScavengeJob::RescheduleIdleTask(Heap* heap) {
// Make sure that we don't reschedule more than one time.
// Otherwise, we might spam the scheduler with idle tasks.
if (!idle_task_rescheduled_) {
ScheduleIdleTask(heap);
idle_task_rescheduled_ = true;
if (ScavengeJob::YoungGenerationSizeTaskTriggerReached(isolate()->heap())) {
isolate()->heap()->CollectGarbage(NEW_SPACE,
GarbageCollectionReason::kTask);
}
}
void ScavengeJob::ScheduleIdleTaskIfNeeded(Heap* heap, int bytes_allocated) {
bytes_allocated_since_the_last_task_ += bytes_allocated;
if (bytes_allocated_since_the_last_task_ >=
static_cast<int>(kBytesAllocatedBeforeNextIdleTask)) {
ScheduleIdleTask(heap);
bytes_allocated_since_the_last_task_ = 0;
idle_task_rescheduled_ = false;
}
job_->set_task_pending(false);
}
void ScavengeJob::ScheduleIdleTask(Heap* heap) {
if (!idle_task_pending_ && !heap->IsTearingDown()) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
if (V8::GetCurrentPlatform()->IdleTasksEnabled(isolate)) {
idle_task_pending_ = true;
auto task = std::make_unique<IdleTask>(heap->isolate(), this);
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate)->PostIdleTask(
std::move(task));
}
}
}
} // namespace internal
} // namespace v8
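Note: YoungGenerationTaskTriggerSize() above places the trigger at 61.8% of new-space capacity, and Task::RunInternal() re-checks the trigger before collecting, so a scavenge that already happened for another reason (and emptied new space) turns the posted task into a no-op. A worked example under an assumed capacity follows; the 16 MB figure is hypothetical, not a V8 default.

#include <cstddef>
#include <iostream>

int main() {
  constexpr size_t kMB = 1024 * 1024;
  constexpr size_t kAssumedNewSpaceCapacity = 16 * kMB;  // hypothetical capacity
  constexpr double kTaskTriggerFactor = 0.618;           // from scavenge-job.cc above
  const size_t trigger =
      static_cast<size_t>(kAssumedNewSpaceCapacity * kTaskTriggerFactor);
  std::cout << "task trigger ~= " << trigger / static_cast<double>(kMB)
            << " MB\n";  // prints roughly 9.888
  return 0;
}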
......@@ -5,7 +5,6 @@
#ifndef V8_HEAP_SCAVENGE_JOB_H_
#define V8_HEAP_SCAVENGE_JOB_H_
#include "src/common/globals.h"
#include "src/tasks/cancelable-task.h"
namespace v8 {
......@@ -14,67 +13,24 @@ namespace internal {
class Heap;
class Isolate;
// This class posts idle tasks and performs scavenges in the idle tasks.
class V8_EXPORT_PRIVATE ScavengeJob {
// The scavenge job uses platform tasks to perform a young generation
// Scavenge garbage collection. The job posts a foreground task.
class ScavengeJob {
public:
class IdleTask : public CancelableIdleTask {
public:
explicit IdleTask(Isolate* isolate, ScavengeJob* job)
: CancelableIdleTask(isolate), isolate_(isolate), job_(job) {}
// CancelableIdleTask overrides.
void RunInternal(double deadline_in_seconds) override;
ScavengeJob() V8_NOEXCEPT = default;
Isolate* isolate() { return isolate_; }
void ScheduleTaskIfNeeded(Heap* heap);
private:
Isolate* isolate_;
ScavengeJob* job_;
};
static size_t YoungGenerationTaskTriggerSize(Heap* heap);
ScavengeJob()
: idle_task_pending_(false),
idle_task_rescheduled_(false),
bytes_allocated_since_the_last_task_(0) {}
// Posts an idle task if the cumulative bytes allocated since the last
// idle task exceed kBytesAllocatedBeforeNextIdleTask.
void ScheduleIdleTaskIfNeeded(Heap* heap, int bytes_allocated);
// Posts an idle task ignoring the bytes allocated, but makes sure
// that the new idle task cannot reschedule again.
// This prevents infinite rescheduling.
void RescheduleIdleTask(Heap* heap);
bool IdleTaskPending() { return idle_task_pending_; }
void NotifyIdleTask() { idle_task_pending_ = false; }
bool IdleTaskRescheduled() { return idle_task_rescheduled_; }
static bool ReachedIdleAllocationLimit(double scavenge_speed_in_bytes_per_ms,
size_t new_space_size,
size_t new_space_capacity);
private:
class Task;
static bool EnoughIdleTimeForScavenge(double idle_time_ms,
double scavenge_speed_in_bytes_per_ms,
size_t new_space_size);
static bool YoungGenerationSizeTaskTriggerReached(Heap* heap);
// If we haven't recorded any scavenger events yet, we use a conservative
// lower bound for the scavenger speed.
static const int kInitialScavengeSpeedInBytesPerMs = 256 * KB;
// Estimate of the average idle time that an idle task gets.
static const int kAverageIdleTimeMs = 5;
// The number of bytes to be allocated in new space before the next idle
// task is posted.
static const size_t kBytesAllocatedBeforeNextIdleTask = 1024 * KB;
// The minimum size of allocated new space objects to trigger a scavenge.
static const size_t kMinAllocationLimit = 512 * KB;
// The allocation limit cannot exceed this fraction of the new space capacity.
static const double kMaxAllocationLimitAsFractionOfNewSpace;
void set_task_pending(bool value) { task_pending_ = value; }
private:
void ScheduleIdleTask(Heap* heap);
bool idle_task_pending_;
bool idle_task_rescheduled_;
int bytes_allocated_since_the_last_task_;
bool task_pending_ = false;
};
} // namespace internal
} // namespace v8
......
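Note: the new header reduces ScavengeJob to a pending flag plus the two trigger helpers. The following is a simplified standalone model of the resulting control flow, using plain C++ with a fake task runner instead of the v8::Platform foreground task runner; all sizes are hypothetical.

#include <cstddef>
#include <deque>
#include <functional>
#include <iostream>
#include <utility>

// Stand-in for a foreground task runner: tasks are queued and run later.
struct FakeTaskRunner {
  std::deque<std::function<void()>> queue;
  void PostTask(std::function<void()> task) { queue.push_back(std::move(task)); }
  void RunAll() {
    while (!queue.empty()) {
      auto task = std::move(queue.front());
      queue.pop_front();
      task();
    }
  }
};

class SketchScavengeJob {
 public:
  SketchScavengeJob(FakeTaskRunner* runner, size_t trigger)
      : runner_(runner), trigger_(trigger) {}

  void set_new_space_size(size_t size) { new_space_size_ = size; }

  // Mirrors ScheduleTaskIfNeeded: post at most one task at a time, and only
  // once the young generation has grown past the trigger size.
  void ScheduleTaskIfNeeded() {
    if (task_pending_ || new_space_size_ < trigger_) return;
    task_pending_ = true;
    runner_->PostTask([this] {
      // Mirrors Task::RunInternal: re-check the trigger, since new space may
      // already have been scavenged for another reason before this task ran.
      if (new_space_size_ >= trigger_) {
        std::cout << "scavenge young generation (reason: task)\n";
        new_space_size_ = 0;  // stand-in for the collection emptying new space
      }
      task_pending_ = false;
    });
  }

 private:
  FakeTaskRunner* runner_;
  size_t trigger_;
  bool task_pending_ = false;
  size_t new_space_size_ = 0;
};

int main() {
  FakeTaskRunner runner;
  SketchScavengeJob job(&runner, /*trigger=*/9 * 1024 * 1024);
  job.set_new_space_size(10 * 1024 * 1024);
  job.ScheduleTaskIfNeeded();  // posts one task
  job.ScheduleTaskIfNeeded();  // ignored: a task is already pending
  runner.RunAll();             // the task runs and performs the "scavenge"
  return 0;
}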
......@@ -38,7 +38,9 @@ v8_executable("unittests") {
"//testing/gtest",
]
data_deps = [ "../../tools:v8_testrunner" ]
data_deps = [
"../../tools:v8_testrunner",
]
data = [
"testcfg.py",
......@@ -175,7 +177,6 @@ v8_source_set("unittests_sources") {
"heap/memory-reducer-unittest.cc",
"heap/object-stats-unittest.cc",
"heap/off-thread-factory-unittest.cc",
"heap/scavenge-job-unittest.cc",
"heap/slot-set-unittest.cc",
"heap/spaces-unittest.cc",
"heap/unmapper-unittest.cc",
......
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <limits>
#include "src/common/globals.h"
#include "src/heap/scavenge-job.h"
#include "src/utils/utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
const size_t kScavengeSpeedInBytesPerMs = 500 * KB;
const size_t kNewSpaceCapacity = 8 * MB;
TEST(ScavengeJob, AllocationLimitEmptyNewSpace) {
EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
kScavengeSpeedInBytesPerMs, 0, kNewSpaceCapacity));
}
TEST(ScavengeJob, AllocationLimitFullNewSpace) {
EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
kScavengeSpeedInBytesPerMs, kNewSpaceCapacity, kNewSpaceCapacity));
}
TEST(ScavengeJob, AllocationLimitUnknownScavengeSpeed) {
size_t expected_size = ScavengeJob::kInitialScavengeSpeedInBytesPerMs *
ScavengeJob::kAverageIdleTimeMs -
ScavengeJob::kBytesAllocatedBeforeNextIdleTask;
expected_size = Max(expected_size, ScavengeJob::kMinAllocationLimit);
EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(0, expected_size - 1,
kNewSpaceCapacity));
EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(0, expected_size,
kNewSpaceCapacity));
}
TEST(ScavengeJob, AllocationLimitLowScavengeSpeed) {
size_t scavenge_speed = 1 * KB;
EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
scavenge_speed, ScavengeJob::kMinAllocationLimit - 1, kNewSpaceCapacity));
EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
scavenge_speed, ScavengeJob::kMinAllocationLimit, kNewSpaceCapacity));
}
TEST(ScavengeJob, AllocationLimitAverageScavengeSpeed) {
size_t expected_size =
kScavengeSpeedInBytesPerMs * ScavengeJob::kAverageIdleTimeMs -
ScavengeJob::kBytesAllocatedBeforeNextIdleTask;
EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
kScavengeSpeedInBytesPerMs, ScavengeJob::kMinAllocationLimit,
kNewSpaceCapacity));
EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
kScavengeSpeedInBytesPerMs, expected_size - 1, kNewSpaceCapacity));
EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
kScavengeSpeedInBytesPerMs, expected_size, kNewSpaceCapacity));
}
TEST(ScavengeJob, AllocationLimitHighScavengeSpeed) {
size_t scavenge_speed = kNewSpaceCapacity;
size_t expected_size =
static_cast<size_t>(
kNewSpaceCapacity *
ScavengeJob::kMaxAllocationLimitAsFractionOfNewSpace) -
ScavengeJob::kBytesAllocatedBeforeNextIdleTask;
EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
scavenge_speed, expected_size - 1, kNewSpaceCapacity));
EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
scavenge_speed, expected_size + 1, kNewSpaceCapacity));
}
TEST(ScavengeJob, EnoughIdleTimeForScavengeUnknownScavengeSpeed) {
size_t scavenge_speed = ScavengeJob::kInitialScavengeSpeedInBytesPerMs;
size_t new_space_size = 1 * MB;
size_t expected_time = (new_space_size + scavenge_speed - 1) / scavenge_speed;
EXPECT_TRUE(
ScavengeJob::EnoughIdleTimeForScavenge(expected_time, 0, new_space_size));
EXPECT_FALSE(ScavengeJob::EnoughIdleTimeForScavenge(expected_time - 1, 0,
new_space_size));
}
TEST(ScavengeJob, EnoughIdleTimeForScavengeLowScavengeSpeed) {
size_t scavenge_speed = 1 * KB;
size_t new_space_size = 1 * MB;
size_t expected_time = (new_space_size + scavenge_speed - 1) / scavenge_speed;
EXPECT_TRUE(ScavengeJob::EnoughIdleTimeForScavenge(
expected_time, scavenge_speed, new_space_size));
EXPECT_FALSE(ScavengeJob::EnoughIdleTimeForScavenge(
expected_time - 1, scavenge_speed, new_space_size));
}
TEST(ScavengeJob, EnoughIdleTimeForScavengeHighScavengeSpeed) {
size_t scavenge_speed = kNewSpaceCapacity;
size_t new_space_size = 1 * MB;
size_t expected_time = (new_space_size + scavenge_speed - 1) / scavenge_speed;
EXPECT_TRUE(ScavengeJob::EnoughIdleTimeForScavenge(
expected_time, scavenge_speed, new_space_size));
EXPECT_FALSE(ScavengeJob::EnoughIdleTimeForScavenge(
expected_time - 1, scavenge_speed, new_space_size));
}
} // namespace internal
} // namespace v8