Commit 1e3ee8cc authored by Ulan Degenbaev, committed by Commit Bot

[heap] Adjust incremental marking step size heuristics.

After 52e8d0ab the incremental marking observer is invoked ~8 times
more often than before. This patch increases the allocation observer
threshold for incremental marking and scales the step size based on
the number of concurrent marking tasks.

Bug: chromium:768664
TBR: mlippautz@chromium.org
Change-Id: I0afd5dccd55f32c7f545d9c3a47edc20c6fd83db
Reviewed-on: https://chromium-review.googlesource.com/683955
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48154}
parent 5a5783e3
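
Editorial note: the diff below retunes the step-size constants and thresholds
described in the commit message. As a minimal standalone sketch (not V8
source), the snippet here mirrors the Max() clamp in StepSizeToMakeProgress()
with the new kTargetStepCount and kMinStepSizeInBytes values from the patch;
the 128 MB old-generation size used in main() is a made-up example input.

// Minimal standalone sketch (not V8 source). It mirrors the constants and
// the clamp from StepSizeToMakeProgress() after this patch; the old
// generation size used in main() is a hypothetical example, not a V8 value.
#include <algorithm>
#include <cstddef>
#include <cstdio>

constexpr size_t KB = 1024;
constexpr size_t MB = 1024 * KB;
constexpr size_t kTargetStepCount = 256;         // was 128 before this patch
constexpr size_t kMinStepSizeInBytes = 64 * KB;

// Bytes the main thread should mark per step so that marking finishes in
// roughly kTargetStepCount steps, but never less than the minimum step size.
size_t StepSizeToMakeProgress(size_t initial_old_generation_size) {
  return std::max(initial_old_generation_size / kTargetStepCount,
                  kMinStepSizeInBytes);
}

int main() {
  // Example: a 128 MB old generation yields 512 KB per step; doubling the
  // target step count (128 -> 256) halves the per-step work.
  std::printf("step size: %zu KB\n", StepSizeToMakeProgress(128 * MB) / KB);
  return 0;
}
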
@@ -51,6 +51,8 @@ class ConcurrentMarking {
   // scavenge and is going to be re-used.
   void ClearLiveness(MemoryChunk* chunk);
+  int TaskCount() { return task_count_; }
  private:
   struct TaskState {
     // When the concurrent marking task has this lock, then objects in the
...
@@ -60,8 +60,8 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
       finalize_marking_completed_(false),
       trace_wrappers_toggle_(false),
       request_type_(NONE),
-      new_generation_observer_(*this, kAllocatedThreshold),
-      old_generation_observer_(*this, kAllocatedThreshold) {
+      new_generation_observer_(*this, kYoungGenerationAllocatedThreshold),
+      old_generation_observer_(*this, kOldGenerationAllocatedThreshold) {
   SetState(STOPPED);
 }
@@ -1057,8 +1057,8 @@ size_t IncrementalMarking::StepSizeToMakeProgress() {
   // leave marking work to standalone tasks. The ramp up duration and the
   // target step count are chosen based on benchmarks.
   const int kRampUpIntervalMs = 300;
-  const size_t kTargetStepCount = 128;
-  const size_t kTargetStepCountAtOOM = 16;
+  const size_t kTargetStepCount = 256;
+  const size_t kTargetStepCountAtOOM = 32;
   size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;
   if (heap()->IsCloseToOutOfMemory(oom_slack)) {
@@ -1066,7 +1066,7 @@ size_t IncrementalMarking::StepSizeToMakeProgress() {
   }
   size_t step_size = Max(initial_old_generation_size_ / kTargetStepCount,
-                         IncrementalMarking::kAllocatedThreshold);
+                         IncrementalMarking::kMinStepSizeInBytes);
   double time_passed_ms =
       heap_->MonotonicallyIncreasingTimeInMs() - start_time_ms_;
   double factor = Min(time_passed_ms / kRampUpIntervalMs, 1.0);
@@ -1084,7 +1084,7 @@ void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
   size_t bytes_to_process =
       StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
-  if (bytes_to_process >= IncrementalMarking::kAllocatedThreshold) {
+  if (bytes_to_process >= IncrementalMarking::kMinStepSizeInBytes) {
     // The first step after Scavenge will see many allocated bytes.
     // Cap the step size to distribute the marking work more uniformly.
     size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
@@ -1092,6 +1092,13 @@ void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
         heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
     bytes_to_process = Min(bytes_to_process, max_step_size);
+    if (FLAG_concurrent_marking && marking_worklist()->IsBailoutEmpty()) {
+      // The number of background tasks + the main thread.
+      size_t tasks = heap()->concurrent_marking()->TaskCount() + 1;
+      bytes_to_process = Max(IncrementalMarking::kMinStepSizeInBytes,
+                             bytes_to_process / tasks);
+    }
     size_t bytes_processed = 0;
     if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
       // Steps performed in tasks have put us ahead of schedule.
...
@@ -188,7 +188,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   // incremental marker until it completes.
   // Do some marking every time this much memory has been allocated or that many
   // heavy (color-checking) write barriers have been invoked.
-  static const size_t kAllocatedThreshold = 64 * KB;
+  static const size_t kYoungGenerationAllocatedThreshold = 64 * KB;
+  static const size_t kOldGenerationAllocatedThreshold = 256 * KB;
+  static const size_t kMinStepSizeInBytes = 64 * KB;
   static const int kStepSizeInMs = 1;
   static const int kMaxStepSizeInMs = 5;
...
@@ -511,6 +511,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
       shared_.Clear();
     }
+    bool IsBailoutEmpty() { return bailout_.IsLocalEmpty(kMainThread); }
     bool IsEmpty() {
       return bailout_.IsLocalEmpty(kMainThread) &&
              shared_.IsLocalEmpty(kMainThread) &&
...
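
Editorial note: the IsBailoutEmpty() accessor added above feeds the new guard
in AdvanceIncrementalMarkingOnAllocation(): the main-thread step is only
divided among the marking tasks when the bailout worklist (objects the
concurrent markers hand back because only the main thread may process them)
is empty. A rough standalone sketch of that guard follows; the two *Stub
types are hypothetical stand-ins for the real worklist and concurrent marker.

// Rough standalone sketch (not V8 source) of the task-based scaling guard.
// MarkingWorklistStub and ConcurrentMarkingStub are hypothetical stand-ins.
#include <algorithm>
#include <cstddef>

constexpr size_t kMinStepSizeInBytes = 64 * 1024;

struct MarkingWorklistStub {
  size_t bailout_objects = 0;
  // Bailout objects can only be marked on the main thread, so the step is
  // left at full size while any of them are pending.
  bool IsBailoutEmpty() const { return bailout_objects == 0; }
};

struct ConcurrentMarkingStub {
  int task_count = 0;
  int TaskCount() const { return task_count; }
};

size_t ScaleStepByTasks(size_t bytes_to_process, bool concurrent_marking,
                        const MarkingWorklistStub& worklist,
                        const ConcurrentMarkingStub& marker) {
  if (concurrent_marking && worklist.IsBailoutEmpty()) {
    // Background tasks + the main thread share the marking work, so the
    // main thread only takes its 1/(tasks + 1) share, clamped at the
    // minimum step size.
    size_t tasks = static_cast<size_t>(marker.TaskCount()) + 1;
    bytes_to_process = std::max(kMinStepSizeInBytes, bytes_to_process / tasks);
  }
  return bytes_to_process;
}

int main() {
  MarkingWorklistStub worklist;    // empty bailout worklist
  ConcurrentMarkingStub marker;
  marker.task_count = 3;           // e.g. three background marking tasks
  // 512 KB of pending work split across 3 tasks + main thread -> 128 KB.
  size_t step = ScaleStepByTasks(512 * 1024, /*concurrent_marking=*/true,
                                 worklist, marker);
  return step == 128 * 1024 ? 0 : 1;
}
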