Commit 1beb89f2 authored by ulan, committed by Commit bot

[heap] New heuristics for incremental marking step size.

This patch simplifies the code for speeding up marking and
removes the write barrier counter.

The step size is now computed in two parts (see the sketch below):
- bytes to mark in order to keep up with allocation,
- bytes to mark in order to make progress.
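A minimal standalone sketch of that two-part computation (editor's illustration, not the patch's actual implementation in src/heap/incremental-marking.cc, which is collapsed further down; kAllocatedThreshold is taken from the header diff, while kTargetStepCount and the exact formulas are assumptions):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdio>

namespace {

constexpr size_t KB = 1024;
// From the header diff: do some marking every time this much memory has been
// allocated.
constexpr size_t kAllocatedThreshold = 64 * KB;
// Assumed pacing constant: aim to finish marking in roughly this many steps.
constexpr size_t kTargetStepCount = 256;

// Part 1: keep up with allocation -- mark at least as many bytes as the
// mutator allocated in the old generation since the previous step, so the
// marker does not fall behind promotion.
size_t StepSizeToKeepUpWithAllocations(size_t allocated_since_last_step) {
  return allocated_since_last_step;
}

// Part 2: make progress -- mark a fixed slice of the old generation each
// step, so marking terminates even when the allocation rate is near zero.
size_t StepSizeToMakeProgress(size_t old_generation_size) {
  return std::max(old_generation_size / kTargetStepCount, kAllocatedThreshold);
}

}  // namespace

int main() {
  const size_t allocated = 80 * KB;           // allocated since the last step
  const size_t old_gen_size = 128 * KB * KB;  // 128 MB old generation
  const size_t bytes_to_mark = StepSizeToKeepUpWithAllocations(allocated) +
                               StepSizeToMakeProgress(old_gen_size);
  std::printf("bytes to mark in this step: %zu\n", bytes_to_mark);
  return 0;
}
```

Judging from the header diff, the real implementation also works against a per-step time budget (kStepSizeInMs, capped at kMaxStepSizeInMs) and tracks bytes_marked_ahead_of_schedule_ so that work already done can be credited against later steps.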

BUG=chromium:616434, chromium:646139, chromium:644819
LOG=NO

Review-Url: https://codereview.chromium.org/2359903002
Cr-Commit-Position: refs/heads/master@{#39827}
parent 4c2fd5cd
src/arm/code-stubs-arm.cc

@@ -3238,16 +3238,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
   Label need_incremental;
   Label need_incremental_pop_scratch;

-  __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
-  __ ldr(regs_.scratch1(),
-         MemOperand(regs_.scratch0(),
-                    MemoryChunk::kWriteBarrierCounterOffset));
-  __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
-  __ str(regs_.scratch1(),
-         MemOperand(regs_.scratch0(),
-                    MemoryChunk::kWriteBarrierCounterOffset));
-  __ b(mi, &need_incremental);
-
   // Let's look at the color of the object: If it is not black we don't have
   // to inform the incremental marker.
   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
...
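Each architecture's RecordWriteStub carried the same fast path that this patch deletes: mask the object's address down to its page, decrement that page's write-barrier counter, and call into the runtime ("need_incremental") once the counter goes negative. A standalone C++ paraphrase of the removed instruction sequence follows; the page size and struct layout are illustrative assumptions, not V8 definitions.

```cpp
#include <cstdint>

// Assumed page size; the stub itself only relies on Page::kPageAlignmentMask.
constexpr uintptr_t kPageSize = 512 * 1024;
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;

// Stand-in for the start of a page; in V8 the counter lived at
// MemoryChunk::kWriteBarrierCounterOffset.
struct PageHeader {
  intptr_t write_barrier_counter;
};

// Mirrors the removed sequence: and_/Bic to find the page, ldr/sub/str (with
// flags set) to decrement the counter, and b(mi, &need_incremental) when it
// underflows.
bool CounterRequestsIncrementalStep(uintptr_t object_address) {
  auto* page =
      reinterpret_cast<PageHeader*>(object_address & ~kPageAlignmentMask);
  page->write_barrier_counter -= 1;
  return page->write_barrier_counter < 0;  // negative => inform the marker
}
```

With the counter gone, the stub only performs the color check that follows, and the marking pace is driven by allocation-triggered steps instead.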
src/arm64/code-stubs-arm64.cc

@@ -3155,16 +3155,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
   Label need_incremental;
   Label need_incremental_pop_scratch;

-  Register mem_chunk = regs_.scratch0();
-  Register counter = regs_.scratch1();
-  __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
-  __ Ldr(counter,
-         MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
-  __ Subs(counter, counter, 1);
-  __ Str(counter,
-         MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
-  __ B(mi, &need_incremental);
-
   // If the object is not black we don't have to inform the incremental marker.
   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
...
src/heap/heap.cc

@@ -115,7 +115,6 @@ Heap::Heap()
       inline_allocation_disabled_(false),
       total_regexp_code_generated_(0),
       tracer_(nullptr),
-      high_survival_rate_period_length_(0),
       promoted_objects_size_(0),
       promotion_ratio_(0),
       semi_space_copied_object_size_(0),

@@ -141,7 +140,7 @@ Heap::Heap()
       full_codegen_bytes_generated_(0),
       crankshaft_codegen_bytes_generated_(0),
       new_space_allocation_counter_(0),
-      old_generation_allocation_counter_(0),
+      old_generation_allocation_counter_at_last_gc_(0),
      old_generation_size_at_last_gc_(0),
       gcs_since_last_deopt_(0),
       global_pretenuring_feedback_(nullptr),

@@ -1269,11 +1268,6 @@ void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
   double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
   tracer()->AddSurvivalRatio(survival_rate);
-  if (survival_rate > kYoungSurvivalRateHighThreshold) {
-    high_survival_rate_period_length_++;
-  } else {
-    high_survival_rate_period_length_ = 0;
-  }
 }

 bool Heap::PerformGarbageCollection(

@@ -1310,13 +1304,6 @@ bool Heap::PerformGarbageCollection(
   int start_new_space_size = static_cast<int>(Heap::new_space()->Size());

-  if (IsHighSurvivalRate()) {
-    // We speed up the incremental marker if it is running so that it
-    // does not fall behind the rate of promotion, which would cause a
-    // constantly growing old space.
-    incremental_marking()->NotifyOfHighPromotionRate();
-  }
-
   {
     Heap::PretenuringScope pretenuring_scope(this);

@@ -1327,7 +1314,7 @@ bool Heap::PerformGarbageCollection(
     old_generation_size_configured_ = true;
     // This should be updated before PostGarbageCollectionProcessing, which
     // can cause another GC. Take into account the objects promoted during GC.
-    old_generation_allocation_counter_ +=
+    old_generation_allocation_counter_at_last_gc_ +=
         static_cast<size_t>(promoted_objects_size_);
     old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
   } else {
...
src/heap/heap.h

@@ -1387,16 +1387,18 @@ class Heap {
   }

   void UpdateOldGenerationAllocationCounter() {
-    old_generation_allocation_counter_ = OldGenerationAllocationCounter();
+    old_generation_allocation_counter_at_last_gc_ =
+        OldGenerationAllocationCounter();
   }

   size_t OldGenerationAllocationCounter() {
-    return old_generation_allocation_counter_ + PromotedSinceLastGC();
+    return old_generation_allocation_counter_at_last_gc_ +
+           PromotedSinceLastGC();
   }

   // This should be used only for testing.
-  void set_old_generation_allocation_counter(size_t new_value) {
-    old_generation_allocation_counter_ = new_value;
+  void set_old_generation_allocation_counter_at_last_gc(size_t new_value) {
+    old_generation_allocation_counter_at_last_gc_ = new_value;
   }

   size_t PromotedSinceLastGC() {

@@ -1723,10 +1725,6 @@ class Heap {
   // Flush the number to string cache.
   void FlushNumberStringCache();

-  // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
-  // Re-visit incremental marking heuristics.
-  bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
-
   void ConfigureInitialOldGenerationSize();

   bool HasLowYoungGenerationAllocationRate();

@@ -2201,7 +2199,6 @@ class Heap {
   GCTracer* tracer_;

-  int high_survival_rate_period_length_;
   intptr_t promoted_objects_size_;
   double promotion_ratio_;
   double promotion_rate_;

@@ -2260,7 +2257,7 @@ class Heap {
   // This counter is increased before each GC and never reset. To
   // account for the bytes allocated since the last GC, use the
   // OldGenerationAllocationCounter() function.
-  size_t old_generation_allocation_counter_;
+  size_t old_generation_allocation_counter_at_last_gc_;

   // The size of objects in old generation after the last MarkCompact GC.
   size_t old_generation_size_at_last_gc_;
...
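The rename spells out the snapshot semantics: the stored field is only updated at GC time, and the live counter is reconstructed by adding whatever has been promoted since. A small self-contained model of the two accessors shown above (editor's illustration, not V8 code):

```cpp
#include <cassert>
#include <cstddef>

// Plain-struct model of the two Heap accessors from the hunk above.
struct HeapCounters {
  size_t old_generation_allocation_counter_at_last_gc = 0;
  size_t promoted_since_last_gc = 0;

  // Cf. Heap::OldGenerationAllocationCounter().
  size_t OldGenerationAllocationCounter() const {
    return old_generation_allocation_counter_at_last_gc +
           promoted_since_last_gc;
  }

  // Cf. Heap::UpdateOldGenerationAllocationCounter(): fold the bytes promoted
  // so far into the snapshot taken at GC time.
  void UpdateAtGC() {
    old_generation_allocation_counter_at_last_gc =
        OldGenerationAllocationCounter();
    promoted_since_last_gc = 0;
  }
};

int main() {
  HeapCounters c;
  c.promoted_since_last_gc = 4096;  // mutator promotes 4 KB
  assert(c.OldGenerationAllocationCounter() == 4096);
  c.UpdateAtGC();                   // GC snapshots the counter
  assert(c.OldGenerationAllocationCounter() == 4096);  // never reset
  return 0;
}
```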
src/heap/incremental-marking.cc: this diff is collapsed.
src/heap/incremental-marking.h

@@ -108,31 +108,19 @@ class IncrementalMarking {
   // incremental marker until it completes.
   // Do some marking every time this much memory has been allocated or that many
   // heavy (color-checking) write barriers have been invoked.
-  static const intptr_t kAllocatedThreshold = 65536;
-  static const intptr_t kWriteBarriersInvokedThreshold = 32768;
-  // Start off by marking this many times more memory than has been allocated.
-  static const intptr_t kInitialMarkingSpeed = 1;
-  // But if we are promoting a lot of data we need to mark faster to keep up
-  // with the data that is entering the old space through promotion.
-  static const intptr_t kFastMarking = 3;
-  // After this many steps we increase the marking/allocating factor.
-  static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
-  // This is how much we increase the marking/allocating factor by.
-  static const intptr_t kMarkingSpeedAccelleration = 2;
-  static const intptr_t kMaxMarkingSpeed = 1000;
-  static const intptr_t kStepSizeInMs = 1;
+  static const size_t kAllocatedThreshold = 64 * KB;
+
+  static const int kStepSizeInMs = 1;
+  static const int kMaxStepSizeInMs = 5;

   // This is the upper bound for how many times we allow finalization of
   // incremental marking to be postponed.
-  static const size_t kMaxIdleMarkingDelayCounter = 3;
+  static const int kMaxIdleMarkingDelayCounter = 3;

   void FinalizeSweeping();

-  void NotifyAllocatedBytes(intptr_t allocated_bytes);
-
-  void Step(intptr_t bytes_to_process, CompletionAction action,
-            ForceCompletionAction completion, StepOrigin origin);
+  size_t Step(size_t bytes_to_process, CompletionAction action,
+              ForceCompletionAction completion, StepOrigin step_origin);

   inline void RestartIfNotMarking();

@@ -175,8 +163,6 @@ class IncrementalMarking {
   void ActivateGeneratedStub(Code* stub);

-  void NotifyOfHighPromotionRate();
-
   void NotifyIncompleteScanOfObject(int unscanned_bytes) {
     unscanned_bytes_of_large_object_ = unscanned_bytes;
   }

@@ -235,7 +221,7 @@ class IncrementalMarking {
          incremental_marking_(incremental_marking) {}

     void Step(int bytes_allocated, Address, size_t) override {
-      incremental_marking_.NotifyAllocatedBytes(bytes_allocated);
+      incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
     }

    private:

@@ -244,10 +230,6 @@ class IncrementalMarking {
   int64_t SpaceLeftInOldSpace();

-  void SpeedUp();
-
-  void ResetStepCounters();
-
   void StartMarking();

   void StartBlackAllocation();

@@ -283,38 +265,36 @@ class IncrementalMarking {
   void IncrementIdleMarkingDelayCounter();

-  Heap* heap_;
+  void AdvanceIncrementalMarkingOnAllocation();
+
+  size_t StepSizeToKeepUpWithAllocations();
+  size_t StepSizeToMakeProgress();

-  Observer observer_;
+  Heap* heap_;

   State state_;
-  bool is_compacting_;

-  int steps_count_;
-  int64_t old_generation_space_available_at_start_of_incremental_;
-  int64_t old_generation_space_used_at_start_of_incremental_;
-  int64_t bytes_rescanned_;
-  bool should_hurry_;
-  int marking_speed_;
-  intptr_t bytes_scanned_;
-  intptr_t allocated_;
-  intptr_t write_barriers_invoked_since_last_step_;
-  intptr_t bytes_marked_ahead_of_schedule_;
-  size_t idle_marking_delay_counter_;
-  int unscanned_bytes_of_large_object_;
+  double start_time_ms_;
+  size_t initial_old_generation_size_;
+  size_t old_generation_allocation_counter_;
+  size_t bytes_allocated_;
+  size_t bytes_marked_ahead_of_schedule_;
+  size_t unscanned_bytes_of_large_object_;
+  int idle_marking_delay_counter_;
+  int incremental_marking_finalization_rounds_;
+
+  bool is_compacting_;
+  bool should_hurry_;
   bool was_activated_;
   bool black_allocation_;
   bool finalize_marking_completed_;
-  int incremental_marking_finalization_rounds_;

   GCRequestType request_type_;

   IncrementalMarkingJob incremental_marking_job_;
+
+  Observer new_generation_observer_;
+  Observer old_generation_observer_;

   DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
 };
...
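The private section now also holds two Observer instances, one registered for new-space allocation and one for old-space allocation. Below is a rough, self-contained sketch of that observer pattern, assuming a simplified base class; in V8 the real base is the heap's allocation observer, and the callback simply forwards to AdvanceIncrementalMarkingOnAllocation() as shown in the hunk above.

```cpp
#include <cstddef>
#include <cstdio>

// Simplified stand-in for the allocation-observer interface: the heap invokes
// Step() once roughly step_size bytes have been allocated since the last
// notification.
class AllocationObserver {
 public:
  explicit AllocationObserver(size_t step_size) : step_size_(step_size) {}
  virtual ~AllocationObserver() = default;
  virtual void Step(size_t bytes_allocated) = 0;
  size_t step_size() const { return step_size_; }

 private:
  size_t step_size_;
};

// Cf. IncrementalMarking::Observer: every kAllocatedThreshold (64 KB) of
// allocation triggers one allocation-driven marking step.
class MarkingObserver : public AllocationObserver {
 public:
  MarkingObserver() : AllocationObserver(64 * 1024) {}
  void Step(size_t bytes_allocated) override {
    std::printf("advance incremental marking (%zu bytes allocated)\n",
                bytes_allocated);
  }
};

int main() {
  MarkingObserver new_generation_observer;  // cf. new_generation_observer_
  MarkingObserver old_generation_observer;  // cf. old_generation_observer_
  // A real heap would call these from its allocation path.
  new_generation_observer.Step(70 * 1024);
  old_generation_observer.Step(66 * 1024);
  return 0;
}
```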
src/ia32/code-stubs-ia32.cc

@@ -3190,17 +3190,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
     Mode mode) {
   Label object_is_black, need_incremental, need_incremental_pop_object;

-  __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
-  __ and_(regs_.scratch0(), regs_.object());
-  __ mov(regs_.scratch1(),
-         Operand(regs_.scratch0(),
-                 MemoryChunk::kWriteBarrierCounterOffset));
-  __ sub(regs_.scratch1(), Immediate(1));
-  __ mov(Operand(regs_.scratch0(),
-                 MemoryChunk::kWriteBarrierCounterOffset),
-         regs_.scratch1());
-  __ j(negative, &need_incremental);
-
   // Let's look at the color of the object: If it is not black we don't have
   // to inform the incremental marker.
   __ JumpIfBlack(regs_.object(),
...
src/x64/code-stubs-x64.cc

@@ -3129,17 +3129,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
   Label need_incremental;
   Label need_incremental_pop_object;

-  __ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
-  __ andp(regs_.scratch0(), regs_.object());
-  __ movp(regs_.scratch1(),
-          Operand(regs_.scratch0(),
-                  MemoryChunk::kWriteBarrierCounterOffset));
-  __ subp(regs_.scratch1(), Immediate(1));
-  __ movp(Operand(regs_.scratch0(),
-                  MemoryChunk::kWriteBarrierCounterOffset),
-          regs_.scratch1());
-  __ j(negative, &need_incremental);
-
   // Let's look at the color of the object: If it is not black we don't have
   // to inform the incremental marker.
   __ JumpIfBlack(regs_.object(),
...
test/cctest/heap/test-heap.cc

@@ -6209,7 +6209,8 @@ TEST(OldSpaceAllocationCounter) {
   CHECK_LE(kSize, counter4 - counter3);
   // Test counter overflow.
   size_t max_counter = -1;
-  heap->set_old_generation_allocation_counter(max_counter - 10 * kSize);
+  heap->set_old_generation_allocation_counter_at_last_gc(max_counter -
+                                                         10 * kSize);
   size_t start = heap->OldGenerationAllocationCounter();
   for (int i = 0; i < 20; i++) {
     AllocateInSpace(isolate, kSize, OLD_SPACE);
...
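The overflow portion of this test relies on unsigned wrap-around: size_t max_counter = -1 yields the maximum size_t, the counter is parked 10 * kSize below it, and twenty further allocations push it past the wrap point. Because size_t arithmetic is modular, the difference between two samples of the counter stays exact across the wrap, which is what the surrounding checks depend on. A standalone illustration (not the test itself):

```cpp
#include <cassert>
#include <cstddef>

int main() {
  const size_t kSize = 1024;
  const size_t max_counter = static_cast<size_t>(-1);  // maximum size_t
  size_t counter = max_counter - 10 * kSize;            // parked near the top
  const size_t start = counter;
  for (int i = 0; i < 20; i++) {
    counter += kSize;  // wraps past zero part-way through the loop
  }
  assert(counter - start == 20 * kSize);  // modular difference is still exact
  return 0;
}
```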