Commit 40bb3a58 authored by mlippautz's avatar mlippautz Committed by Commit bot

Remove high promotion mode

High promotion mode limits the new space size when objects are promoted at a high
rate. The result is frequent scavenger runs with low pause times. The
approach conflicts with pretenuring, which requires a larger new space size to
collect enough feedback to be usable.

Ultimately the goal is to have pretenuring catch allocation sites that have high
promotion rates.

Revert "Bring back high promotion mode to shrink young generation size when scavenging latency is high."

This reverts commit 37be1d5e.

Revert "GC. Delay/avoid entering high promotion mode"

This reverts commit a5221d07.

BUG=chromium:499507,chromium:506910
LOG=n

Review URL: https://codereview.chromium.org/1247723003

Cr-Commit-Position: refs/heads/master@{#29983}
parent df1f72bb
......@@ -115,12 +115,8 @@ Heap::Heap()
gc_safe_size_of_old_object_(NULL),
total_regexp_code_generated_(0),
tracer_(this),
new_space_high_promotion_mode_active_(false),
gathering_lifetime_feedback_(0),
high_survival_rate_period_length_(0),
promoted_objects_size_(0),
low_survival_rate_period_length_(0),
survival_rate_(0),
promotion_ratio_(0),
semi_space_copied_object_size_(0),
previous_semi_space_copied_object_size_(0),
......@@ -129,8 +125,6 @@ Heap::Heap()
nodes_copied_in_new_space_(0),
nodes_promoted_(0),
maximum_size_scavenges_(0),
previous_survival_rate_trend_(Heap::STABLE),
survival_rate_trend_(Heap::STABLE),
max_gc_pause_(0.0),
total_gc_time_ms_(0.0),
max_alive_after_gc_(0),
......@@ -1203,24 +1197,6 @@ void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
} else {
high_survival_rate_period_length_ = 0;
}
if (survival_rate < kYoungSurvivalRateLowThreshold) {
low_survival_rate_period_length_++;
} else {
low_survival_rate_period_length_ = 0;
}
double survival_rate_diff = survival_rate_ - survival_rate;
if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
set_survival_rate_trend(DECREASING);
} else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
set_survival_rate_trend(INCREASING);
} else {
set_survival_rate_trend(STABLE);
}
survival_rate_ = survival_rate;
}
bool Heap::PerformGarbageCollection(
......@@ -1278,16 +1254,8 @@ bool Heap::PerformGarbageCollection(
Scavenge();
}
bool deopted = ProcessPretenuringFeedback();
ProcessPretenuringFeedback();
UpdateSurvivalStatistics(start_new_space_size);
// When pretenuring is collecting new feedback, we do not shrink the new space
// right away.
if (deopted) {
RecordDeoptForPretenuring();
} else {
ConfigureNewGenerationSize();
}
ConfigureInitialOldGenerationSize();
isolate_->counters()->objs_since_last_young()->Set(0);
......@@ -1513,8 +1481,7 @@ void Heap::CheckNewSpaceExpansionCriteria() {
survived_since_last_expansion_ = 0;
}
} else if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
survived_since_last_expansion_ > new_space_.TotalCapacity() &&
!new_space_high_promotion_mode_active_) {
survived_since_last_expansion_ > new_space_.TotalCapacity()) {
// Grow the size of new space if there is room to grow, and enough data
// has survived scavenge since the last expansion.
new_space_.Grow();
......@@ -2690,48 +2657,6 @@ void Heap::ConfigureInitialOldGenerationSize() {
}
// Enters/leaves "high promotion mode" and, while the mode is active, shrinks
// the young generation back toward its initial capacity.  The mode caps the
// new space when survival rates stay high even at maximum capacity (most
// objects get promoted anyway), trading more frequent scavenges for shorter
// pauses.
void Heap::ConfigureNewGenerationSize() {
// Non-zero while recently generated pretenuring code may still lower the
// promotion rate; armed with 2 by RecordDeoptForPretenuring() and
// decremented once per call here.
bool still_gathering_lifetime_data = gathering_lifetime_feedback_ != 0;
if (gathering_lifetime_feedback_ != 0) gathering_lifetime_feedback_--;
if (!new_space_high_promotion_mode_active_ &&
new_space_.TotalCapacity() == new_space_.MaximumCapacity() &&
IsStableOrIncreasingSurvivalTrend() && IsHighSurvivalRate()) {
// Stable high survival rates even though young generation is at
// maximum capacity indicates that most objects will be promoted.
// To decrease scavenger pauses and final mark-sweep pauses, we
// have to limit maximal capacity of the young generation.
if (still_gathering_lifetime_data) {
// Give the freshly generated pretenuring code a chance to take effect
// before capping the new space.
if (FLAG_trace_gc) {
PrintPID(
"Postpone entering high promotion mode as optimized pretenuring "
"code is still being generated\n");
}
} else {
new_space_high_promotion_mode_active_ = true;
if (FLAG_trace_gc) {
PrintPID("Limited new space size due to high promotion rate: %d MB\n",
new_space_.InitialTotalCapacity() / MB);
}
}
} else if (new_space_high_promotion_mode_active_ &&
IsStableOrDecreasingSurvivalTrend() && IsLowSurvivalRate()) {
// Decreasing low survival rates might indicate that the above high
// promotion mode is over and we should allow the young generation
// to grow again.
new_space_high_promotion_mode_active_ = false;
if (FLAG_trace_gc) {
PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
new_space_.MaximumCapacity() / MB);
}
}
// While the cap is active, shrink the new space down to its initial total
// capacity.
if (new_space_high_promotion_mode_active_ &&
new_space_.TotalCapacity() > new_space_.InitialTotalCapacity()) {
new_space_.Shrink();
}
}
AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result = nullptr;
......
......@@ -1393,10 +1393,6 @@ class Heap {
return PromotedSpaceSizeOfObjects() - old_generation_size_at_last_gc_;
}
// Notes that, since the last GC, we generated optimized code that pretenures
// allocations which were previously unpretenured.  Arms a two-GC countdown
// (gathering_lifetime_feedback_) so entering high promotion mode is
// postponed while that code takes effect.
void RecordDeoptForPretenuring() {
  gathering_lifetime_feedback_ = 2;
}
// Update GC statistics that are tracked on the Heap.
void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
double marking_time);
......@@ -2191,23 +2187,13 @@ class Heap {
void UpdateSurvivalStatistics(int start_new_space_size);
// Classification of how the young-generation survival rate is trending.
// FLUCTUATING is never stored; it is only derived by survival_rate_trend().
enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
// Thresholds applied to the survival rate in UpdateSurvivalStatistics();
// the 0-100 range suggests percentages -- TODO(review): confirm.
static const int kYoungSurvivalRateHighThreshold = 90;
static const int kYoungSurvivalRateLowThreshold = 10;
// Largest rate change between scavenges still classified as STABLE.
static const int kYoungSurvivalRateAllowedDeviation = 15;
// NOTE(review): not referenced in the visible portion of this file;
// presumably the old-space analogue of the low threshold above -- confirm.
static const int kOldSurvivalRateLowThreshold = 10;
// Whether the young generation is currently capped because of high
// promotion rates (see ConfigureNewGenerationSize()).
bool new_space_high_promotion_mode_active_;
// If this is non-zero, then there is hope yet that the optimized code we
// have generated will solve our high promotion rate problems, so we don't
// need to go into high promotion mode just yet.
int gathering_lifetime_feedback_;
// Number of consecutive scavenges with a high survival rate.
int high_survival_rate_period_length_;
// Size of objects promoted to old space; reset to 0 in the constructor.
intptr_t promoted_objects_size_;
// Number of consecutive scavenges with a low survival rate.
int low_survival_rate_period_length_;
// Survival rate recorded at the end of UpdateSurvivalStatistics().
double survival_rate_;
double promotion_ratio_;
intptr_t semi_space_copied_object_size_;
......@@ -2223,59 +2209,12 @@ class Heap {
// of the allocation site.
unsigned int maximum_size_scavenges_;
SurvivalRateTrend previous_survival_rate_trend_;
SurvivalRateTrend survival_rate_trend_;
// Records the newest trend sample, keeping the prior sample around so that
// survival_rate_trend() can detect fluctuation.  FLUCTUATING is a derived
// value and must never be stored directly.
void set_survival_rate_trend(SurvivalRateTrend new_trend) {
  DCHECK(new_trend != FLUCTUATING);
  previous_survival_rate_trend_ = survival_rate_trend_;
  survival_rate_trend_ = new_trend;
}
// Derives the effective trend from the last two recorded samples.  If either
// sample is STABLE the current sample wins; two differing non-stable samples
// read as FLUCTUATING.
SurvivalRateTrend survival_rate_trend() {
  SurvivalRateTrend current = survival_rate_trend_;
  SurvivalRateTrend previous = previous_survival_rate_trend_;
  if (current == STABLE || previous == STABLE) return current;
  return (current == previous) ? current : FLUCTUATING;
}
bool IsStableOrIncreasingSurvivalTrend() {
switch (survival_rate_trend()) {
case STABLE:
case INCREASING:
return true;
default:
return false;
}
}
bool IsStableOrDecreasingSurvivalTrend() {
switch (survival_rate_trend()) {
case STABLE:
case DECREASING:
return true;
default:
return false;
}
}
// True only when the derived trend is strictly INCREASING.
bool IsIncreasingSurvivalTrend() { return survival_rate_trend() == INCREASING; }
// True while we are inside a streak of low-survival scavenges.
bool IsLowSurvivalRate() {
  return low_survival_rate_period_length_ > 0;
}

// True while we are inside a streak of high-survival scavenges.
// TODO(hpayer): Allocation site pretenuring may make this method obsolete.
// Re-visit incremental marking heuristics.
bool IsHighSurvivalRate() {
  return high_survival_rate_period_length_ > 0;
}
void ConfigureInitialOldGenerationSize();
void ConfigureNewGenerationSize();
void SelectScavengingVisitorsTable();
bool HasLowYoungGenerationAllocationRate();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment