Commit ae39e059 authored by vegorov@chromium.org's avatar vegorov@chromium.org

Take survival rates of young objects into account when choosing old generation limits.

Stable high survival rates of young objects during both partial and
full collections indicate that the mutator is either building or
modifying a structure with a long lifetime.

In this case we aggressively raise the old generation memory limits to
postpone the subsequent mark-sweep collection and trade memory space
for mutation speed.

Review URL: http://codereview.chromium.org/2809032

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4938 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 4695ef1a
......@@ -126,6 +126,13 @@ int Heap::always_allocate_scope_depth_ = 0;
int Heap::linear_allocation_scope_depth_ = 0;
int Heap::contexts_disposed_ = 0;
int Heap::young_survivors_after_last_gc_ = 0;
int Heap::high_survival_rate_period_length_ = 0;
int Heap::survival_rate_ = 0;
Heap::SurvivalRateTrend Heap::previous_survival_rate_trend_ = Heap::STABLE;
Heap::SurvivalRateTrend Heap::survival_rate_trend_ = Heap::STABLE;
bool Heap::bumped_old_gen_limits_ = false;
#ifdef DEBUG
bool Heap::allocation_allowed_ = true;
......@@ -582,6 +589,29 @@ static void VerifyPageWatermarkValidity(PagedSpace* space,
}
#endif
void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
double survival_rate =
(static_cast<double>(young_survivors_after_last_gc_) * 100) /
start_new_space_size;
if (survival_rate > kYoungSurvivalRateThreshold) {
high_survival_rate_period_length_++;
} else {
high_survival_rate_period_length_ = 0;
}
double survival_rate_diff = survival_rate_ - survival_rate;
if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
set_survival_rate_trend(DECREASING);
} else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
set_survival_rate_trend(INCREASING);
} else {
set_survival_rate_trend(STABLE);
}
survival_rate_ = survival_rate;
}
void Heap::PerformGarbageCollection(AllocationSpace space,
GarbageCollector collector,
......@@ -604,6 +634,8 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
EnsureFromSpaceIsCommitted();
int start_new_space_size = Heap::new_space()->Size();
if (collector == MARK_COMPACTOR) {
if (FLAG_flush_code) {
// Flush all potentially unused code.
......@@ -613,16 +645,50 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
// Perform mark-sweep with optional compaction.
MarkCompact(tracer);
bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
IsStableOrIncreasingSurvivalTrend();
UpdateSurvivalRateTrend(start_new_space_size);
int old_gen_size = PromotedSpaceSize();
old_gen_promotion_limit_ =
old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
old_gen_allocation_limit_ =
old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
if (high_survival_rate_during_scavenges &&
IsStableOrIncreasingSurvivalTrend()) {
// Stable high survival rates of young objects both during partial and
// full collection indicate that mutator is either building or modifying
// a structure with a long lifetime.
// In this case we aggressively raise old generation memory limits to
// postpone subsequent mark-sweep collection and thus trade memory
// space for the mutation speed.
old_gen_promotion_limit_ *= 2;
old_gen_allocation_limit_ *= 2;
bumped_old_gen_limits_ = true;
}
old_gen_exhausted_ = false;
} else {
tracer_ = tracer;
Scavenge();
tracer_ = NULL;
UpdateSurvivalRateTrend(start_new_space_size);
if (bumped_old_gen_limits_ &&
!IsHighSurvivalRate() &&
!IsIncreasingSurvivalTrend()) {
// We previously observed high survival rates in young space and decided
// to bump old space allocation limits to trade the space for the speed
// but now survival rates are dropping which indicates that mutator
// finished updating tenured data structure. So we can decrease old space
// limits to guarantee an early full GC cycle and reduce memory footprint.
old_gen_promotion_limit_ /= 2;
old_gen_allocation_limit_ /= 2;
bumped_old_gen_limits_ = false;
}
}
Counters::objs_since_last_young.Set(0);
......
......@@ -1005,6 +1005,7 @@ class Heap : public AllStatic {
static void CheckNewSpaceExpansionCriteria();
static inline void IncrementYoungSurvivorsCounter(int survived) {
young_survivors_after_last_gc_ = survived;
survived_since_last_expansion_ += survived;
}
......@@ -1272,6 +1273,56 @@ class Heap : public AllStatic {
// be replaced with a lazy compilable version.
static void FlushCode();
static void UpdateSurvivalRateTrend(int start_new_space_size);
enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
static const int kYoungSurvivalRateThreshold = 90;
static const int kYoungSurvivalRateAllowedDeviation = 15;
static int young_survivors_after_last_gc_;
static int high_survival_rate_period_length_;
static int survival_rate_;
static SurvivalRateTrend previous_survival_rate_trend_;
static SurvivalRateTrend survival_rate_trend_;
static bool bumped_old_gen_limits_;
static void set_survival_rate_trend(SurvivalRateTrend survival_rate_trend) {
ASSERT(survival_rate_trend != FLUCTUATING);
previous_survival_rate_trend_ = survival_rate_trend_;
survival_rate_trend_ = survival_rate_trend;
}
static SurvivalRateTrend survival_rate_trend() {
if (survival_rate_trend_ == STABLE) {
return STABLE;
} else if (previous_survival_rate_trend_ == STABLE) {
return survival_rate_trend_;
} else if (survival_rate_trend_ != previous_survival_rate_trend_) {
return FLUCTUATING;
} else {
return survival_rate_trend_;
}
}
static bool IsStableOrIncreasingSurvivalTrend() {
switch (survival_rate_trend()) {
case STABLE:
case INCREASING:
return true;
default:
return false;
}
}
static bool IsIncreasingSurvivalTrend() {
return survival_rate_trend() == INCREASING;
}
static bool IsHighSurvivalRate() {
return high_survival_rate_period_length_ > 0;
}
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment