Commit 83849a34 authored by hpayer, committed by Commit bot


Revert of Shrink initial old generation size based on new space survival rate. (patchset #7 id:140001 of https://codereview.chromium.org/785593002/)

Reason for revert:
Performance regressions.

Original issue's description:
> Shrink initial old generation size based on new space survival rate.
>
> BUG=
>
> Committed: https://crrev.com/37f9bb72b9c7ea4eccef6cb6533b3c42792fb5e6
> Cr-Commit-Position: refs/heads/master@{#25850}

TBR=jochen@chromium.org,ulan@chromium.org
NOTREECHECKS=true
NOTRY=true
BUG=

Review URL: https://codereview.chromium.org/813733002

Cr-Commit-Position: refs/heads/master@{#25862}
parent d0a7ef93
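
For context, the reverted heuristic reduces to the following standalone sketch (distilled from the diff below; the names mirror the removed V8 code, but this is illustrative, not the actual implementation):

    #include <algorithm>
    #include <cstdint>

    // Scale the initial old-generation allocation limit by the average
    // new-space survival rate: if little survives scavenges, little will
    // be promoted, so the first full GC can be triggered earlier.
    int64_t InitialOldGenerationLimit(int64_t initial_old_generation_size,
                                      double average_survival_rate,  // percent
                                      int64_t minimum_limit) {
      return std::max(minimum_limit,
                      static_cast<int64_t>(initial_old_generation_size *
                                           (average_survival_rate / 100)));
    }
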
@@ -557,7 +557,6 @@ DEFINE_BOOL(experimental_new_space_growth_heuristic, false,
             "Grow the new space based on the percentage of survivors instead "
             "of their absolute value.")
 DEFINE_INT(max_old_space_size, 0, "max size of the old space (in Mbytes)")
-DEFINE_INT(initial_old_space_size, 0, "initial old space size (in Mbytes)")
 DEFINE_INT(max_executable_size, 0, "max size of executable memory (in Mbytes)")
 DEFINE_BOOL(gc_global, false, "always perform global GCs")
 DEFINE_INT(gc_interval, -1, "garbage collect after <n> allocations")
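
Before this revert, the flag removed above could presumably be exercised like the other heap sizing flags, e.g.:

    d8 --initial_old_space_size=64 script.js   # request a 64 MB initial old space
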
@@ -31,11 +31,6 @@ GCTracer::ContextDisposalEvent::ContextDisposalEvent(double time) {
 }
 
 
-GCTracer::SurvivalEvent::SurvivalEvent(double survival_rate) {
-  survival_rate_ = survival_rate;
-}
-
-
 GCTracer::Event::Event(Type type, const char* gc_reason,
                        const char* collector_reason)
     : type(type),
@@ -257,11 +252,6 @@ void GCTracer::AddContextDisposalTime(double time) {
 }
 
 
-void GCTracer::AddSurvivalRate(double survival_rate) {
-  survival_events_.push_front(SurvivalEvent(survival_rate));
-}
-
-
 void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) {
   cumulative_incremental_marking_steps_++;
   cumulative_incremental_marking_bytes_ += bytes;
@@ -371,10 +361,8 @@ void GCTracer::PrintNVP() const {
   PrintF("nodes_died_in_new=%d ", heap_->nodes_died_in_new_space_);
   PrintF("nodes_copied_in_new=%d ", heap_->nodes_copied_in_new_space_);
   PrintF("nodes_promoted=%d ", heap_->nodes_promoted_);
-  PrintF("promotion_ratio=%.1f%% ", heap_->promotion_ratio_);
   PrintF("promotion_rate=%.1f%% ", heap_->promotion_rate_);
   PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_);
-  PrintF("average_survival_rate%.1f%% ", AverageSurvivalRate());
   PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ",
          NewSpaceAllocationThroughputInBytesPerMillisecond());
   PrintF("context_disposal_rate=%.1f ", ContextDisposalRateInMilliseconds());
@@ -568,24 +556,5 @@ double GCTracer::ContextDisposalRateInMilliseconds() const {
   return (begin - end) / context_disposal_events_.size();
 }
 
 
-double GCTracer::AverageSurvivalRate() const {
-  if (survival_events_.size() == 0) return 0.0;
-
-  double sum_of_rates = 0.0;
-  SurvivalEventBuffer::const_iterator iter = survival_events_.begin();
-  while (iter != survival_events_.end()) {
-    sum_of_rates += iter->survival_rate_;
-    ++iter;
-  }
-
-  return sum_of_rates / static_cast<double>(survival_events_.size());
-}
-
-
-bool GCTracer::SurvivalEventsRecorded() const {
-  return survival_events_.size() > 0;
-}
 }
 }  // namespace v8::internal
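
The AverageSurvivalRate() removed above walks a fixed-size ring buffer, so the average only covers the most recent kRingBufferMaxSize scavenges. A minimal stand-in that uses std::deque in place of V8's internal RingBuffer (an assumption made to keep the sketch self-contained) behaves equivalently:

    #include <deque>

    class SurvivalTracker {
     public:
      explicit SurvivalTracker(size_t max_events) : max_events_(max_events) {}

      // Newest events go to the front; the oldest falls off the back,
      // mirroring a bounded ring buffer.
      void AddSurvivalRate(double rate) {
        events_.push_front(rate);
        if (events_.size() > max_events_) events_.pop_back();
      }

      double AverageSurvivalRate() const {
        if (events_.empty()) return 0.0;
        double sum = 0.0;
        for (double rate : events_) sum += rate;
        return sum / static_cast<double>(events_.size());
      }

     private:
      size_t max_events_;
      std::deque<double> events_;
    };
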
@@ -159,17 +159,6 @@ class GCTracer {
   };
 
 
-  class SurvivalEvent {
-   public:
-    // Default constructor leaves the event uninitialized.
-    SurvivalEvent() {}
-
-    explicit SurvivalEvent(double survival_rate);
-
-    double survival_rate_;
-  };
-
-
   class Event {
    public:
    enum Type {
@@ -278,8 +267,6 @@ class GCTracer {
   typedef RingBuffer<ContextDisposalEvent, kRingBufferMaxSize>
       ContextDisposalEventBuffer;
 
-  typedef RingBuffer<SurvivalEvent, kRingBufferMaxSize> SurvivalEventBuffer;
-
   explicit GCTracer(Heap* heap);
 
   // Start collecting data.
@@ -294,8 +281,6 @@ class GCTracer {
   void AddContextDisposalTime(double time);
 
-  void AddSurvivalRate(double survival_rate);
-
   // Log an incremental marking step.
   void AddIncrementalMarkingStep(double duration, intptr_t bytes);
@@ -382,14 +367,6 @@ class GCTracer {
   // Returns 0 if no events have been recorded.
   double ContextDisposalRateInMilliseconds() const;
 
-  // Computes the average survival rate based on the last recorded survival
-  // events.
-  // Returns 0 if no events have been recorded.
-  double AverageSurvivalRate() const;
-
-  // Returns true if at least one survival event was recorded.
-  bool SurvivalEventsRecorded() const;
-
  private:
   // Print one detailed trace line in name=value format.
   // TODO(ernstm): Move to Heap.
@@ -440,12 +417,8 @@ class GCTracer {
   // RingBuffer for allocation events.
   AllocationEventBuffer allocation_events_;
 
   // RingBuffer for context disposal events.
   ContextDisposalEventBuffer context_disposal_events_;
 
-  // RingBuffer for survival events.
-  SurvivalEventBuffer survival_events_;
-
   // Cumulative number of incremental marking steps since creation of tracer.
   int cumulative_incremental_marking_steps_;
@@ -63,8 +63,6 @@ Heap::Heap()
       initial_semispace_size_(Page::kPageSize),
       target_semispace_size_(Page::kPageSize),
       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
-      initial_old_generation_size_(max_old_generation_size_),
-      old_generation_size_configured_(false),
       max_executable_size_(256ul * (kPointerSize / 4) * MB),
       // Variables set based on semispace_size_ and old_generation_size_ in
       // ConfigureHeap.
@@ -99,7 +97,7 @@ Heap::Heap()
 #ifdef DEBUG
       allocation_timeout_(0),
 #endif  // DEBUG
-      old_generation_allocation_limit_(initial_old_generation_size_),
+      old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
       old_gen_exhausted_(false),
       inline_allocation_disabled_(false),
       store_buffer_rebuilder_(store_buffer()),
@@ -109,9 +107,8 @@ Heap::Heap()
       tracer_(this),
       high_survival_rate_period_length_(0),
       promoted_objects_size_(0),
-      promotion_ratio_(0),
       promotion_rate_(0),
       semi_space_copied_object_size_(0),
-      previous_semi_space_copied_object_size_(0),
       semi_space_copied_rate_(0),
       nodes_died_in_new_space_(0),
       nodes_copied_in_new_space_(0),
@@ -436,7 +433,6 @@ void Heap::GarbageCollectionPrologue() {
   // Reset GC statistics.
   promoted_objects_size_ = 0;
-  previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
   semi_space_copied_object_size_ = 0;
   nodes_died_in_new_space_ = 0;
   nodes_copied_in_new_space_ = 0;
@@ -1040,23 +1036,14 @@ void Heap::ClearNormalizedMapCaches() {
 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
   if (start_new_space_size == 0) return;
 
-  promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
+  promotion_rate_ = (static_cast<double>(promoted_objects_size_) /
                      static_cast<double>(start_new_space_size) * 100);
 
-  if (previous_semi_space_copied_object_size_ > 0) {
-    promotion_rate_ =
-        (static_cast<double>(promoted_objects_size_) /
-         static_cast<double>(previous_semi_space_copied_object_size_) * 100);
-  } else {
-    promotion_rate_ = 0;
-  }
-
   semi_space_copied_rate_ =
       (static_cast<double>(semi_space_copied_object_size_) /
        static_cast<double>(start_new_space_size) * 100);
 
-  double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
-  tracer()->AddSurvivalRate(survival_rate);
+  double survival_rate = promotion_rate_ + semi_space_copied_rate_;
 
   if (survival_rate > kYoungSurvivalRateHighThreshold) {
     high_survival_rate_period_length_++;
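
With illustrative numbers (not from the CL): start_new_space_size = 8 MB, promoted_objects_size_ = 2 MB, semi_space_copied_object_size_ = 1 MB, and previous_semi_space_copied_object_size_ = 4 MB. The removed code then computed promotion_ratio_ = 2/8 = 25% (promoted fraction of the whole new space), promotion_rate_ = 2/4 = 50% (promoted fraction of the previous cycle's survivors), semi_space_copied_rate_ = 1/8 = 12.5%, and recorded survival_rate = 25% + 12.5% = 37.5% with the tracer. The restored code computes promotion_rate_ = 25% directly against start_new_space_size and records nothing.
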
@@ -1114,13 +1101,11 @@ bool Heap::PerformGarbageCollection(
     old_generation_allocation_limit_ =
         OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
     old_gen_exhausted_ = false;
-    old_generation_size_configured_ = true;
   } else {
     Scavenge();
   }
 
   UpdateSurvivalStatistics(start_new_space_size);
-  ConfigureInitialOldGenerationSize();
 
   isolate_->counters()->objs_since_last_young()->Set(0);
@@ -2356,17 +2341,6 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
 }
 
 
-void Heap::ConfigureInitialOldGenerationSize() {
-  if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
-    old_generation_allocation_limit_ =
-        Max(kMinimumOldGenerationAllocationLimit,
-            static_cast<intptr_t>(
-                static_cast<double>(initial_old_generation_size_) *
-                (tracer()->AverageSurvivalRate() / 100)));
-  }
-}
-
-
 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
                                           int instance_size) {
   Object* result;
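
Plugging illustrative numbers into the removed clamp: with initial_old_generation_size_ = 700 MB (the 32-bit default, per the constructor hunk above) and an average survival rate of 10%, the limit becomes Max(kMinimumOldGenerationAllocationLimit, 70 MB), roughly a tenth of the default. The Max() guard only matters when the survival-scaled value would undershoot the minimum, so the limit can never collapse to zero.
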
@@ -5179,13 +5153,6 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
       Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
          max_old_generation_size_);
 
-  if (FLAG_initial_old_space_size > 0) {
-    initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
-  } else {
-    initial_old_generation_size_ = max_old_generation_size_;
-  }
-  old_generation_allocation_limit_ = initial_old_generation_size_;
-
   // We rely on being able to allocate new arrays in paged spaces.
   DCHECK(Page::kMaxRegularHeapObjectSize >=
          (JSArray::kSize +
@@ -1506,8 +1506,6 @@ class Heap {
   int initial_semispace_size_;
   int target_semispace_size_;
   intptr_t max_old_generation_size_;
-  intptr_t initial_old_generation_size_;
-  bool old_generation_size_configured_;
   intptr_t max_executable_size_;
   intptr_t maximum_committed_;
@@ -1995,10 +1993,8 @@ class Heap {
   int high_survival_rate_period_length_;
   intptr_t promoted_objects_size_;
-  double promotion_ratio_;
   double promotion_rate_;
   intptr_t semi_space_copied_object_size_;
-  intptr_t previous_semi_space_copied_object_size_;
   double semi_space_copied_rate_;
   int nodes_died_in_new_space_;
   int nodes_copied_in_new_space_;
@@ -2014,8 +2010,6 @@ class Heap {
   // Re-visit incremental marking heuristics.
   bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
 
-  void ConfigureInitialOldGenerationSize();
-
   void SelectScavengingVisitorsTable();
 
   void IdleMarkCompact(const char* message);