Commit 6a5ab4b2 authored by ulan@chromium.org

Grow heap slower if GC freed many global handles.

BUG=263503
LOG=Y
R=hpayer@chromium.org

Review URL: https://codereview.chromium.org/352763002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22003 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent a2f85d6c
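
Summary of the change: Heap::OldGenerationAllocationLimit (new in the heap.cc hunks below) now chooses the old-space growing factor based on how many global handles the last GC freed. At or below kMinHandles = 100 freed handles the factor stays at max_factor = 4 (2 on memory-constrained devices); at or above kMaxHandles = 1000 it drops to min_factor = 1.1; in between it is linearly interpolated. For example, 550 freed handles give

  factor = 4 - (550 - 100) * (4 - 1.1) / (1000 - 100) = 4 - 1.45 = 2.55

To feed this heuristic, GlobalHandles::PostGarbageCollectionProcessing now returns the number of freed nodes instead of a bool.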
src/global-handles.cc:
@@ -611,21 +611,21 @@ bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
 }
 
-bool GlobalHandles::PostGarbageCollectionProcessing(
+int GlobalHandles::PostGarbageCollectionProcessing(
     GarbageCollector collector, GCTracer* tracer) {
   // Process weak global handle callbacks. This must be done after the
   // GC is completely done, because the callbacks may invoke arbitrary
   // API functions.
   ASSERT(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
   const int initial_post_gc_processing_count = ++post_gc_processing_count_;
-  bool next_gc_likely_to_collect_more = false;
+  int freed_nodes = 0;
   if (collector == SCAVENGER) {
     for (int i = 0; i < new_space_nodes_.length(); ++i) {
       Node* node = new_space_nodes_[i];
       ASSERT(node->is_in_new_space_list());
       if (!node->IsRetainer()) {
         // Free nodes do not have weak callbacks. Do not use them to compute
-        // the next_gc_likely_to_collect_more.
+        // the freed_nodes.
         continue;
       }
       // Skip dependent handles. Their weak callbacks might expect to be
@@ -641,29 +641,29 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
           // PostGarbageCollection processing. The current node might
           // have been deleted in that round, so we need to bail out (or
           // restart the processing).
-          return next_gc_likely_to_collect_more;
+          return freed_nodes;
         }
       }
       if (!node->IsRetainer()) {
-        next_gc_likely_to_collect_more = true;
+        freed_nodes++;
       }
     }
   } else {
     for (NodeIterator it(this); !it.done(); it.Advance()) {
       if (!it.node()->IsRetainer()) {
         // Free nodes do not have weak callbacks. Do not use them to compute
-        // the next_gc_likely_to_collect_more.
+        // the freed_nodes.
         continue;
       }
       it.node()->clear_partially_dependent();
       if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
         if (initial_post_gc_processing_count != post_gc_processing_count_) {
           // See the comment above.
-          return next_gc_likely_to_collect_more;
+          return freed_nodes;
         }
       }
       if (!it.node()->IsRetainer()) {
-        next_gc_likely_to_collect_more = true;
+        freed_nodes++;
       }
     }
   }
@@ -686,7 +686,7 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
     }
   }
   new_space_nodes_.Rewind(last);
-  return next_gc_likely_to_collect_more;
+  return freed_nodes;
 }

src/global-handles.h:
@@ -155,9 +155,9 @@ class GlobalHandles {
   static bool IsWeak(Object** location);
 
   // Process pending weak handles.
-  // Returns true if next major GC is likely to collect more garbage.
-  bool PostGarbageCollectionProcessing(GarbageCollector collector,
-                                       GCTracer* tracer);
+  // Returns the number of freed nodes.
+  int PostGarbageCollectionProcessing(GarbageCollector collector,
+                                      GCTracer* tracer);
 
   // Iterates over all strong handles.
   void IterateStrongRoots(ObjectVisitor* v);
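
The call-site adaptation for the new int return value is visible in the heap.cc hunks below: where a caller previously kept a flag, it now keeps a count and can recover the old contract by comparing against zero. A sketch of the adapted pattern (mirroring Heap::PerformGarbageCollection):

  int freed_global_handles =
      isolate_->global_handles()->PostGarbageCollectionProcessing(
          collector, tracer);
  bool next_gc_likely_to_collect_more = (freed_global_handles > 0);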

src/heap.cc:
@@ -61,7 +61,6 @@ Heap::Heap()
       // Will be 4 * reserved_semispace_size_ to ensure that young
       // generation can be aligned to its size.
       maximum_committed_(0),
-      old_space_growing_factor_(4),
       survived_since_last_expansion_(0),
       sweep_generation_(0),
       always_allocate_scope_depth_(0),
@@ -90,7 +89,6 @@ Heap::Heap()
       allocation_timeout_(0),
 #endif  // DEBUG
       old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
-      size_of_old_gen_at_last_old_space_gc_(0),
       old_gen_exhausted_(false),
       inline_allocation_disabled_(false),
       store_buffer_rebuilder_(store_buffer()),
@@ -1056,7 +1054,7 @@ bool Heap::PerformGarbageCollection(
     GarbageCollector collector,
     GCTracer* tracer,
     const v8::GCCallbackFlags gc_callback_flags) {
-  bool next_gc_likely_to_collect_more = false;
+  int freed_global_handles = 0;
 
   if (collector != SCAVENGER) {
     PROFILE(isolate_, CodeMovingGCEvent());
@@ -1096,12 +1094,11 @@ bool Heap::PerformGarbageCollection(
     // Perform mark-sweep with optional compaction.
     MarkCompact(tracer);
     sweep_generation_++;
-
-    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
-
+    // Temporarily set the limit for the case when
+    // PostGarbageCollectionProcessing allocates and triggers GC. The real
+    // limit is set after PostGarbageCollectionProcessing.
     old_generation_allocation_limit_ =
-        OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
-
+        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
     old_gen_exhausted_ = false;
   } else {
     tracer_ = tracer;
@@ -1120,7 +1117,7 @@ bool Heap::PerformGarbageCollection(
   gc_post_processing_depth_++;
   { AllowHeapAllocation allow_allocation;
     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
-    next_gc_likely_to_collect_more =
+    freed_global_handles =
         isolate_->global_handles()->PostGarbageCollectionProcessing(
             collector, tracer);
   }
@@ -1135,6 +1132,9 @@ bool Heap::PerformGarbageCollection(
     // Register the amount of external allocated memory.
     amount_of_external_allocated_memory_at_last_global_gc_ =
         amount_of_external_allocated_memory_;
+    old_generation_allocation_limit_ =
+        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
+                                     freed_global_handles);
   }
 
   { GCCallbacksScope scope(this);
@@ -1153,7 +1153,7 @@ bool Heap::PerformGarbageCollection(
   }
 #endif
 
-  return next_gc_likely_to_collect_more;
+  return freed_global_handles > 0;
 }
@@ -4989,12 +4989,6 @@ bool Heap::ConfigureHeap(int max_semi_space_size,
   code_range_size_ = code_range_size * MB;
 
-  // We set the old generation growing factor to 2 to grow the heap slower on
-  // memory-constrained devices.
-  if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
-    old_space_growing_factor_ = 2;
-  }
-
   configured_ = true;
   return true;
 }
@@ -5063,6 +5057,47 @@ int64_t Heap::PromotedExternalMemorySize() {
 }
 
+
+intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
+                                            int freed_global_handles) {
+  const int kMaxHandles = 1000;
+  const int kMinHandles = 100;
+  double min_factor = 1.1;
+  double max_factor = 4;
+  // We set the old generation growing factor to 2 to grow the heap slower on
+  // memory-constrained devices.
+  if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
+    max_factor = 2;
+  }
+  // If there are many freed global handles, then the next full GC will
+  // likely collect a lot of garbage. Choose the heap growing factor
+  // depending on freed global handles.
+  // TODO(ulan, hpayer): Take into account mutator utilization.
+  double factor;
+  if (freed_global_handles <= kMinHandles) {
+    factor = max_factor;
+  } else if (freed_global_handles >= kMaxHandles) {
+    factor = min_factor;
+  } else {
+    // Compute factor using linear interpolation between points
+    // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
+    factor = max_factor -
+             (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
+             (kMaxHandles - kMinHandles);
+  }
+
+  if (FLAG_stress_compaction ||
+      mark_compact_collector()->reduce_memory_footprint_) {
+    factor = min_factor;
+  }
+
+  intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
+  limit = Max(limit, kMinimumOldGenerationAllocationLimit);
+  limit += new_space_.Capacity();
+  intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+  return Min(limit, halfway_to_the_max);
+}
+
+
 void Heap::EnableInlineAllocation() {
   if (!inline_allocation_disabled_) return;
   inline_allocation_disabled_ = false;
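
For readers who want to try the new growth curve outside of V8, here is a minimal standalone sketch. The constants and the interpolation formula are copied from the heap.cc hunk above; the stand-in values for max_old_generation_size_, new_space_.Capacity(), and the minimum limit are illustrative assumptions, and the FLAG_stress_compaction / reduce_memory_footprint_ override is folded into a plain bool parameter.

// Standalone sketch of Heap::OldGenerationAllocationLimit as of this commit.
// Constants and formula are from the diff above; the stand-in globals and
// the example sizes in main() are assumptions for illustration only.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <initializer_list>

// Stand-ins for Heap state (assumed values, not V8's real configuration).
const int64_t kMinimumOldGenerationAllocationLimit = 2 * 1024 * 1024;
const int64_t kMaxOldGenerationSize = 512LL * 1024 * 1024;
const int64_t kNewSpaceCapacity = 8 * 1024 * 1024;

int64_t OldGenerationAllocationLimit(int64_t old_gen_size,
                                     int freed_global_handles,
                                     bool reduce_memory) {
  const int kMaxHandles = 1000;
  const int kMinHandles = 100;
  const double min_factor = 1.1;
  double max_factor = 4;  // The commit lowers this to 2 on medium-memory devices.
  double factor;
  if (freed_global_handles <= kMinHandles) {
    factor = max_factor;  // Few handles freed: grow the heap aggressively.
  } else if (freed_global_handles >= kMaxHandles) {
    factor = min_factor;  // Many handles freed: the next full GC will likely
                          // collect a lot of garbage, so grow slowly.
  } else {
    // Linear interpolation between (kMinHandles, max_factor) and
    // (kMaxHandles, min_factor).
    factor = max_factor -
             (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
                 (kMaxHandles - kMinHandles);
  }
  // Stands in for FLAG_stress_compaction || reduce_memory_footprint_.
  if (reduce_memory) factor = min_factor;
  int64_t limit = static_cast<int64_t>(old_gen_size * factor);
  limit = std::max(limit, kMinimumOldGenerationAllocationLimit);
  limit += kNewSpaceCapacity;
  int64_t halfway_to_the_max = (old_gen_size + kMaxOldGenerationSize) / 2;
  return std::min(limit, halfway_to_the_max);
}

int main() {
  const int64_t old_gen = 64LL * 1024 * 1024;  // 64 MB of promoted objects.
  // Expected factors: 0 handles -> 4.0, 550 -> 2.55, 1500 -> 1.1.
  for (int handles : {0, 550, 1500}) {
    std::printf("freed=%4d  limit=%3lld MB\n", handles,
                static_cast<long long>(
                    OldGenerationAllocationLimit(old_gen, handles, false) >>
                    20));
  }
  return 0;
}

With the assumed sizes above, this prints limits of roughly 264 MB, 171 MB, and 78 MB: the more global handles the last GC freed, the sooner the next full GC is triggered.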

src/heap.h:
@@ -1077,15 +1077,8 @@ class Heap {
   static const int kMaxExecutableSizeHugeMemoryDevice =
       700 * kPointerMultiplier;
 
-  intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size) {
-    intptr_t limit = FLAG_stress_compaction
-        ? old_gen_size + old_gen_size / 10
-        : old_gen_size * old_space_growing_factor_;
-    limit = Max(limit, kMinimumOldGenerationAllocationLimit);
-    limit += new_space_.Capacity();
-    intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
-    return Min(limit, halfway_to_the_max);
-  }
+  intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size,
+                                        int freed_global_handles);
 
   // Indicates whether inline bump-pointer allocation has been disabled.
   bool inline_allocation_disabled() { return inline_allocation_disabled_; }
@@ -1514,11 +1507,6 @@ class Heap {
   intptr_t max_executable_size_;
   intptr_t maximum_committed_;
 
-  // The old space growing factor is used in the old space heap growing
-  // strategy. The new old space size is the current old space size times
-  // old_space_growing_factor_.
-  int old_space_growing_factor_;
-
   // For keeping track of how much data has survived
   // scavenge since last new space expansion.
   int survived_since_last_expansion_;
@@ -1598,9 +1586,6 @@ class Heap {
   // generation and on every allocation in large object space.
   intptr_t old_generation_allocation_limit_;
 
-  // Used to adjust the limits that control the timing of the next GC.
-  intptr_t size_of_old_gen_at_last_old_space_gc_;
-
   // Indicates that an allocation has failed in the old generation since the
   // last GC.
   bool old_gen_exhausted_;