Commit 6a5ab4b2 authored by ulan@chromium.org

Grow heap slower if GC freed many global handles.

BUG=263503
LOG=Y
R=hpayer@chromium.org

Review URL: https://codereview.chromium.org/352763002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22003 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent a2f85d6c
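For orientation before the diff: the core of this change is the new Heap::OldGenerationAllocationLimit, which replaces the fixed old-space growing factor with one interpolated between a maximum of 4 (2 on memory-constrained devices) and a minimum of 1.1, based on how many global handles the last GC freed. The standalone C++ sketch below is illustrative only and is not part of the patch; the GrowingFactor helper and the driver are made up for this note, and only the constants mirror the diff.

// Illustrative sketch of the growth-factor interpolation introduced below.
// Not V8 code: GrowingFactor and main() are hypothetical; constants match the patch.
#include <cstdio>
#include <initializer_list>

double GrowingFactor(int freed_global_handles) {
  const int kMinHandles = 100;    // at or below this, grow as fast as allowed
  const int kMaxHandles = 1000;   // at or above this, grow as slowly as allowed
  const double kMinFactor = 1.1;
  const double kMaxFactor = 4.0;  // the patch lowers this cap to 2 on low-memory devices
  if (freed_global_handles <= kMinHandles) return kMaxFactor;
  if (freed_global_handles >= kMaxHandles) return kMinFactor;
  // Linear interpolation between (kMinHandles, kMaxFactor) and
  // (kMaxHandles, kMinFactor): more freed handles -> slower heap growth.
  return kMaxFactor - (freed_global_handles - kMinHandles) *
                          (kMaxFactor - kMinFactor) / (kMaxHandles - kMinHandles);
}

int main() {
  // Freeing 550 handles lands halfway between the bounds: factor ~= 2.55.
  for (int freed : {0, 100, 550, 1000, 5000}) {
    std::printf("freed=%4d -> factor=%.2f\n", freed, GrowingFactor(freed));
  }
  return 0;
}

The resulting factor multiplies the promoted old-generation size to produce the next allocation limit, which the new function in heap.cc then clamps by kMinimumOldGenerationAllocationLimit and by the halfway point to max_old_generation_size_.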
@@ -611,21 +611,21 @@ bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
 }
-bool GlobalHandles::PostGarbageCollectionProcessing(
+int GlobalHandles::PostGarbageCollectionProcessing(
     GarbageCollector collector, GCTracer* tracer) {
   // Process weak global handle callbacks. This must be done after the
   // GC is completely done, because the callbacks may invoke arbitrary
   // API functions.
   ASSERT(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
   const int initial_post_gc_processing_count = ++post_gc_processing_count_;
-  bool next_gc_likely_to_collect_more = false;
+  int freed_nodes = 0;
   if (collector == SCAVENGER) {
     for (int i = 0; i < new_space_nodes_.length(); ++i) {
       Node* node = new_space_nodes_[i];
       ASSERT(node->is_in_new_space_list());
       if (!node->IsRetainer()) {
         // Free nodes do not have weak callbacks. Do not use them to compute
-        // the next_gc_likely_to_collect_more.
+        // the freed_nodes.
         continue;
       }
       // Skip dependent handles. Their weak callbacks might expect to be
@@ -641,29 +641,29 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
           // PostGarbageCollection processing. The current node might
           // have been deleted in that round, so we need to bail out (or
           // restart the processing).
-          return next_gc_likely_to_collect_more;
+          return freed_nodes;
         }
       }
       if (!node->IsRetainer()) {
-        next_gc_likely_to_collect_more = true;
+        freed_nodes++;
       }
     }
   } else {
     for (NodeIterator it(this); !it.done(); it.Advance()) {
       if (!it.node()->IsRetainer()) {
         // Free nodes do not have weak callbacks. Do not use them to compute
-        // the next_gc_likely_to_collect_more.
+        // the freed_nodes.
         continue;
       }
       it.node()->clear_partially_dependent();
       if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
         if (initial_post_gc_processing_count != post_gc_processing_count_) {
           // See the comment above.
-          return next_gc_likely_to_collect_more;
+          return freed_nodes;
         }
       }
       if (!it.node()->IsRetainer()) {
-        next_gc_likely_to_collect_more = true;
+        freed_nodes++;
       }
     }
   }
@@ -686,7 +686,7 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
     }
   }
   new_space_nodes_.Rewind(last);
-  return next_gc_likely_to_collect_more;
+  return freed_nodes;
 }
@@ -155,9 +155,9 @@ class GlobalHandles {
   static bool IsWeak(Object** location);
   // Process pending weak handles.
-  // Returns true if next major GC is likely to collect more garbage.
-  bool PostGarbageCollectionProcessing(GarbageCollector collector,
-                                       GCTracer* tracer);
+  // Returns the number of freed nodes.
+  int PostGarbageCollectionProcessing(GarbageCollector collector,
+                                      GCTracer* tracer);
   // Iterates over all strong handles.
   void IterateStrongRoots(ObjectVisitor* v);
@@ -61,7 +61,6 @@ Heap::Heap()
       // Will be 4 * reserved_semispace_size_ to ensure that young
       // generation can be aligned to its size.
       maximum_committed_(0),
-      old_space_growing_factor_(4),
       survived_since_last_expansion_(0),
       sweep_generation_(0),
       always_allocate_scope_depth_(0),
@@ -90,7 +89,6 @@ Heap::Heap()
       allocation_timeout_(0),
 #endif // DEBUG
       old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
-      size_of_old_gen_at_last_old_space_gc_(0),
       old_gen_exhausted_(false),
       inline_allocation_disabled_(false),
       store_buffer_rebuilder_(store_buffer()),
@@ -1056,7 +1054,7 @@ bool Heap::PerformGarbageCollection(
     GarbageCollector collector,
     GCTracer* tracer,
     const v8::GCCallbackFlags gc_callback_flags) {
-  bool next_gc_likely_to_collect_more = false;
+  int freed_global_handles = 0;
   if (collector != SCAVENGER) {
     PROFILE(isolate_, CodeMovingGCEvent());
@@ -1096,12 +1094,11 @@ bool Heap::PerformGarbageCollection(
     // Perform mark-sweep with optional compaction.
     MarkCompact(tracer);
     sweep_generation_++;
-    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
     // Temporarily set the limit for case when PostGarbageCollectionProcessing
     // allocates and triggers GC. The real limit is set at after
     // PostGarbageCollectionProcessing.
     old_generation_allocation_limit_ =
-        OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
+        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
     old_gen_exhausted_ = false;
   } else {
     tracer_ = tracer;
@@ -1120,7 +1117,7 @@ bool Heap::PerformGarbageCollection(
   gc_post_processing_depth_++;
   { AllowHeapAllocation allow_allocation;
     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
-    next_gc_likely_to_collect_more =
+    freed_global_handles =
         isolate_->global_handles()->PostGarbageCollectionProcessing(
             collector, tracer);
   }
@@ -1135,6 +1132,9 @@ bool Heap::PerformGarbageCollection(
     // Register the amount of external allocated memory.
     amount_of_external_allocated_memory_at_last_global_gc_ =
         amount_of_external_allocated_memory_;
+    old_generation_allocation_limit_ =
+        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
+                                     freed_global_handles);
   }
   { GCCallbacksScope scope(this);
@@ -1153,7 +1153,7 @@ bool Heap::PerformGarbageCollection(
   }
 #endif
-  return next_gc_likely_to_collect_more;
+  return freed_global_handles > 0;
 }
@@ -4989,12 +4989,6 @@ bool Heap::ConfigureHeap(int max_semi_space_size,
   code_range_size_ = code_range_size * MB;
-  // We set the old generation growing factor to 2 to grow the heap slower on
-  // memory-constrained devices.
-  if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
-    old_space_growing_factor_ = 2;
-  }
   configured_ = true;
   return true;
 }
@@ -5063,6 +5057,47 @@ int64_t Heap::PromotedExternalMemorySize() {
 }
+intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
+                                            int freed_global_handles) {
+  const int kMaxHandles = 1000;
+  const int kMinHandles = 100;
+  double min_factor = 1.1;
+  double max_factor = 4;
+  // We set the old generation growing factor to 2 to grow the heap slower on
+  // memory-constrained devices.
+  if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
+    max_factor = 2;
+  }
+  // If there are many freed global handles, then the next full GC will
+  // likely collect a lot of garbage. Choose the heap growing factor
+  // depending on freed global handles.
+  // TODO(ulan, hpayer): Take into account mutator utilization.
+  double factor;
+  if (freed_global_handles <= kMinHandles) {
+    factor = max_factor;
+  } else if (freed_global_handles >= kMaxHandles) {
+    factor = min_factor;
+  } else {
+    // Compute factor using linear interpolation between points
+    // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
+    factor = max_factor -
+             (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
+             (kMaxHandles - kMinHandles);
+  }
+  if (FLAG_stress_compaction ||
+      mark_compact_collector()->reduce_memory_footprint_) {
+    factor = min_factor;
+  }
+  intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
+  limit = Max(limit, kMinimumOldGenerationAllocationLimit);
+  limit += new_space_.Capacity();
+  intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+  return Min(limit, halfway_to_the_max);
+}
 void Heap::EnableInlineAllocation() {
   if (!inline_allocation_disabled_) return;
   inline_allocation_disabled_ = false;
@@ -1077,15 +1077,8 @@ class Heap {
   static const int kMaxExecutableSizeHugeMemoryDevice =
       700 * kPointerMultiplier;
-  intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size) {
-    intptr_t limit = FLAG_stress_compaction
-        ? old_gen_size + old_gen_size / 10
-        : old_gen_size * old_space_growing_factor_;
-    limit = Max(limit, kMinimumOldGenerationAllocationLimit);
-    limit += new_space_.Capacity();
-    intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
-    return Min(limit, halfway_to_the_max);
-  }
+  intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size,
+                                        int freed_global_handles);
   // Indicates whether inline bump-pointer allocation has been disabled.
   bool inline_allocation_disabled() { return inline_allocation_disabled_; }
@@ -1514,11 +1507,6 @@ class Heap {
   intptr_t max_executable_size_;
   intptr_t maximum_committed_;
-  // The old space growing factor is used in the old space heap growing
-  // strategy. The new old space size is the current old space size times
-  // old_space_growing_factor_.
-  int old_space_growing_factor_;
   // For keeping track of how much data has survived
   // scavenge since last new space expansion.
   int survived_since_last_expansion_;
@@ -1598,9 +1586,6 @@ class Heap {
   // generation and on every allocation in large object space.
   intptr_t old_generation_allocation_limit_;
-  // Used to adjust the limits that control the timing of the next GC.
-  intptr_t size_of_old_gen_at_last_old_space_gc_;
   // Indicates that an allocation has failed in the old generation since the
   // last GC.
   bool old_gen_exhausted_;