Commit dae5fc6f authored by antonm@chromium.org

Attempt to collect more garbage before panicking with out of memory.

Currently weak handles retain an object for another GC round (often, a
major GC round). Instrumenting Chromium shows that navigation leaves
many global objects which are only collected on the next pass. Let's
attempt to collect more garbage when approaching the OOM condition.

Review URL: http://codereview.chromium.org/3327021

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5455 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent c3f04bf4
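
In essence, the patch makes the last-resort GC path loop: run a major collection, process weak handle callbacks, and if any callback fired, collect again, since a callback may have dropped the last strong reference to an object the previous cycle could not reclaim. Below is a minimal standalone sketch of that loop, with ProcessWeakCallbacks() and CollectMajorGC() as hypothetical stand-ins for the internals touched in the diff (GlobalHandles::PostGarbageCollectionProcessing() and the mark-compact collector):

// Minimal sketch, not V8 code: stand-ins for the internals changed below.
static bool ProcessWeakCallbacks() {
  // Stand-in for GlobalHandles::PostGarbageCollectionProcessing();
  // returns true if at least one weak handle callback was invoked.
  return false;
}
static void CollectMajorGC() {
  // Stand-in for a full mark-compact collection.
}

void CollectAllAvailableGarbage() {
  static const int kMaxNumberOfAttempts = 7;  // same bound as in the patch
  CollectMajorGC();
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; ++attempt) {
    // Weak callbacks run arbitrary code, so the sequence need not
    // terminate on its own; stop after a fixed number of rounds.
    if (!ProcessWeakCallbacks()) break;
    CollectMajorGC();  // callbacks may have freed more garbage
  }
}
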
@@ -372,13 +372,14 @@ void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
int post_gc_processing_count = 0;
void GlobalHandles::PostGarbageCollectionProcessing() {
bool GlobalHandles::PostGarbageCollectionProcessing() {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
// At the same time deallocate all DESTROYED nodes.
ASSERT(Heap::gc_state() == Heap::NOT_IN_GC);
const int initial_post_gc_processing_count = ++post_gc_processing_count;
bool weak_callback_invoked = false;
Node** p = &head_;
while (*p != NULL) {
if ((*p)->PostGarbageCollectionProcessing()) {
@@ -389,6 +390,7 @@ void GlobalHandles::PostGarbageCollectionProcessing() {
// restart the processing).
break;
}
weak_callback_invoked = true;
}
if ((*p)->state_ == Node::DESTROYED) {
// Delete the link.
@@ -407,6 +409,7 @@ void GlobalHandles::PostGarbageCollectionProcessing() {
if (first_deallocated()) {
first_deallocated()->set_next(head());
}
return weak_callback_invoked;
}
......
@@ -95,8 +95,9 @@ class GlobalHandles : public AllStatic {
// Tells whether global handle is weak.
static bool IsWeak(Object** location);
// Process pending weak handles.
static void PostGarbageCollectionProcessing();
// Process pending weak handles. Returns true if any weak handle
// callback has been invoked.
static bool PostGarbageCollectionProcessing();
// Iterates over all strong handles.
static void IterateStrongRoots(ObjectVisitor* v);
......
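
The new return value gives the heap a cheap termination test for the re-collection loop: if no weak callback ran, another major GC cannot free anything new. A simplified, self-contained illustration of the flag-aggregation pattern used in PostGarbageCollectionProcessing() above (Node here is a toy type, not V8's):

#include <cstddef>

struct Node {
  Node* next;
  bool has_pending_weak_callback;
  // Runs the embedder's weak callback if one is pending; returns
  // whether a callback was actually invoked.
  bool RunWeakCallbackIfPending() {
    if (!has_pending_weak_callback) return false;
    has_pending_weak_callback = false;
    // ... invoke the embedder-supplied callback here ...
    return true;
  }
};

// Mirrors the shape of the real code: walk the handle list, run pending
// callbacks, and report whether any fired so the caller can re-collect.
bool PostGarbageCollectionProcessing(Node* head) {
  bool weak_callback_invoked = false;
  for (Node* node = head; node != NULL; node = node->next) {
    if (node->RunWeakCallbackIfPending()) weak_callback_invoked = true;
  }
  return weak_callback_invoked;
}
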
@@ -35,6 +35,16 @@
namespace v8 {
namespace internal {
void Heap::UpdateOldSpaceLimits() {
int old_gen_size = PromotedSpaceSize();
old_gen_promotion_limit_ =
old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
old_gen_allocation_limit_ =
old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
old_gen_exhausted_ = false;
}
int Heap::MaxObjectSizeInPagedSpace() {
return Page::kMaxHeapObjectSize;
}
@@ -403,7 +413,7 @@ void Heap::SetLastScriptId(Object* last_script_id) {
} \
if (!__object__->IsRetryAfterGC()) RETURN_EMPTY; \
Counters::gc_last_resort_from_handles.Increment(); \
Heap::CollectAllGarbage(false); \
Heap::CollectAllAvailableGarbage(); \
{ \
AlwaysAllocateScope __scope__; \
__object__ = FUNCTION_CALL; \
......
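
The macro above is V8's allocation retry ladder; this change swaps its final rung from a single full GC to CollectAllAvailableGarbage(). A function-style sketch of that ladder, where every name is a simplified stand-in rather than the real signature:

#include <cstdlib>

typedef void* Object;
static Object kRetryAfterGC = NULL;  // toy sentinel: "failed, retry after GC"

static Object Allocate(int size) { return std::malloc(size); }  // toy allocator
static bool IsRetryAfterGC(Object o) { return o == kRetryAfterGC; }
static void CollectSomeGarbage() {}           // stand-in: one normal GC
static void CollectAllAvailableGarbage() {}   // stand-in: aggressive, last-resort GC
static void ReportOutOfMemory() { std::abort(); }

// Shape of the retry macro after this change: one normal GC and retry,
// then the aggressive last-resort collection before giving up.
Object AllocateWithRetry(int size) {
  Object result = Allocate(size);
  if (!IsRetryAfterGC(result)) return result;

  CollectSomeGarbage();
  result = Allocate(size);
  if (!IsRetryAfterGC(result)) return result;

  CollectAllAvailableGarbage();  // was: one more ordinary full GC
  result = Allocate(size);
  if (IsRetryAfterGC(result)) ReportOutOfMemory();
  return result;
}
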
@@ -55,7 +55,6 @@ namespace internal {
String* Heap::hidden_symbol_;
Object* Heap::roots_[Heap::kRootListLength];
NewSpace Heap::new_space_;
OldSpace* Heap::old_pointer_space_ = NULL;
OldSpace* Heap::old_data_space_ = NULL;
@@ -64,9 +63,6 @@ MapSpace* Heap::map_space_ = NULL;
CellSpace* Heap::cell_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;
static const int kMinimumPromotionLimit = 2*MB;
static const int kMinimumAllocationLimit = 8*MB;
int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
@@ -405,17 +401,26 @@ void Heap::GarbageCollectionEpilogue() {
}
void Heap::CollectAllGarbage(bool force_compaction) {
void Heap::CollectAllGarbage(bool force_compaction,
CollectionPolicy collectionPolicy) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
MarkCompactCollector::SetForceCompaction(force_compaction);
CollectGarbage(0, OLD_POINTER_SPACE);
CollectGarbage(0, OLD_POINTER_SPACE, collectionPolicy);
MarkCompactCollector::SetForceCompaction(false);
}
bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
void Heap::CollectAllAvailableGarbage() {
CompilationCache::Clear();
CollectAllGarbage(true, AGGRESSIVE);
}
bool Heap::CollectGarbage(int requested_size,
AllocationSpace space,
CollectionPolicy collectionPolicy) {
// The VM is in the GC state until exiting this function.
VMState state(GC);
@@ -442,7 +447,7 @@ bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
? &Counters::gc_scavenger
: &Counters::gc_compactor;
rate->Start();
PerformGarbageCollection(space, collector, &tracer);
PerformGarbageCollection(collector, &tracer, collectionPolicy);
rate->Stop();
GarbageCollectionEpilogue();
@@ -475,7 +480,7 @@ bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
void Heap::PerformScavenge() {
GCTracer tracer;
PerformGarbageCollection(NEW_SPACE, SCAVENGER, &tracer);
PerformGarbageCollection(SCAVENGER, &tracer, NORMAL);
}
@@ -664,9 +669,9 @@ void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
survival_rate_ = survival_rate;
}
void Heap::PerformGarbageCollection(AllocationSpace space,
GarbageCollector collector,
GCTracer* tracer) {
void Heap::PerformGarbageCollection(GarbageCollector collector,
GCTracer* tracer,
CollectionPolicy collectionPolicy) {
VerifySymbolTable();
if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
ASSERT(!allocation_allowed_);
@@ -696,25 +701,45 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
UpdateSurvivalRateTrend(start_new_space_size);
int old_gen_size = PromotedSpaceSize();
old_gen_promotion_limit_ =
old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
old_gen_allocation_limit_ =
old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
if (high_survival_rate_during_scavenges &&
IsStableOrIncreasingSurvivalTrend()) {
// Stable high survival rates of young objects both during partial and
// full collection indicate that mutator is either building or modifying
// a structure with a long lifetime.
// In this case we aggressively raise old generation memory limits to
// postpone subsequent mark-sweep collection and thus trade memory
// space for the mutation speed.
old_gen_promotion_limit_ *= 2;
old_gen_allocation_limit_ *= 2;
UpdateOldSpaceLimits();
// Major GC would invoke weak handle callbacks on weakly reachable
// handles, but won't collect weakly reachable objects until the next
// major GC. Therefore, if we collect aggressively and a weak handle
// callback has been invoked, we rerun the major GC to release objects
// which became garbage.
if (collectionPolicy == AGGRESSIVE) {
// Note: as weak callbacks can execute arbitrary code, we cannot
// hope that eventually there will be no weak callback invocations.
// Therefore stop recollecting after several attempts.
const int kMaxNumberOfAttempts = 7;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
{ DisableAssertNoAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
if (!GlobalHandles::PostGarbageCollectionProcessing()) break;
}
MarkCompact(tracer);
// Weak handle callbacks can allocate data, so keep limits correct.
UpdateOldSpaceLimits();
}
} else {
if (high_survival_rate_during_scavenges &&
IsStableOrIncreasingSurvivalTrend()) {
// Stable high survival rates of young objects both during partial and
// full collection indicate that mutator is either building or modifying
// a structure with a long lifetime.
// In this case we aggressively raise old generation memory limits to
// postpone subsequent mark-sweep collection and thus trade memory
// space for the mutation speed.
old_gen_promotion_limit_ *= 2;
old_gen_allocation_limit_ *= 2;
}
}
old_gen_exhausted_ = false;
{ DisableAssertNoAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
GlobalHandles::PostGarbageCollectionProcessing();
}
} else {
tracer_ = tracer;
Scavenge();
@@ -725,12 +750,6 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
Counters::objs_since_last_young.Set(0);
if (collector == MARK_COMPACTOR) {
DisableAssertNoAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
GlobalHandles::PostGarbageCollectionProcessing();
}
// Update relocatables.
Relocatable::PostGarbageCollectionProcessing();
......
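
Since weak callbacks can allocate, the aggressive loop recomputes the old-generation limits after every round via the newly factored-out UpdateOldSpaceLimits(). For concreteness, a runnable example of the arithmetic it performs (constants copied from the patch; the 30 MB figure is an arbitrary example):

#include <algorithm>
#include <cstdio>

static const int MB = 1024 * 1024;
static const int kMinimumPromotionLimit = 2 * MB;
static const int kMinimumAllocationLimit = 8 * MB;

int main() {
  int old_gen_size = 30 * MB;  // example promoted-space size after GC
  int promotion_limit =
      old_gen_size + std::max(kMinimumPromotionLimit, old_gen_size / 3);
  int allocation_limit =
      old_gen_size + std::max(kMinimumAllocationLimit, old_gen_size / 2);
  // With 30 MB promoted: the next mark-sweep is deferred until promotion
  // reaches 40 MB or old-space allocation reaches 45 MB.
  std::printf("promotion limit: %d MB, allocation limit: %d MB\n",
              promotion_limit / MB, allocation_limit / MB);
  return 0;
}
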
@@ -687,13 +687,21 @@ class Heap : public AllStatic {
static void GarbageCollectionPrologue();
static void GarbageCollectionEpilogue();
enum CollectionPolicy { NORMAL, AGGRESSIVE };
// Performs garbage collection operation.
// Returns whether required_space bytes are available after the collection.
static bool CollectGarbage(int required_space, AllocationSpace space);
static bool CollectGarbage(int required_space,
AllocationSpace space,
CollectionPolicy collectionPolicy = NORMAL);
// Performs a full garbage collection. Force compaction if the
// parameter is true.
static void CollectAllGarbage(bool force_compaction);
static void CollectAllGarbage(bool force_compaction,
CollectionPolicy collectionPolicy = NORMAL);
// Last-resort GC; should try to squeeze out as much garbage as possible.
static void CollectAllAvailableGarbage();
// Notify the heap that a context has been disposed.
static int NotifyContextDisposed() { return ++contexts_disposed_; }
@@ -1214,9 +1222,14 @@ class Heap : public AllStatic {
static GarbageCollector SelectGarbageCollector(AllocationSpace space);
// Performs garbage collection
static void PerformGarbageCollection(AllocationSpace space,
GarbageCollector collector,
GCTracer* tracer);
static void PerformGarbageCollection(GarbageCollector collector,
GCTracer* tracer,
CollectionPolicy collectionPolicy);
static const int kMinimumPromotionLimit = 2 * MB;
static const int kMinimumAllocationLimit = 8 * MB;
inline static void UpdateOldSpaceLimits();
// Allocate an uninitialized object in map space. The behavior is identical
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
......
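
Both CollectionPolicy parameters default to NORMAL, so every existing call site keeps compiling unchanged and the aggressive mode is strictly opt-in. Illustrative calls against the declarations above (a fragment using the internal API, shown only to contrast the policies, not a standalone program):

// Pre-existing call sites are unaffected:
Heap::CollectGarbage(0, OLD_POINTER_SPACE);       // NORMAL policy by default
Heap::CollectAllGarbage(false);                   // NORMAL policy by default

// Opt-in aggressive collection, and the new last-resort helper that
// first clears the compilation cache:
Heap::CollectAllGarbage(true, Heap::AGGRESSIVE);
Heap::CollectAllAvailableGarbage();
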