Check the growth of the old generation before expanding the paged
spaces (during normal allocation) and when allocating large objects.
If the old generation allocation limit is reached, fail the allocation
to trigger a garbage collection.
Review URL: http://codereview.chromium.org/8657

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@632 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent a74efbd1
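
For orientation before reading the diff: the patch replaces the single
promoted_space_limit_ with two limits that are rebased after every full GC.
The sketch below is a minimal, self-contained model of that policy, not
V8's implementation; every name in it (OldGenPolicy, OnFullGC, the
constants) is invented for illustration.

```cpp
#include <algorithm>
#include <cstdint>

// Toy model of the two-limit policy (assumed semantics, invented names).
class OldGenPolicy {
 public:
  // After a full GC, rebase both limits on the surviving old-gen size,
  // with a slack of max(minimum, size / 3), mirroring the patch.
  void OnFullGC(int64_t old_gen_size) {
    promotion_limit_ =
        old_gen_size + std::max(kMinPromotionLimit, old_gen_size / 3);
    allocation_limit_ =
        old_gen_size + std::max(kMinAllocationLimit, old_gen_size / 3);
  }

  // Consulted once a GC is already underway, to decide between a young
  // generation scavenge and a full mark-compact collection.
  bool PromotionLimitReached(int64_t old_gen_size) const {
    return old_gen_size > promotion_limit_;
  }

  // Consulted before growing an old-generation space: if true, the
  // allocation fails so that the caller triggers a GC and retries.
  bool AllocationLimitReached(int64_t old_gen_size) const {
    return old_gen_size > allocation_limit_;
  }

 private:
  static constexpr int64_t kMinPromotionLimit = 2 * 1024 * 1024;   // 2 MB
  static constexpr int64_t kMinAllocationLimit = 8 * 1024 * 1024;  // 8 MB
  int64_t promotion_limit_ = kMinPromotionLimit;
  int64_t allocation_limit_ = kMinAllocationLimit;
};
```

The promotion limit only influences which collector an already-scheduled GC
uses; the allocation limit is the new, stronger check that makes a GC happen
at all by refusing to grow the old generation past it.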
@@ -64,7 +64,12 @@ OldSpace* Heap::code_space_ = NULL;
 MapSpace* Heap::map_space_ = NULL;
 LargeObjectSpace* Heap::lo_space_ = NULL;
-int Heap::promoted_space_limit_ = 0;
+static const int kMinimumPromotionLimit = 2*MB;
+static const int kMinimumAllocationLimit = 8*MB;
+int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
+int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
+bool Heap::old_gen_exhausted_ = false;
 int Heap::amount_of_external_allocated_memory_ = 0;
@@ -138,8 +143,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
   }
   // Is enough data promoted to justify a global GC?
-  if (PromotedSpaceSize() + PromotedExternalMemorySize()
-      > promoted_space_limit_) {
+  if (OldGenerationPromotionLimitReached()) {
    Counters::gc_compactor_caused_by_promoted_data.Increment();
    return MARK_COMPACTOR;
   }
@@ -360,9 +364,11 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
   if (collector == MARK_COMPACTOR) {
     MarkCompact(tracer);
-    int promoted_space_size = PromotedSpaceSize();
-    promoted_space_limit_ =
-        promoted_space_size + Max(2 * MB, (promoted_space_size/100) * 35);
+    int old_gen_size = PromotedSpaceSize();
+    old_gen_promotion_limit_ =
+        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
+    old_gen_allocation_limit_ =
+        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 3);
+    old_gen_exhausted_ = false;
   // If we have used the mark-compact collector to collect the new
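
To make the new slack formula concrete: if a full GC leaves 30 MB live in
the old generation, both limits become 30 + max(minimum, 30 / 3) = 40 MB;
for a small 3 MB heap the fixed minimums dominate, giving 5 MB and 11 MB.
A tiny check of that arithmetic, assuming Max and MB correspond to std::max
and 1024*1024:

```cpp
#include <algorithm>
#include <cassert>

int main() {
  const int MB = 1024 * 1024;  // assumed to match V8's MB constant
  const int kMinimumPromotionLimit = 2 * MB;
  const int kMinimumAllocationLimit = 8 * MB;

  // Large heap: the size/3 slack exceeds both minimums.
  int size = 30 * MB;
  assert(size + std::max(kMinimumPromotionLimit, size / 3) == 40 * MB);
  assert(size + std::max(kMinimumAllocationLimit, size / 3) == 40 * MB);

  // Small heap: the fixed minimums dominate.
  size = 3 * MB;
  assert(size + std::max(kMinimumPromotionLimit, size / 3) == 5 * MB);
  assert(size + std::max(kMinimumAllocationLimit, size / 3) == 11 * MB);
  return 0;
}
```

Compared with the old formula (size + max(2 MB, 35% of size)), the new
one-third slack is marginally tighter for large heaps.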
@@ -2291,7 +2297,8 @@ void Heap::ReportHeapStatistics(const char* title) {
   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
          title, gc_count_);
   PrintF("mark-compact GC : %d\n", mc_count_);
-  PrintF("promoted_space_limit_ %d\n", promoted_space_limit_);
+  PrintF("old_gen_promotion_limit_ %d\n", old_gen_promotion_limit_);
+  PrintF("old_gen_allocation_limit_ %d\n", old_gen_allocation_limit_);
   PrintF("\n");
   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
@@ -744,6 +744,20 @@ class Heap : public AllStatic {
   // Allocate unitialized fixed array (pretenure == NON_TENURE).
   static Object* AllocateRawFixedArray(int length);
+  // True if we have reached the allocation limit in the old generation that
+  // should force the next GC (caused normally) to be a full one.
+  static bool OldGenerationPromotionLimitReached() {
+    return (PromotedSpaceSize() + PromotedExternalMemorySize())
+           > old_gen_promotion_limit_;
+  }
+  // True if we have reached the allocation limit in the old generation that
+  // should artificially cause a GC right now.
+  static bool OldGenerationAllocationLimitReached() {
+    return (PromotedSpaceSize() + PromotedExternalMemorySize())
+           > old_gen_allocation_limit_;
+  }
  private:
   static int semispace_size_;
   static int initial_semispace_size_;
@@ -785,8 +799,15 @@ class Heap : public AllStatic {
   static bool disallow_allocation_failure_;
 #endif // DEBUG
-  // Promotion limit that trigger a global GC
-  static int promoted_space_limit_;
+  // Limit that triggers a global GC on the next (normally caused) GC. This
+  // is checked when we have already decided to do a GC to help determine
+  // which collector to invoke.
+  static int old_gen_promotion_limit_;
+  // Limit that triggers a global GC as soon as is reasonable. This is
+  // checked before expanding a paged space in the old generation and on
+  // every allocation in large object space.
+  static int old_gen_allocation_limit_;
   // The amount of external memory registered through the API kept alive
   // by global handles
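The two predicates above compare the same quantity against different
limits, and they are consumed at different points: the promotion check
inside collector selection (heap.cc above), the allocation check on the
slow allocation paths in spaces.cc below. The overall fail-then-collect
protocol looks roughly like the toy model here; every name is invented,
and the GC and rebase behavior are stand-ins, not V8 code.

```cpp
#include <cstddef>

// Toy model of "fail allocation to trigger a GC" (all names invented).
struct ToyHeap {
  size_t old_gen_size = 0;
  size_t allocation_limit = 8u << 20;  // 8 MB floor, as in the patch

  // Slow path: refuse to grow the old generation past the limit.
  void* SlowAllocate(size_t bytes) {
    if (old_gen_size + bytes > allocation_limit) return nullptr;
    old_gen_size += bytes;
    return &old_gen_size;  // stand-in for a real object address
  }

  // Pretend a GC frees half the heap, then rebase the limit the way the
  // patch does: size + max(minimum, size / 3).
  void CollectGarbage() {
    old_gen_size /= 2;
    size_t slack = old_gen_size / 3;
    size_t minimum = 8u << 20;
    allocation_limit = old_gen_size + (slack > minimum ? slack : minimum);
  }

  // Caller side: one failed allocation triggers exactly one GC and retry.
  void* Allocate(size_t bytes) {
    if (void* result = SlowAllocate(bytes)) return result;
    CollectGarbage();
    return SlowAllocate(bytes);  // may still fail: genuine OOM
  }
};

int main() {
  ToyHeap heap;
  return heap.Allocate(1u << 20) != nullptr ? 0 : 1;
}
```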
@@ -1530,8 +1530,14 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
     return HeapObject::cast(result);
   }
-  // Free list allocation failed and there is no next page. Try to expand
-  // the space and allocate in the new next page.
+  // Free list allocation failed and there is no next page. Fail if we have
+  // hit the old generation size limit that should cause a garbage
+  // collection.
+  if (Heap::OldGenerationAllocationLimitReached()) {
+    return NULL;
+  }
+  // Try to expand the space and allocate in the new next page.
   ASSERT(!current_page->next_page()->is_valid());
   if (Expand(current_page)) {
     return AllocateInNextPage(current_page, size_in_bytes);
@@ -2009,8 +2015,14 @@ HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) {
     }
   }
-  // Free list allocation failed and there is no next page. Try to expand
-  // the space and allocate in the new next page.
+  // Free list allocation failed and there is no next page. Fail if we have
+  // hit the old generation size limit that should cause a garbage
+  // collection.
+  if (Heap::OldGenerationAllocationLimitReached()) {
+    return NULL;
+  }
+  // Try to expand the space and allocate in the new next page.
   ASSERT(!current_page->next_page()->is_valid());
   if (Expand(current_page)) {
     return AllocateInNextPage(current_page, size_in_bytes);
@@ -2236,6 +2248,13 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
                                               int object_size,
                                               Executability executable) {
   ASSERT(0 < object_size && object_size <= requested_size);
+  // Check if we want to force a GC before growing the old space further.
+  // If so, fail the allocation.
+  if (Heap::OldGenerationAllocationLimitReached()) {
+    return Failure::RetryAfterGC(requested_size, identity());
+  }
   size_t chunk_size;
   LargeObjectChunk* chunk =
       LargeObjectChunk::New(requested_size, &chunk_size, executable);
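Note the asymmetry with the paged spaces: OldSpace and MapSpace return
NULL and let the caller react, while large object space returns a
retry-after-GC failure carrying the requested size and the space identity.
It also performs the check on every allocation, because each large object
gets its own chunk and therefore always grows the old generation. Below is
a toy model of such a tagged allocation result, with all names invented
and only loosely mimicking V8's Object*/Failure convention:

```cpp
#include <cstdio>

// Toy "retry after GC" result type (invented names, assumed semantics).
enum class SpaceId { kOldSpace, kMapSpace, kLargeObjectSpace };

struct AllocResult {
  void* object;         // non-null on success
  int retry_bytes;      // on failure: how much to allocate after the GC
  SpaceId retry_space;  // on failure: which space the GC should target

  static AllocResult RetryAfterGC(int bytes, SpaceId space) {
    return AllocResult{nullptr, bytes, space};
  }
  bool IsRetry() const { return object == nullptr; }
};

int main() {
  AllocResult r =
      AllocResult::RetryAfterGC(1 << 20, SpaceId::kLargeObjectSpace);
  if (r.IsRetry()) {
    // A real runtime would collect r.retry_space here and then retry an
    // allocation of r.retry_bytes.
    std::printf("GC requested for %d bytes\n", r.retry_bytes);
  }
  return 0;
}
```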