Commit e2f2c77e authored by erik.corry@gmail.com

Be more willing to expand old space when evacuating new space at the end of
a mark-sweep.

We have a soft limit on old-space size, which is designed to trigger an
old-space collection when we hit it.  Unfortunately, although the soft limit
had already triggered an old-space collection, it was still preventing
objects from being promoted out of new space.  For every promotion candidate
we were checking three different ways to allocate in old space before giving
up and putting the object in the other semispace.  This change lets promoted
objects go to old space, and also makes us more eager to sweep a page before
trying other ways to find space for an object.
Review URL: http://codereview.chromium.org/8748005

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10092 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 83aa4b6e
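For context, a minimal sketch of the soft-limit mechanism the message
describes, using made-up stand-in names and sizes rather than V8's actual
fields:

#include <cstddef>

// Sketch only: a heap with a soft old-generation limit that ordinary
// allocations respect, but which can be bypassed while at least one
// always-allocate scope is active (e.g. during new-space evacuation).
struct HeapModel {
  std::size_t old_generation_size = 0;
  std::size_t soft_limit = 64u * 1024u * 1024u;  // made-up threshold
  int always_allocate_depth = 0;

  bool always_allocate() const { return always_allocate_depth != 0; }
  bool OldGenerationAllocationLimitReached() const {
    return old_generation_size > soft_limit;
  }
  bool MayAllocateInOldSpace() const {
    // A failing check here is what normally triggers a mark-sweep.
    return always_allocate() || !OldGenerationAllocationLimitReached();
  }
};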
src/mark-compact.cc
@@ -2582,6 +2582,10 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
 
 void MarkCompactCollector::EvacuateNewSpace() {
+  // There are soft limits in the allocation code, designed to trigger a
+  // mark-sweep collection by failing allocations.  But since we are already
+  // in a mark-sweep allocation, there is no sense in trying to trigger one.
+  AlwaysAllocateScope scope;
   heap()->CheckNewSpaceExpansionCriteria();
 
   NewSpace* new_space = heap()->new_space();
...
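The AlwaysAllocateScope added above is an RAII guard.  A minimal sketch of
that pattern, reusing the hypothetical HeapModel from the first sketch (the
real V8 class differs in detail):

// Sketch only: entering the scope disables the soft limit; leaving it,
// even via an early return, re-enables it.
class AlwaysAllocateScopeModel {
 public:
  explicit AlwaysAllocateScopeModel(HeapModel* heap) : heap_(heap) {
    ++heap_->always_allocate_depth;
  }
  ~AlwaysAllocateScopeModel() { --heap_->always_allocate_depth; }

 private:
  HeapModel* heap_;
};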
src/spaces.cc
@@ -2142,29 +2142,22 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.
 
-  // Free list allocation failed and there is no next page.  Fail if we have
-  // hit the old generation size limit that should cause a garbage
-  // collection.
-  if (!heap()->always_allocate() &&
-      heap()->OldGenerationAllocationLimitReached()) {
-    return NULL;
-  }
-
-  // If there are unswept pages advance lazy sweeper.
+  // If there are unswept pages advance lazy sweeper then sweep one page before
+  // allocating a new page.
   if (first_unswept_page_->is_valid()) {
     AdvanceSweeper(size_in_bytes);
 
     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
     if (object != NULL) return object;
+  }
 
-    if (!IsSweepingComplete()) {
-      AdvanceSweeper(kMaxInt);
-
-      // Retry the free list allocation.
-      object = free_list_.Allocate(size_in_bytes);
-      if (object != NULL) return object;
-    }
+  // Free list allocation failed and there is no next page.  Fail if we have
+  // hit the old generation size limit that should cause a garbage
+  // collection.
+  if (!heap()->always_allocate() &&
+      heap()->OldGenerationAllocationLimitReached()) {
+    return NULL;
   }
 
   // Try to expand the space and allocate in the new next page.
@@ -2172,6 +2165,16 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
     return free_list_.Allocate(size_in_bytes);
   }
 
+  // Last ditch, sweep all the remaining pages to try to find space.  This may
+  // cause a pause.
+  if (!IsSweepingComplete()) {
+    AdvanceSweeper(kMaxInt);
+
+    // Retry the free list allocation.
+    HeapObject* object = free_list_.Allocate(size_in_bytes);
+    if (object != NULL) return object;
+  }
+
   // Finally, fail.
   return NULL;
 }
...
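Taken together, the two spaces.cc hunks reorder the slow allocation path:
sweep first, then check the soft limit, then expand, then sweep everything.
Below is a hedged, simplified model of the resulting control flow, reusing
HeapModel from the first sketch; the Model-suffixed types are stand-ins, not
V8's real classes:

#include <climits>

struct HeapObjectModel {};

struct FreeListModel {
  // Stand-in: returns nullptr when no free chunk fits.
  HeapObjectModel* Allocate(int /*size_in_bytes*/) { return nullptr; }
};

struct PagedSpaceModel {
  HeapModel* heap_ = nullptr;
  FreeListModel free_list_;
  bool has_unswept_pages_ = true;
  bool sweeping_complete_ = false;

  void AdvanceSweeper(int /*bytes*/) { /* sweep roughly this many bytes */ }
  bool Expand() { return false; }  // also fails at the hard size limit

  HeapObjectModel* SlowAllocateRaw(int size_in_bytes) {
    // 1. New first step: lazily sweep about one page's worth and retry the
    //    free list before consulting the soft limit.
    if (has_unswept_pages_) {
      AdvanceSweeper(size_in_bytes);
      if (HeapObjectModel* o = free_list_.Allocate(size_in_bytes)) return o;
    }
    // 2. Soft-limit check, bypassed inside an always-allocate scope such
    //    as the one EvacuateNewSpace now opens.
    if (!heap_->always_allocate() &&
        heap_->OldGenerationAllocationLimitReached()) {
      return nullptr;
    }
    // 3. Try to grow the space by a page and allocate there.
    if (Expand()) return free_list_.Allocate(size_in_bytes);
    // 4. Last ditch: sweep everything that is left, which may pause.
    if (!sweeping_complete_) {
      AdvanceSweeper(INT_MAX);
      if (HeapObjectModel* o = free_list_.Allocate(size_in_bytes)) return o;
    }
    // 5. Finally, fail; the caller falls back to GC or OOM handling.
    return nullptr;
  }
};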
src/spaces.h
@@ -1650,7 +1650,8 @@ class PagedSpace : public Space {
   Page* first_unswept_page_;
 
   // Expands the space by allocating a fixed number of pages. Returns false if
-  // it cannot allocate requested number of pages from OS.
+  // it cannot allocate requested number of pages from OS, or if the hard heap
+  // size limit has been hit.
   bool Expand();
 
   // Generic fast case allocation function that tries linear allocation at the
...
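The updated comment documents that Expand() also fails at the hard heap size
limit, not only on OS allocation failure.  A tiny hedged sketch of such a
guard, with made-up names and page size:

#include <cstddef>

// Sketch only: the hard limit caps committed memory outright, unlike the
// soft limit, which merely requests a GC and can be bypassed.
bool ExpandWouldSucceed(std::size_t committed_bytes,
                        std::size_t max_capacity,
                        std::size_t page_size = std::size_t{1} << 20) {
  return committed_bytes + page_size <= max_capacity;
}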
test/cctest/test-mark-compact.cc
@@ -142,9 +142,6 @@ TEST(NoPromotion) {
 
   // Call mark compact GC, and it should pass.
   HEAP->CollectGarbage(OLD_POINTER_SPACE);
-
-  // array should not be promoted because the old space is full.
-  CHECK(HEAP->InSpace(*array, NEW_SPACE));
 }
...