Commit 19dc35c1 authored by antonm@chromium.org

Force relinking of paged spaces if the first attempt to recommit from space fails.

This should improve the chances of a successful commit: currently, if we have
moved free pages out of order, we cannot shrink the spaces. However, when we
have trouble committing from space back, we should use most of the resources
at our disposal.

Also get rid of the currently unused last_on_page parameter to DeallocateFunction.

Review URL: http://codereview.chromium.org/3260001

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5372 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 79a219cf
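For context, the recovery path added by the first hunk below works roughly as sketched here. PagedSpaces, PagedSpace::RelinkPageListInChunkOrder, Shrink and NewSpace::CommitFromSpaceIfNeeded are the names used in the diff; the surrounding function body, including the final V8::FatalProcessOutOfMemory call, is an assumed simplification rather than the exact heap.cc source:

// Sketch: Heap::EnsureFromSpaceIsCommitted() with the new fallback.
void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed. Relink every paged space
  // into chunk order (deallocating trailing blocks) so that Shrink()
  // has a real chance of releasing pages, then try again.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RelinkPageListInChunkOrder(true);  // true == deallocate_blocks
  }
  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Still failing: assumed to be treated as fatal (out of memory).
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}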
@@ -568,6 +568,13 @@ void Heap::EnsureFromSpaceIsCommitted() {
// Committing memory to from space failed.
// Try shrinking and try again.
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
space != NULL;
space = spaces.next()) {
space->RelinkPageListInChunkOrder(true);
}
Shrink();
if (new_space_.CommitFromSpaceIfNeeded()) return;
@@ -1629,7 +1629,7 @@ static void SweepNewSpace(NewSpace* space) {
}
static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
static void SweepSpace(PagedSpace* space) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
// During sweeping of paged space we are trying to find longest sequences
@@ -1670,10 +1670,9 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
MarkCompactCollector::tracer()->decrement_marked_count();
if (!is_previous_alive) { // Transition from free to live.
dealloc(free_start,
static_cast<int>(current - free_start),
true,
false);
space->DeallocateBlock(free_start,
static_cast<int>(current - free_start),
true);
is_previous_alive = true;
}
} else {
@@ -1703,7 +1702,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// without putting anything into free list.
int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
if (size_in_bytes > 0) {
dealloc(free_start, size_in_bytes, false, true);
space->DeallocateBlock(free_start, size_in_bytes, false);
}
}
} else {
@@ -1719,7 +1718,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
if (last_free_size > 0) {
Page::FromAddress(last_free_start)->
SetAllocationWatermark(last_free_start);
dealloc(last_free_start, last_free_size, true, true);
space->DeallocateBlock(last_free_start, last_free_size, true);
last_free_start = NULL;
last_free_size = 0;
}
@@ -1750,7 +1749,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// There was a free ending area on the previous page.
// Deallocate it without putting it into freelist and move allocation
// top to the beginning of this free area.
dealloc(last_free_start, last_free_size, false, true);
space->DeallocateBlock(last_free_start, last_free_size, false);
new_allocation_top = last_free_start;
}
@@ -1771,61 +1770,6 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
}
void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page) {
Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateOldDataBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page) {
Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateCodeBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page) {
Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateMapBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page) {
// Objects in map space are assumed to have size Map::kSize and a
// valid map in their first word. Thus, we break the free block up into
// chunks and free them separately.
ASSERT(size_in_bytes % Map::kSize == 0);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += Map::kSize) {
Heap::map_space()->Free(a, add_to_freelist);
}
}
void MarkCompactCollector::DeallocateCellBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page) {
// Free-list elements in cell space are assumed to have a fixed size.
// We break the free block into chunks and add them to the free list
// individually.
int size = Heap::cell_space()->object_size_in_bytes();
ASSERT(size_in_bytes % size == 0);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += size) {
Heap::cell_space()->Free(a, add_to_freelist);
}
}
void MarkCompactCollector::EncodeForwardingAddresses() {
ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
// Objects in the active semispace of the young generation may be
@@ -2090,14 +2034,14 @@ void MarkCompactCollector::SweepSpaces() {
// the map space last because freeing non-live maps overwrites them and
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
SweepSpace(Heap::old_pointer_space());
SweepSpace(Heap::old_data_space());
SweepSpace(Heap::code_space());
SweepSpace(Heap::cell_space());
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
SweepNewSpace(Heap::new_space());
}
SweepSpace(Heap::map_space(), &DeallocateMapBlock);
SweepSpace(Heap::map_space());
Heap::IterateDirtyRegions(Heap::map_space(),
&Heap::IteratePointersInDirtyMapsRegion,
......
@@ -36,15 +36,6 @@ namespace internal {
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
// Callback function for non-live blocks in the old generation.
// If add_to_freelist is false then just accounting stats are updated and
// no attempt to add area to free list is made.
typedef void (*DeallocateFunction)(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page);
// Forward declarations.
class RootMarkingVisitor;
class MarkingVisitor;
@@ -329,33 +320,6 @@ class MarkCompactCollector: public AllStatic {
static int IterateLiveObjectsInRange(Address start, Address end,
HeapObjectCallback size_func);
// Callback functions for deallocating non-live blocks in the old
// generation.
static void DeallocateOldPointerBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page);
static void DeallocateOldDataBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page);
static void DeallocateCodeBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page);
static void DeallocateMapBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page);
static void DeallocateCellBlock(Address start,
int size_in_bytes,
bool add_to_freelist,
bool last_on_page);
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
// regions to each space's free list.
@@ -2002,77 +2002,88 @@ void PagedSpace::FreePages(Page* prev, Page* last) {
}
void PagedSpace::PrepareForMarkCompact(bool will_compact) {
if (will_compact) {
// MarkCompact collector relies on WAS_IN_USE_BEFORE_MC page flag
// to skip unused pages. Update flag value for all pages in space.
PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
Page* last_in_use = AllocationTopPage();
bool in_use = true;
while (all_pages_iterator.has_next()) {
Page* p = all_pages_iterator.next();
p->SetWasInUseBeforeMC(in_use);
if (p == last_in_use) {
// We passed a page containing allocation top. All subsequent
// pages are not used.
in_use = false;
void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
const bool add_to_freelist = true;
// Mark used and unused pages to properly fill unused pages
// after reordering.
PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
Page* last_in_use = AllocationTopPage();
bool in_use = true;
while (all_pages_iterator.has_next()) {
Page* p = all_pages_iterator.next();
p->SetWasInUseBeforeMC(in_use);
if (p == last_in_use) {
// We passed a page containing allocation top. All subsequent
// pages are not used.
in_use = false;
}
}
if (page_list_is_chunk_ordered_) return;
Page* new_last_in_use = Page::FromAddress(NULL);
MemoryAllocator::RelinkPageListInChunkOrder(this,
&first_page_,
&last_page_,
&new_last_in_use);
ASSERT(new_last_in_use->is_valid());
if (new_last_in_use != last_in_use) {
// Current allocation top points to a page which is now in the middle
// of the page list. We should move allocation top forward to the new last
// used page so various object iterators will continue to work properly.
int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
last_in_use->AllocationTop());
last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
if (size_in_bytes > 0) {
Address start = last_in_use->AllocationTop();
if (deallocate_blocks) {
accounting_stats_.AllocateBytes(size_in_bytes);
DeallocateBlock(start, size_in_bytes, add_to_freelist);
} else {
Heap::CreateFillerObjectAt(start, size_in_bytes);
}
}
if (!page_list_is_chunk_ordered_) {
Page* new_last_in_use = Page::FromAddress(NULL);
MemoryAllocator::RelinkPageListInChunkOrder(this,
&first_page_,
&last_page_,
&new_last_in_use);
ASSERT(new_last_in_use->is_valid());
if (new_last_in_use != last_in_use) {
// Current allocation top points to a page which is now in the middle
// of the page list. We should move allocation top forward to the new last
// used page so various object iterators will continue to work properly.
last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
last_in_use->AllocationTop());
if (size_in_bytes > 0) {
// There is still some space left on this page. Create a fake
// object which will occupy all free space on this page.
// Otherwise iterators would not be able to scan this page
// correctly.
Heap::CreateFillerObjectAt(last_in_use->AllocationTop(),
size_in_bytes);
}
// New last in use page was in the middle of the list before
// sorting so it is full.
SetTop(new_last_in_use->AllocationTop());
// New last in use page was in the middle of the list before
// sorting so it is full.
SetTop(new_last_in_use->AllocationTop());
ASSERT(AllocationTopPage() == new_last_in_use);
ASSERT(AllocationTopPage()->WasInUseBeforeMC());
}
ASSERT(AllocationTopPage() == new_last_in_use);
ASSERT(AllocationTopPage()->WasInUseBeforeMC());
}
PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
while (pages_in_use_iterator.has_next()) {
Page* p = pages_in_use_iterator.next();
if (!p->WasInUseBeforeMC()) {
// Empty page is in the middle of a sequence of used pages.
// Allocate it as a whole and deallocate immediately.
int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
p->ObjectAreaStart());
PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
while (pages_in_use_iterator.has_next()) {
Page* p = pages_in_use_iterator.next();
if (!p->WasInUseBeforeMC()) {
// Empty page is in the middle of a sequence of used pages.
// Create a fake object which will occupy all free space on this page.
// Otherwise iterators would not be able to scan this page correctly.
int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
p->ObjectAreaStart());
p->SetAllocationWatermark(p->ObjectAreaStart());
Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
}
p->SetAllocationWatermark(p->ObjectAreaStart());
Address start = p->ObjectAreaStart();
if (deallocate_blocks) {
accounting_stats_.AllocateBytes(size_in_bytes);
DeallocateBlock(start, size_in_bytes, add_to_freelist);
} else {
Heap::CreateFillerObjectAt(start, size_in_bytes);
}
page_list_is_chunk_ordered_ = true;
}
}
page_list_is_chunk_ordered_ = true;
}
void PagedSpace::PrepareForMarkCompact(bool will_compact) {
if (will_compact) {
RelinkPageListInChunkOrder(false);
}
}
@@ -2207,6 +2218,13 @@ HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
}
void OldSpace::DeallocateBlock(Address start,
int size_in_bytes,
bool add_to_freelist) {
Free(start, size_in_bytes, add_to_freelist);
}
#ifdef DEBUG
struct CommentStatistic {
const char* comment;
@@ -2481,6 +2499,21 @@ HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
}
void FixedSpace::DeallocateBlock(Address start,
int size_in_bytes,
bool add_to_freelist) {
// Free-list elements in fixed space are assumed to have a fixed size.
// We break the free block into chunks and add them to the free list
// individually.
int size = object_size_in_bytes();
ASSERT(size_in_bytes % size == 0);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += size) {
Free(a, add_to_freelist);
}
}
#ifdef DEBUG
void FixedSpace::ReportStatistics() {
int pct = Available() * 100 / Capacity();
@@ -1040,6 +1040,11 @@ class PagedSpace : public Space {
// Freed pages are moved to the end of page list.
void FreePages(Page* prev, Page* last);
// Deallocates a block.
virtual void DeallocateBlock(Address start,
int size_in_bytes,
bool add_to_freelist) = 0;
// Set space allocation info.
void SetTop(Address top) {
allocation_info_.top = top;
@@ -1098,6 +1103,8 @@ class PagedSpace : public Space {
// Returns the page of the allocation pointer.
Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
void RelinkPageListInChunkOrder(bool deallocate_blocks);
protected:
// Maximum capacity of this space.
int max_capacity_;
@@ -1815,6 +1822,10 @@ class OldSpace : public PagedSpace {
}
}
virtual void DeallocateBlock(Address start,
int size_in_bytes,
bool add_to_freelist);
// Prepare for full garbage collection. Resets the relocation pointer and
// clears the free list.
virtual void PrepareForMarkCompact(bool will_compact);
@@ -1889,6 +1900,9 @@ class FixedSpace : public PagedSpace {
virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
virtual void DeallocateBlock(Address start,
int size_in_bytes,
bool add_to_freelist);
#ifdef DEBUG
// Reports statistic info of the space
void ReportStatistics();