Commit cb27d095 authored by vegorov@chromium.org

Fix bugs introduced by r4475:

- RelinkPageListInChunkOrder might relink unused pages into the middle of a sequence of used pages. Filler objects should be placed at the beginning of such unused pages; otherwise generic iterators (e.g. HeapObjectIterator) would not handle them correctly (see the first sketch below).
- ObjectAreaEnd() should not be used as the allocation limit for pages from FixedSpace: pages in such spaces do not use the top page_extra_ bytes of the object area (see the second sketch below).
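
The first fix is easiest to see with a toy model of how a heap object iterator walks a page: it reads an object's size and skips forward by that many bytes, so every byte the iterator visits must be covered by some object. Nothing below is V8 code; SketchPage, Allocate, IteratePage and the 256-byte object area are invented stand-ins for Page, the filler object and HeapObjectIterator.

  // Toy model only: a page that is unused but still reachable by iterators
  // must be covered by a single filler object.
  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  const int kSketchObjectAreaSize = 256;

  struct SketchPage {
    uint8_t area[kSketchObjectAreaSize];
    int allocation_top;  // offset of the first free byte in the object area
  };

  // Writes an "object" whose first four bytes hold its total size.
  void Allocate(SketchPage* p, int size) {
    std::memcpy(p->area + p->allocation_top, &size, sizeof(size));
    p->allocation_top += size;
  }

  // Walks the page object by object, roughly the way HeapObjectIterator does:
  // read the size prefix, then skip that many bytes. This terminates cleanly
  // only if object sizes cover the page up to allocation_top.
  void IteratePage(const SketchPage& p) {
    int offset = 0;
    while (offset < p.allocation_top) {
      int size;
      std::memcpy(&size, p.area + offset, sizeof(size));
      std::printf("object at offset %d, %d bytes\n", offset, size);
      offset += size;
    }
  }

  int main() {
    // An unused page relinked between used pages is still scanned from its
    // object area start, so place one filler spanning the whole area; the
    // size-prefix walk then skips the page in a single step instead of
    // reading uninitialized bytes.
    SketchPage unused_page = {};
    Allocate(&unused_page, kSketchObjectAreaSize);
    IteratePage(&unused_page);
    return 0;
  }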
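
The second fix is arithmetic: in a FixedSpace, page_extra_ = Page::kObjectAreaSize % object_size_in_bytes bytes at the top of each page's object area can never hold an object, so the usable limit is ObjectAreaEnd() - page_extra_, not ObjectAreaEnd(). A sketch with made-up numbers (the 8 KB area size and 24-byte object size are illustrative, not the real constants):

  #include <cstdio>

  int main() {
    const int kObjectAreaSize = 8 * 1024;  // pretend object area per page
    const int object_size_in_bytes = 24;   // pretend fixed object size
    // Bytes at the top of the object area that can never hold an object.
    const int page_extra = kObjectAreaSize % object_size_in_bytes;  // 8
    const int object_area_end = kObjectAreaSize;  // offsets, not addresses
    const int allocation_limit = object_area_end - page_extra;
    std::printf("end = %d, limit = %d, unusable tail = %d bytes\n",
                object_area_end, allocation_limit, page_extra);
    // Using object_area_end as the allocation limit would let allocation run
    // into the unusable tail, which is why FixedSpace reports
    // ObjectAreaEnd() - page_extra_ as PageAllocationLimit() in this change.
    return 0;
  }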

TBR=ager@chromium.org
Review URL: http://codereview.chromium.org/1700005

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4476 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 3fa49f8b
@@ -1056,7 +1056,6 @@ void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
   while (it.has_next()) {
     Page* p = it.next();
-    if (p->WasInUseBeforeMC()) {
     // The offset of each live object in the page from the first live object
     // in the page.
     int offset = 0;
@@ -1066,11 +1065,6 @@ void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
         p->ObjectAreaStart(),
         p->AllocationTop(),
         &offset);
-    } else {
-      // Mark whole unused page as a free region.
-      EncodeFreeRegion(p->ObjectAreaStart(),
-                       p->AllocationTop() - p->ObjectAreaStart());
-    }
   }
 }
@@ -1397,17 +1391,17 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
   }
   if (new_allocation_top != NULL) {
+#ifdef DEBUG
     Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
     ASSERT(((first_empty_page == NULL) &&
             (new_allocation_top_page == space->AllocationTopPage())) ||
            ((first_empty_page != NULL) && (last_free_size > 0) &&
             (new_allocation_top_page == prec_first_empty_page)) ||
           ((first_empty_page != NULL) && (last_free_size == 0) &&
             (new_allocation_top_page == first_empty_page)));
+#endif
-    space->SetTop(new_allocation_top,
-                  new_allocation_top_page->ObjectAreaEnd());
+    space->SetTop(new_allocation_top);
   }
 }
@@ -1039,7 +1039,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
       // The next page will be above the allocation top.
       above_allocation_top = true;
     } else {
-      ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
+      ASSERT(top == PageAllocationLimit(current_page));
     }
     // It should be packed with objects from the bottom to the top.
@@ -1977,12 +1977,12 @@ void PagedSpace::PrepareForMarkCompact(bool will_compact) {
   if (will_compact) {
     // MarkCompact collector relies on WAS_IN_USE_BEFORE_MC page flag
     // to skip unused pages. Update flag value for all pages in space.
-    PageIterator it(this, PageIterator::ALL_PAGES);
+    PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
     Page* last_in_use = AllocationTopPage();
     bool in_use = true;
-    while (it.has_next()) {
-      Page* p = it.next();
+    while (all_pages_iterator.has_next()) {
+      Page* p = all_pages_iterator.next();
       p->SetWasInUseBeforeMC(in_use);
       if (p == last_in_use) {
         // We passed a page containing allocation top. All consequent
@@ -2005,7 +2005,7 @@ void PagedSpace::PrepareForMarkCompact(bool will_compact) {
         // used page so various object iterators will continue to work properly.
         int size_in_bytes =
-            last_in_use->ObjectAreaEnd() - last_in_use->AllocationTop();
+            PageAllocationLimit(last_in_use) - last_in_use->AllocationTop();
         if (size_in_bytes > 0) {
           // There is still some space left on this page. Create a fake
@@ -2015,16 +2015,28 @@ void PagedSpace::PrepareForMarkCompact(bool will_compact) {
           FreeListNode* node =
               FreeListNode::FromAddress(last_in_use->AllocationTop());
-          node->set_size(last_in_use->ObjectAreaEnd() -
-                         last_in_use->AllocationTop());
+          node->set_size(size_in_bytes);
         }
         // New last in use page was in the middle of the list before
         // sorting so it full.
-        SetTop(new_last_in_use->AllocationTop(),
-               new_last_in_use->AllocationTop());
+        SetTop(new_last_in_use->AllocationTop());
         ASSERT(AllocationTopPage() == new_last_in_use);
+        ASSERT(AllocationTopPage()->WasInUseBeforeMC());
+      }
+      PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
+      while (pages_in_use_iterator.has_next()) {
+        Page* p = pages_in_use_iterator.next();
+        if (!p->WasInUseBeforeMC()) {
+          // Empty page is in the middle of a sequence of used pages.
+          // Create a fake object which will occupy all free space on this page.
+          // Otherwise iterators would not be able to scan this page correctly.
+          FreeListNode* node =
+              FreeListNode::FromAddress(p->ObjectAreaStart());
+          node->set_size(PageAllocationLimit(p) - p->ObjectAreaStart());
+        }
       }
       page_list_is_chunk_ordered_ = true;
@@ -2544,7 +2556,7 @@ HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
 HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
                                            int size_in_bytes) {
   ASSERT(current_page->next_page()->is_valid());
-  ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_);
+  ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
   ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
   accounting_stats_.WasteBytes(page_extra_);
   SetAllocationInfo(&allocation_info_, current_page->next_page());
@@ -927,7 +927,14 @@ class PagedSpace : public Space {
   // Prepares for a mark-compact GC.
   virtual void PrepareForMarkCompact(bool will_compact);
-  virtual Address PageAllocationTop(Page* page) = 0;
+  // The top of allocation in a page in this space. Undefined if page is unused.
+  Address PageAllocationTop(Page* page) {
+    return page == TopPageOf(allocation_info_) ? top()
+        : PageAllocationLimit(page);
+  }
+  // The limit of allocation for a page in this space.
+  virtual Address PageAllocationLimit(Page* page) = 0;
   // Current capacity without growing (Size() + Available() + Waste()).
   int Capacity() { return accounting_stats_.Capacity(); }
@@ -970,9 +977,9 @@ class PagedSpace : public Space {
   void FreePages(Page* prev, Page* last);
   // Set space allocation info.
-  void SetTop(Address top, Address limit) {
+  void SetTop(Address top) {
     allocation_info_.top = top;
-    allocation_info_.limit = limit;
+    allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
   }
   // ---------------------------------------------------------------------------
@@ -1724,9 +1731,9 @@ class OldSpace : public PagedSpace {
   // pointer).
   int AvailableFree() { return free_list_.available(); }
-  // The top of allocation in a page in this space. Undefined if page is unused.
-  virtual Address PageAllocationTop(Page* page) {
-    return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
+  // The limit of allocation for a page in this space.
+  virtual Address PageAllocationLimit(Page* page) {
+    return page->ObjectAreaEnd();
   }
   // Give a block of memory to the space's free list. It might be added to
@@ -1792,10 +1799,9 @@ class FixedSpace : public PagedSpace {
     page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
   }
-  // The top of allocation in a page in this space. Undefined if page is unused.
-  virtual Address PageAllocationTop(Page* page) {
-    return page == TopPageOf(allocation_info_) ? top()
-        : page->ObjectAreaEnd() - page_extra_;
+  // The limit of allocation for a page in this space.
+  virtual Address PageAllocationLimit(Page* page) {
+    return page->ObjectAreaEnd() - page_extra_;
   }
   int object_size_in_bytes() { return object_size_in_bytes_; }
@@ -830,7 +830,7 @@ TEST(LargeObjectSpaceContains) {
   }
   CHECK(bytes_to_page > FixedArray::kHeaderSize);
-  intptr_t* flags_ptr = &Page::FromAddress(next_page)->flags;
+  int* flags_ptr = &Page::FromAddress(next_page)->flags;
   Address flags_addr = reinterpret_cast<Address>(flags_ptr);
   int bytes_to_allocate =