Commit 3fa49f8b authored by vegorov@chromium.org

Put empty pages discovered during sweeping to the end of the list of pages
instead of adding them to the free list.
Review URL: http://codereview.chromium.org/1683001

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4475 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 5fab20ed
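The diff below reworks SweepSpace so that pages found to be completely empty are remembered and spliced to the end of the space's page list, while only pages that still contain live objects feed their dead regions to the free list. As a rough orientation before reading the diff, here is a minimal, self-contained sketch of that per-page decision; the types and names (ToyPage, SweepToy) are invented for illustration and are not V8 APIs.

```cpp
#include <cstdio>
#include <vector>

struct ToyPage {
  bool has_live_objects;  // Result of sweeping this page.
  int dead_bytes;         // Bytes occupied by dead objects on this page.
};

// Classify pages the way the new SweepSpace does: remember runs of empty
// pages instead of free-listing their whole object area.
void SweepToy(const std::vector<ToyPage>& pages,
              std::vector<int>* empty_page_indices,
              int* bytes_added_to_free_list) {
  *bytes_added_to_free_list = 0;
  for (int i = 0; i < static_cast<int>(pages.size()); i++) {
    if (!pages[i].has_live_objects) {
      // Empty page: defer it; it will be moved to the end of the page list
      // (and possibly released as part of a whole chunk later).
      empty_page_indices->push_back(i);
    } else {
      // Page with live objects: dead regions go to the free list as before.
      *bytes_added_to_free_list += pages[i].dead_bytes;
    }
  }
}

int main() {
  std::vector<ToyPage> pages = {{true, 128}, {false, 8192}, {false, 8192}, {true, 0}};
  std::vector<int> empty;
  int freed = 0;
  SweepToy(pages, &empty, &freed);
  std::printf("empty pages: %zu, free-listed bytes: %d\n", empty.size(), freed);
  return 0;
}
```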
@@ -1055,15 +1055,22 @@ void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
   PageIterator it(space, PageIterator::PAGES_IN_USE);
   while (it.has_next()) {
     Page* p = it.next();
-    // The offset of each live object in the page from the first live object
-    // in the page.
-    int offset = 0;
-    EncodeForwardingAddressesInRange<Alloc,
-                                     EncodeForwardingAddressInPagedSpace,
-                                     ProcessNonLive>(
-        p->ObjectAreaStart(),
-        p->AllocationTop(),
-        &offset);
+
+    if (p->WasInUseBeforeMC()) {
+      // The offset of each live object in the page from the first live object
+      // in the page.
+      int offset = 0;
+      EncodeForwardingAddressesInRange<Alloc,
+                                       EncodeForwardingAddressInPagedSpace,
+                                       ProcessNonLive>(
+          p->ObjectAreaStart(),
+          p->AllocationTop(),
+          &offset);
+    } else {
+      // Mark whole unused page as a free region.
+      EncodeFreeRegion(p->ObjectAreaStart(),
+                       p->AllocationTop() - p->ObjectAreaStart());
+    }
   }
 }
@@ -1277,6 +1284,23 @@ static void SweepNewSpace(NewSpace* space) {
 
 static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
   PageIterator it(space, PageIterator::PAGES_IN_USE);
 
+  // During sweeping of paged space we are trying to find the longest sequences
+  // of pages without live objects and free them (instead of putting them on
+  // the free list).
+
+  Page* prev = NULL;  // Page preceding current.
+  Page* first_empty_page = NULL;  // First empty page in a sequence.
+  Page* prec_first_empty_page = NULL;  // Page preceding first empty page.
+
+  // If the last used page of the space ends with a sequence of dead objects
+  // we can adjust the allocation top instead of putting this free area into
+  // the free list. Thus during sweeping we keep track of such areas
+  // and defer their deallocation until the sweeping of the next page
+  // is done: if one of the next pages contains live objects we have
+  // to put such an area into the free list.
+  Address last_free_start = NULL;
+  int last_free_size = 0;
+
   while (it.has_next()) {
     Page* p = it.next();
@@ -1291,8 +1315,9 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
       if (object->IsMarked()) {
         object->ClearMark();
         MarkCompactCollector::tracer()->decrement_marked_count();
+
         if (!is_previous_alive) {  // Transition from free to live.
-          dealloc(free_start, static_cast<int>(current - free_start));
+          dealloc(free_start, static_cast<int>(current - free_start), true);
           is_previous_alive = true;
         }
       } else {
@@ -1306,39 +1331,112 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
       // loop.
     }
 
-    // If the last region was not live we need to deallocate from
-    // free_start to the allocation top in the page.
-    if (!is_previous_alive) {
-      int free_size = static_cast<int>(p->AllocationTop() - free_start);
-      if (free_size > 0) {
-        dealloc(free_start, free_size);
+    bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
+        || (!is_previous_alive && free_start == p->ObjectAreaStart());
+
+    if (page_is_empty) {
+      // This page is empty. Check whether we are in the middle of
+      // a sequence of empty pages and start one if not.
+      if (first_empty_page == NULL) {
+        first_empty_page = p;
+        prec_first_empty_page = prev;
+      }
+
+      if (!is_previous_alive) {
+        // There are dead objects on this page. Update space accounting stats
+        // without putting anything into the free list.
+        int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
+        if (size_in_bytes > 0) {
+          dealloc(free_start, size_in_bytes, false);
+        }
+      }
+    } else {
+      // This page is not empty. The sequence of empty pages ended on the
+      // previous one.
+      if (first_empty_page != NULL) {
+        space->FreePages(prec_first_empty_page, prev);
+        prec_first_empty_page = first_empty_page = NULL;
+      }
+
+      // If there is a free ending area on one of the previous pages we have
+      // to deallocate that area and put it on the free list.
+      if (last_free_size > 0) {
+        dealloc(last_free_start, last_free_size, true);
+        last_free_start = NULL;
+        last_free_size = 0;
+      }
+
+      // If the last region of this page was not live we remember it.
+      if (!is_previous_alive) {
+        ASSERT(last_free_size == 0);
+        last_free_size = static_cast<int>(p->AllocationTop() - free_start);
+        last_free_start = free_start;
       }
     }
+
+    prev = p;
+  }
+
+  // We reached the end of the space. See if we need to adjust the
+  // allocation top.
+  Address new_allocation_top = NULL;
+
+  if (first_empty_page != NULL) {
+    // The last used pages in the space are empty. We can move the allocation
+    // top backwards to the beginning of the first empty page.
+    ASSERT(prev == space->AllocationTopPage());
+
+    new_allocation_top = first_empty_page->ObjectAreaStart();
+  }
+
+  if (last_free_size > 0) {
+    // There was a free ending area on the previous page.
+    // Deallocate it without putting it into the free list and move the
+    // allocation top to the beginning of this free area.
+    dealloc(last_free_start, last_free_size, false);
+    new_allocation_top = last_free_start;
+  }
+
+  if (new_allocation_top != NULL) {
+    Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
+
+    ASSERT(((first_empty_page == NULL) &&
+            (new_allocation_top_page == space->AllocationTopPage())) ||
+           ((first_empty_page != NULL) && (last_free_size > 0) &&
+            (new_allocation_top_page == prec_first_empty_page)) ||
+           ((first_empty_page != NULL) && (last_free_size == 0) &&
+            (new_allocation_top_page == first_empty_page)));
+
+    space->SetTop(new_allocation_top,
+                  new_allocation_top_page->ObjectAreaEnd());
   }
 }
 
 
 void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
-                                                     int size_in_bytes) {
+                                                     int size_in_bytes,
+                                                     bool add_to_freelist) {
   Heap::ClearRSetRange(start, size_in_bytes);
-  Heap::old_pointer_space()->Free(start, size_in_bytes);
+  Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
 }
 
 
 void MarkCompactCollector::DeallocateOldDataBlock(Address start,
-                                                  int size_in_bytes) {
-  Heap::old_data_space()->Free(start, size_in_bytes);
+                                                  int size_in_bytes,
+                                                  bool add_to_freelist) {
+  Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
 }
 
 
 void MarkCompactCollector::DeallocateCodeBlock(Address start,
-                                               int size_in_bytes) {
-  Heap::code_space()->Free(start, size_in_bytes);
+                                               int size_in_bytes,
+                                               bool add_to_freelist) {
+  Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
 }
 
 
 void MarkCompactCollector::DeallocateMapBlock(Address start,
-                                              int size_in_bytes) {
+                                              int size_in_bytes,
+                                              bool add_to_freelist) {
   // Objects in map space are assumed to have size Map::kSize and a
   // valid map in their first word. Thus, we break the free block up into
   // chunks and free them separately.
@@ -1346,13 +1444,14 @@ void MarkCompactCollector::DeallocateMapBlock(Address start,
   Heap::ClearRSetRange(start, size_in_bytes);
   Address end = start + size_in_bytes;
   for (Address a = start; a < end; a += Map::kSize) {
-    Heap::map_space()->Free(a);
+    Heap::map_space()->Free(a, add_to_freelist);
   }
 }
 
 
 void MarkCompactCollector::DeallocateCellBlock(Address start,
-                                               int size_in_bytes) {
+                                               int size_in_bytes,
+                                               bool add_to_freelist) {
   // Free-list elements in cell space are assumed to have a fixed size.
   // We break the free block into chunks and add them to the free list
   // individually.
@@ -1361,7 +1460,7 @@ void MarkCompactCollector::DeallocateCellBlock(Address start,
   Heap::ClearRSetRange(start, size_in_bytes);
   Address end = start + size_in_bytes;
   for (Address a = start; a < end; a += size) {
-    Heap::cell_space()->Free(a);
+    Heap::cell_space()->Free(a, add_to_freelist);
   }
 }
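The rewritten SweepSpace also defers the trailing dead region of the last page that still holds live objects: if every later page turns out to be empty, the allocation top is simply pulled back to the start of that region instead of the region being free-listed. Below is a simplified model of that end-of-sweep decision; the names (ToySpace, FinishSweep) and integer offsets are illustrative assumptions, not the real Address/dealloc machinery.

```cpp
#include <cassert>
#include <cstdio>

// Toy model: the "space" is a flat range of byte offsets and allocation_top
// marks the end of used memory.
struct ToySpace {
  int allocation_top;
  int free_listed_bytes;
};

// Mirror of the end-of-sweep decision: a trailing free area that survives to
// the end of the space shrinks the allocation top; otherwise it must go to
// the free list so allocation behind the top can still use it.
void FinishSweep(ToySpace* space, int last_free_start, int last_free_size,
                 bool later_pages_had_live_objects) {
  if (last_free_size == 0) return;
  if (later_pages_had_live_objects) {
    space->free_listed_bytes += last_free_size;
  } else {
    assert(last_free_start + last_free_size <= space->allocation_top);
    space->allocation_top = last_free_start;
  }
}

int main() {
  ToySpace s = {4096, 0};
  FinishSweep(&s, 3072, 1024, /*later_pages_had_live_objects=*/false);
  std::printf("top=%d free-listed=%d\n", s.allocation_top, s.free_listed_bytes);
  return 0;
}
```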
...
@@ -37,7 +37,11 @@ namespace internal {
 typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
 
 // Callback function for non-live blocks in the old generation.
-typedef void (*DeallocateFunction)(Address start, int size_in_bytes);
+// If add_to_freelist is false then only accounting stats are updated and
+// no attempt to add the area to the free list is made.
+typedef void (*DeallocateFunction)(Address start,
+                                   int size_in_bytes,
+                                   bool add_to_freelist);
 
 // Forward declarations.
@@ -313,11 +317,25 @@ class MarkCompactCollector: public AllStatic {
 
   // Callback functions for deallocating non-live blocks in the old
   // generation.
-  static void DeallocateOldPointerBlock(Address start, int size_in_bytes);
-  static void DeallocateOldDataBlock(Address start, int size_in_bytes);
-  static void DeallocateCodeBlock(Address start, int size_in_bytes);
-  static void DeallocateMapBlock(Address start, int size_in_bytes);
-  static void DeallocateCellBlock(Address start, int size_in_bytes);
+  static void DeallocateOldPointerBlock(Address start,
+                                        int size_in_bytes,
+                                        bool add_to_freelist);
+
+  static void DeallocateOldDataBlock(Address start,
+                                     int size_in_bytes,
+                                     bool add_to_freelist);
+
+  static void DeallocateCodeBlock(Address start,
+                                  int size_in_bytes,
+                                  bool add_to_freelist);
+
+  static void DeallocateMapBlock(Address start,
+                                 int size_in_bytes,
+                                 bool add_to_freelist);
+
+  static void DeallocateCellBlock(Address start,
+                                  int size_in_bytes,
+                                  bool add_to_freelist);
 
   // If we are not compacting the heap, we simply sweep the spaces except
   // for the large object space, clearing mark bits and adding unmarked
...
@@ -145,6 +145,40 @@ bool Page::IsRSetSet(Address address, int offset) {
 }
 
 
+bool Page::GetPageFlag(PageFlag flag) {
+  return (flags & flag) != 0;
+}
+
+
+void Page::SetPageFlag(PageFlag flag, bool value) {
+  if (value) {
+    flags |= flag;
+  } else {
+    flags &= ~flag;
+  }
+}
+
+
+bool Page::WasInUseBeforeMC() {
+  return GetPageFlag(WAS_IN_USE_BEFORE_MC);
+}
+
+
+void Page::SetWasInUseBeforeMC(bool was_in_use) {
+  SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
+}
+
+
+bool Page::IsLargeObjectPage() {
+  return !GetPageFlag(IS_NORMAL_PAGE);
+}
+
+
+void Page::SetIsLargeObjectPage(bool is_large_object_page) {
+  SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
+}
+
+
 // -----------------------------------------------------------------------------
 // MemoryAllocator
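These accessors pack both page properties into the single flags word that replaces is_normal_page; keeping IS_NORMAL_PAGE in the low-order bit preserves the old invariant that large-object pages have that bit clear. A standalone sketch of the same bit-flag pattern follows; it uses a plain struct of my own, not the real Page header layout.

```cpp
#include <cstdio>

struct ToyPage {
  enum PageFlag {
    IS_NORMAL_PAGE = 1 << 0,
    WAS_IN_USE_BEFORE_MC = 1 << 1
  };

  int flags = 0;

  bool GetPageFlag(PageFlag flag) const { return (flags & flag) != 0; }

  void SetPageFlag(PageFlag flag, bool value) {
    if (value) {
      flags |= flag;
    } else {
      flags &= ~flag;
    }
  }

  // Large-object pages are exactly those with the low-order bit clear,
  // matching the old is_normal_page encoding.
  bool IsLargeObjectPage() const { return !GetPageFlag(IS_NORMAL_PAGE); }
  void SetIsLargeObjectPage(bool large) { SetPageFlag(IS_NORMAL_PAGE, !large); }
};

int main() {
  ToyPage p;
  p.SetIsLargeObjectPage(false);
  p.SetPageFlag(ToyPage::WAS_IN_USE_BEFORE_MC, true);
  std::printf("large=%d in_use_before_mc=%d\n",
              p.IsLargeObjectPage(),
              p.GetPageFlag(ToyPage::WAS_IN_USE_BEFORE_MC));
  return 0;
}
```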
...
@@ -524,7 +524,7 @@ Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
   for (int i = 0; i < pages_in_chunk; i++) {
     Page* p = Page::FromAddress(page_addr);
     p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
-    p->is_normal_page = 1;
+    p->SetIsLargeObjectPage(false);
     page_addr += Page::kPageSize;
   }
@@ -568,6 +568,15 @@ Page* MemoryAllocator::FreePages(Page* p) {
 }
 
 
+void MemoryAllocator::FreeAllPages(PagedSpace* space) {
+  for (int i = 0, length = chunks_.length(); i < length; i++) {
+    if (chunks_[i].owner() == space) {
+      DeleteChunk(i);
+    }
+  }
+}
+
+
 void MemoryAllocator::DeleteChunk(int chunk_id) {
   ASSERT(IsValidChunk(chunk_id));
@@ -622,6 +631,74 @@ void MemoryAllocator::ReportStatistics() {
 #endif
 
 
+void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
+                                                 Page** first_page,
+                                                 Page** last_page,
+                                                 Page** last_page_in_use) {
+  Page* first = NULL;
+  Page* last = NULL;
+
+  for (int i = 0, length = chunks_.length(); i < length; i++) {
+    ChunkInfo& chunk = chunks_[i];
+
+    if (chunk.owner() == space) {
+      if (first == NULL) {
+        Address low = RoundUp(chunk.address(), Page::kPageSize);
+        first = Page::FromAddress(low);
+      }
+      last = RelinkPagesInChunk(i,
+                                chunk.address(),
+                                chunk.size(),
+                                last,
+                                last_page_in_use);
+    }
+  }
+
+  if (first_page != NULL) {
+    *first_page = first;
+  }
+
+  if (last_page != NULL) {
+    *last_page = last;
+  }
+}
+
+
+Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
+                                          Address chunk_start,
+                                          int chunk_size,
+                                          Page* prev,
+                                          Page** last_page_in_use) {
+  Address page_addr = RoundUp(chunk_start, Page::kPageSize);
+  int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);
+
+  if (prev->is_valid()) {
+    SetNextPage(prev, Page::FromAddress(page_addr));
+  }
+
+  for (int i = 0; i < pages_in_chunk; i++) {
+    Page* p = Page::FromAddress(page_addr);
+    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
+    page_addr += Page::kPageSize;
+
+    if (p->WasInUseBeforeMC()) {
+      *last_page_in_use = p;
+    }
+  }
+
+  // Set the next page of the last page to 0.
+  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
+  last_page->opaque_header = OffsetFrom(0) | chunk_id;
+
+  if (last_page->WasInUseBeforeMC()) {
+    *last_page_in_use = last_page;
+  }
+
+  return last_page;
+}
+
+
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
@@ -677,6 +754,8 @@ bool PagedSpace::Setup(Address start, size_t size) {
   // Use first_page_ for allocation.
   SetAllocationInfo(&allocation_info_, first_page_);
 
+  page_list_is_chunk_ordered_ = true;
+
   return true;
 }
@@ -687,9 +766,8 @@ bool PagedSpace::HasBeenSetup() {
 
 void PagedSpace::TearDown() {
-  first_page_ = MemoryAllocator::FreePages(first_page_);
-  ASSERT(!first_page_->is_valid());
-
+  MemoryAllocator::FreeAllPages(this);
+  first_page_ = NULL;
   accounting_stats_.Clear();
 }
@@ -874,6 +952,12 @@ int PagedSpace::CountTotalPages() {
 
 void PagedSpace::Shrink() {
+  if (!page_list_is_chunk_ordered_) {
+    // We can't shrink the space if pages are not chunk-ordered
+    // (see comment for class MemoryAllocator for the definition).
+    return;
+  }
+
   // Release half of free pages.
   Page* top_page = AllocationTopPage();
   ASSERT(top_page->is_valid());
@@ -1782,6 +1866,9 @@ Object* FixedSizeFreeList::Allocate() {
 // OldSpace implementation
 
 void OldSpace::PrepareForMarkCompact(bool will_compact) {
+  // Call prepare of the super class.
+  PagedSpace::PrepareForMarkCompact(will_compact);
+
   if (will_compact) {
     // Reset relocation info.  During a compacting collection, everything in
     // the space is considered 'available' and we will rediscover live data
@@ -1852,6 +1939,100 @@ bool NewSpace::ReserveSpace(int bytes) {
 }
 
 
+void PagedSpace::FreePages(Page* prev, Page* last) {
+  if (last == AllocationTopPage()) {
+    // Pages are already at the end of used pages.
+    return;
+  }
+
+  Page* first = NULL;
+
+  // Remove pages from the list.
+  if (prev == NULL) {
+    first = first_page_;
+    first_page_ = last->next_page();
+  } else {
+    first = prev->next_page();
+    MemoryAllocator::SetNextPage(prev, last->next_page());
+  }
+
+  // Attach it after the last page.
+  MemoryAllocator::SetNextPage(last_page_, first);
+  last_page_ = last;
+  MemoryAllocator::SetNextPage(last, NULL);
+
+  // Clean them up.
+  do {
+    first->ClearRSet();
+    first = first->next_page();
+  } while (first != NULL);
+
+  // Order of pages in this space might no longer be consistent with
+  // order of pages in chunks.
+  page_list_is_chunk_ordered_ = false;
+}
+
+
+void PagedSpace::PrepareForMarkCompact(bool will_compact) {
+  if (will_compact) {
+    // MarkCompact collector relies on the WAS_IN_USE_BEFORE_MC page flag
+    // to skip unused pages. Update flag value for all pages in space.
+    PageIterator it(this, PageIterator::ALL_PAGES);
+    Page* last_in_use = AllocationTopPage();
+    bool in_use = true;
+
+    while (it.has_next()) {
+      Page* p = it.next();
+      p->SetWasInUseBeforeMC(in_use);
+      if (p == last_in_use) {
+        // We passed the page containing the allocation top. All subsequent
+        // pages are not used.
+        in_use = false;
+      }
+    }
+
+    if (!page_list_is_chunk_ordered_) {
+      Page* new_last_in_use = NULL;
+      MemoryAllocator::RelinkPageListInChunkOrder(this,
+                                                  &first_page_,
+                                                  &last_page_,
+                                                  &new_last_in_use);
+      ASSERT(new_last_in_use != NULL);
+
+      if (new_last_in_use != last_in_use) {
+        // Current allocation top points to a page which is now in the middle
+        // of the page list. We should move the allocation top forward to the
+        // new last used page so various object iterators will continue to
+        // work properly.
+        int size_in_bytes =
+            last_in_use->ObjectAreaEnd() - last_in_use->AllocationTop();
+
+        if (size_in_bytes > 0) {
+          // There is still some space left on this page. Create a fake
+          // object which will occupy all free space on this page.
+          // Otherwise iterators would not be able to scan this page
+          // correctly.
+          FreeListNode* node =
+              FreeListNode::FromAddress(last_in_use->AllocationTop());
+          node->set_size(last_in_use->ObjectAreaEnd() -
+                         last_in_use->AllocationTop());
+        }
+
+        // The new last-in-use page was in the middle of the list before
+        // sorting, so it is full.
+        SetTop(new_last_in_use->AllocationTop(),
+               new_last_in_use->AllocationTop());
+
+        ASSERT(AllocationTopPage() == new_last_in_use);
+      }
+
+      page_list_is_chunk_ordered_ = true;
+    }
+  }
+}
+
+
 bool PagedSpace::ReserveSpace(int bytes) {
   Address limit = allocation_info_.limit;
   Address top = allocation_info_.top;
@@ -2263,6 +2444,9 @@ void OldSpace::PrintRSet() { DoPrintRSet("old"); }
 // FixedSpace implementation
 
 void FixedSpace::PrepareForMarkCompact(bool will_compact) {
+  // Call prepare of the super class.
+  PagedSpace::PrepareForMarkCompact(will_compact);
+
   if (will_compact) {
     // Reset relocation info.
     MCResetRelocationInfo();
@@ -2605,7 +2789,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
   // large object page.  If the chunk_size happened to be written there, its
   // low order bit should already be clear.
   ASSERT((chunk_size & 0x1) == 0);
-  page->is_normal_page &= ~0x1;
+  page->SetIsLargeObjectPage(true);
   page->ClearRSet();
   int extra_bytes = requested_size - object_size;
   if (extra_bytes > 0) {
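The PagedSpace::FreePages added above is essentially a splice on a singly linked list: unlink the range (prev, last] and reattach it behind the current last page. A self-contained sketch of the same pointer surgery on a toy list follows; the Node/List types and MoveRangeToEnd name are invented for illustration, not V8's Page API. The prev == NULL branch mirrors the first_page_ update in the real code; without it, splicing a range that starts at the head would corrupt the list.

```cpp
#include <cstdio>

struct Node {
  int id;
  Node* next;
};

struct List {
  Node* first;
  Node* last;
};

// Move the range (prev, last_in_range] to the tail of the list, keeping all
// other nodes in their original order. prev == NULL means the range starts
// at the head of the list.
void MoveRangeToEnd(List* list, Node* prev, Node* last_in_range) {
  if (last_in_range == list->last) return;  // Already at the end.

  Node* first_in_range;
  if (prev == NULL) {
    first_in_range = list->first;
    list->first = last_in_range->next;
  } else {
    first_in_range = prev->next;
    prev->next = last_in_range->next;
  }

  list->last->next = first_in_range;
  list->last = last_in_range;
  last_in_range->next = NULL;
}

int main() {
  Node d = {4, NULL}, c = {3, &d}, b = {2, &c}, a = {1, &b};
  List list = {&a, &d};
  MoveRangeToEnd(&list, &a, &c);  // Move nodes 2..3 behind node 4.
  for (Node* n = list.first; n != NULL; n = n->next) std::printf("%d ", n->id);
  std::printf("\n");  // Prints: 1 4 2 3
  return 0;
}
```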
...
@@ -167,8 +167,17 @@ class Page {
     return 0 == (OffsetFrom(a) & kPageAlignmentMask);
   }
 
+  // True if this page was in use before current compaction started.
+  // Result is valid only for pages owned by paged spaces and
+  // only after PagedSpace::PrepareForMarkCompact was called.
+  inline bool WasInUseBeforeMC();
+
+  inline void SetWasInUseBeforeMC(bool was_in_use);
+
   // True if this page is a large object page.
-  bool IsLargeObjectPage() { return (is_normal_page & 0x1) == 0; }
+  inline bool IsLargeObjectPage();
+
+  inline void SetIsLargeObjectPage(bool is_large_object_page);
 
   // Returns the offset of a given address to this page.
   INLINE(int Offset(Address a)) {
@@ -244,6 +253,14 @@ class Page {
   // Maximum object size that fits in a page.
   static const int kMaxHeapObjectSize = kObjectAreaSize;
 
+  enum PageFlag {
+    IS_NORMAL_PAGE = 1 << 0,
+    WAS_IN_USE_BEFORE_MC = 1 << 1
+  };
+
+  inline bool GetPageFlag(PageFlag flag);
+  inline void SetPageFlag(PageFlag flag, bool value);
+
   //---------------------------------------------------------------------------
   // Page header description.
   //
@@ -262,7 +279,8 @@ class Page {
   // second word *may* (if the page start and large object chunk start are
   // the same) contain the large object chunk size.  In either case, the
   // low-order bit for large object pages will be cleared.
-  int is_normal_page;
+  // For normal pages this word is used to store various page flags.
+  int flags;
 
   // The following fields may overlap with remembered set, they can only
   // be used in the mark-compact collector when remembered set is not
@@ -407,6 +425,13 @@ class CodeRange : public AllStatic {
 //
 // The memory allocator also allocates chunks for the large object space, but
 // they are managed by the space itself.  The new space does not expand.
+//
+// The fact that pages for paged spaces are allocated and deallocated in chunks
+// induces a constraint on the order of pages in a linked list. We say that
+// pages are linked in chunk-order if and only if every two consecutive
+// pages from the same chunk are consecutive in the linked list.
+//
 
 
 class MemoryAllocator : public AllStatic {
  public:
@@ -466,13 +491,18 @@ class MemoryAllocator : public AllStatic {
   static Page* AllocatePages(int requested_pages, int* allocated_pages,
                              PagedSpace* owner);
 
-  // Frees pages from a given page and after. If 'p' is the first page
-  // of a chunk, pages from 'p' are freed and this function returns an
-  // invalid page pointer. Otherwise, the function searches a page
-  // after 'p' that is the first page of a chunk. Pages after the
-  // found page are freed and the function returns 'p'.
+  // Frees pages from a given page and after. Requires pages to be
+  // linked in chunk-order (see comment for class).
+  // If 'p' is the first page of a chunk, pages from 'p' are freed
+  // and this function returns an invalid page pointer.
+  // Otherwise, the function searches a page after 'p' that is
+  // the first page of a chunk. Pages after the found page
+  // are freed and the function returns 'p'.
   static Page* FreePages(Page* p);
 
+  // Frees all pages owned by given space.
+  static void FreeAllPages(PagedSpace* space);
+
   // Allocates and frees raw memory of certain size.
   // These are just thin wrappers around OS::Allocate and OS::Free,
   // but keep track of allocated bytes as part of heap.
@@ -511,6 +541,15 @@ class MemoryAllocator : public AllStatic {
   static Page* FindFirstPageInSameChunk(Page* p);
   static Page* FindLastPageInSameChunk(Page* p);
 
+  // Relinks list of pages owned by space to make it chunk-ordered.
+  // Returns new first and last pages of space.
+  // Also returns last page in relinked list which has WasInUseBeforeMC
+  // flag set.
+  static void RelinkPageListInChunkOrder(PagedSpace* space,
+                                         Page** first_page,
+                                         Page** last_page,
+                                         Page** last_page_in_use);
+
 #ifdef ENABLE_HEAP_PROTECTION
   // Protect/unprotect a block of memory by marking it read-only/writable.
   static inline void Protect(Address start, size_t size);
@@ -599,6 +638,12 @@ class MemoryAllocator : public AllStatic {
   // used as a marking stack and its page headers are destroyed.
   static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                       PagedSpace* owner);
+
+  static Page* RelinkPagesInChunk(int chunk_id,
+                                  Address chunk_start,
+                                  int chunk_size,
+                                  Page* prev,
+                                  Page** last_page_in_use);
 };
@@ -880,7 +925,7 @@ class PagedSpace : public Space {
   void ClearRSet();
 
   // Prepares for a mark-compact GC.
-  virtual void PrepareForMarkCompact(bool will_compact) = 0;
+  virtual void PrepareForMarkCompact(bool will_compact);
 
   virtual Address PageAllocationTop(Page* page) = 0;
@@ -920,6 +965,16 @@ class PagedSpace : public Space {
   // Used by ReserveSpace.
   virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
 
+  // Free all pages in range from prev (exclusive) to last (inclusive).
+  // Freed pages are moved to the end of page list.
+  void FreePages(Page* prev, Page* last);
+
+  // Set space allocation info.
+  void SetTop(Address top, Address limit) {
+    allocation_info_.top = top;
+    allocation_info_.limit = limit;
+  }
+
   // ---------------------------------------------------------------------------
   // Mark-compact collection support functions
@@ -968,6 +1023,9 @@ class PagedSpace : public Space {
   static void ResetCodeStatistics();
 #endif
 
+  // Returns the page of the allocation pointer.
+  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
+
  protected:
   // Maximum capacity of this space.
   int max_capacity_;
@@ -982,6 +1040,10 @@ class PagedSpace : public Space {
   // Expand and Shrink.
   Page* last_page_;
 
+  // True if pages owned by this space are linked in chunk-order.
+  // See comment for class MemoryAllocator for definition of chunk-order.
+  bool page_list_is_chunk_ordered_;
+
   // Normal allocation information.
   AllocationInfo allocation_info_;
@@ -1043,8 +1105,6 @@ class PagedSpace : public Space {
   void DoPrintRSet(const char* space_name);
 #endif
 
  private:
-  // Returns the page of the allocation pointer.
-  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
 
   // Returns a pointer to the page of the relocation pointer.
   Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
@@ -1671,10 +1731,15 @@ class OldSpace : public PagedSpace {
 
   // Give a block of memory to the space's free list.  It might be added to
   // the free list or accounted as waste.
-  void Free(Address start, int size_in_bytes) {
-    int wasted_bytes = free_list_.Free(start, size_in_bytes);
+  // If add_to_freelist is false then only accounting stats are updated and
+  // no attempt to add the area to the free list is made.
+  void Free(Address start, int size_in_bytes, bool add_to_freelist) {
     accounting_stats_.DeallocateBytes(size_in_bytes);
-    accounting_stats_.WasteBytes(wasted_bytes);
+
+    if (add_to_freelist) {
+      int wasted_bytes = free_list_.Free(start, size_in_bytes);
+      accounting_stats_.WasteBytes(wasted_bytes);
+    }
   }
 
   // Prepare for full garbage collection.  Resets the relocation pointer and
@@ -1736,8 +1801,12 @@ class FixedSpace : public PagedSpace {
   int object_size_in_bytes() { return object_size_in_bytes_; }
 
   // Give a fixed sized block of memory to the space's free list.
-  void Free(Address start) {
-    free_list_.Free(start);
+  // If add_to_freelist is false then only accounting stats are updated and
+  // no attempt to add the area to the free list is made.
+  void Free(Address start, bool add_to_freelist) {
+    if (add_to_freelist) {
+      free_list_.Free(start);
+    }
     accounting_stats_.DeallocateBytes(object_size_in_bytes_);
   }
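Several of these declarations rely on the chunk-order invariant defined in the MemoryAllocator comment: pages belonging to the same chunk must stay consecutive in the page list, otherwise Shrink and FreePages cannot release whole chunks. The small checker below is a hypothetical helper (not part of V8) that makes the property concrete over a flattened list of per-page chunk ids.

```cpp
#include <cassert>
#include <set>
#include <vector>

// Returns true if every run of pages belonging to one chunk is contiguous,
// i.e. a chunk id never reappears after a different chunk id has been seen.
bool IsChunkOrdered(const std::vector<int>& chunk_id_per_page) {
  std::set<int> closed;  // Chunk ids whose run has already ended.
  int current = -1;      // Chunk id of the current run (-1 = none yet).
  for (int id : chunk_id_per_page) {
    if (id == current) continue;         // Still inside the same run.
    if (closed.count(id)) return false;  // Chunk id resurfaced elsewhere.
    if (current != -1) closed.insert(current);
    current = id;
  }
  return true;
}

int main() {
  assert(IsChunkOrdered({1, 1, 2, 2, 3}));  // Pages of each chunk are adjacent.
  assert(!IsChunkOrdered({1, 2, 1}));       // Chunk 1 is split by a page of
                                            // chunk 2: not chunk-ordered, the
                                            // situation FreePages can create
                                            // and RelinkPageListInChunkOrder
                                            // repairs.
  return 0;
}
```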
...
@@ -830,11 +830,11 @@ TEST(LargeObjectSpaceContains) {
   }
   CHECK(bytes_to_page > FixedArray::kHeaderSize);
 
-  int* is_normal_page_ptr = &Page::FromAddress(next_page)->is_normal_page;
-  Address is_normal_page_addr = reinterpret_cast<Address>(is_normal_page_ptr);
+  intptr_t* flags_ptr = &Page::FromAddress(next_page)->flags;
+  Address flags_addr = reinterpret_cast<Address>(flags_ptr);
 
   int bytes_to_allocate =
-      static_cast<int>(is_normal_page_addr - current_top) + kPointerSize;
+      static_cast<int>(flags_addr - current_top) + kPointerSize;
 
   int n_elements = (bytes_to_allocate - FixedArray::kHeaderSize) /
       kPointerSize;
@@ -843,7 +843,7 @@ TEST(LargeObjectSpaceContains) {
       Heap::AllocateFixedArray(n_elements));
 
   int index = n_elements - 1;
-  CHECK_EQ(is_normal_page_ptr,
+  CHECK_EQ(flags_ptr,
            HeapObject::RawField(array, FixedArray::OffsetOfElementAt(index)));
   array->set(index, Smi::FromInt(0));
   // This could have turned the next page into a LargeObjectPage:
...
@@ -77,7 +77,7 @@ TEST(Page) {
   CHECK(p->is_valid());
 
   p->opaque_header = 0;
-  p->is_normal_page = 0x1;
+  p->SetIsLargeObjectPage(false);
   CHECK(!p->next_page()->is_valid());
 
   CHECK(p->ObjectAreaStart() == page_start + Page::kObjectStartOffset);
...