Implement shrinking of paged spaces during sweeping.

For each paged space we release all but one of the unused pages after
marking (when we know the number of live bytes) but before actually
sweeping them; see the sketch below. This is not yet done for lazily
swept pages.

R=erik.corry@gmail.com
BUG=v8:1614

Review URL: http://codereview.chromium.org/7891010

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9481 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 1a0423b5
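
The core policy is easiest to see outside the diff: after marking, walk a space's pages, keep the first page with zero live bytes as spare capacity, and release every further empty page before it would otherwise be swept. Below is a minimal, self-contained C++ sketch of that policy only; Page, Space, ReleaseEmptyPagesKeepingOne and the released flag are simplified stand-ins for illustration, not V8's real classes.

// Sketch of the "keep one empty page, release the rest" sweep policy.
// Page/Space here are toy types, not the V8 classes changed in this diff.
#include <cstdio>
#include <vector>

struct Page {
  int live_bytes;   // Live bytes found on this page during marking.
  bool released;    // Whether the page was given back to the OS.
};

struct Space {
  std::vector<Page> pages;

  // Release all but one unused (live_bytes == 0) page before sweeping.
  void ReleaseEmptyPagesKeepingOne() {
    bool unused_page_present = false;
    for (Page& p : pages) {
      if (p.live_bytes != 0) continue;  // Page still holds live objects.
      if (unused_page_present) {
        p.released = true;              // Every further empty page goes back.
      } else {
        unused_page_present = true;     // Keep exactly one empty page around.
      }
    }
  }
};

int main() {
  Space space{{{0, false}, {128, false}, {0, false}, {0, false}}};
  space.ReleaseEmptyPagesKeepingOne();
  for (const Page& p : space.pages) {
    std::printf("live=%d released=%d\n", p.live_bytes, p.released);
  }
  return 0;
}

In the actual change the keep-one decision is made per page inside MarkCompactCollector::SweepSpace via its unused_page_present flag, with released pages handed to the new PagedSpace::ReleasePage; PagedSpace::ReleaseAllUnusedPages (used by Heap::Shrink) releases every empty page.
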
@@ -144,7 +144,6 @@ Heap::Heap()
last_idle_notification_gc_count_(0),
last_idle_notification_gc_count_init_(false),
configured_(false),
last_empty_page_was_given_back_to_the_os_(false),
chunks_queued_for_free_(NULL) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
@@ -813,8 +812,6 @@ void Heap::MarkCompact(GCTracer* tracer) {
gc_state_ = NOT_IN_GC;
Shrink();
isolate_->counters()->objs_since_last_full()->Set(0);
contexts_disposed_ = 0;
@@ -5627,7 +5624,7 @@ void Heap::Shrink() {
// Try to shrink all paged spaces.
PagedSpaces spaces;
for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
space->Shrink();
space->ReleaseAllUnusedPages();
}
@@ -6470,6 +6467,7 @@ void Heap::FreeQueuedChunks() {
}
}
}
isolate_->heap()->store_buffer()->Compact();
isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
next = chunk->next_chunk();
......
@@ -1045,21 +1045,6 @@ class Heap {
Address end,
ObjectSlotCallback callback);
// Iterate pointers to new space found in memory interval from start to end.
static void IteratePointersToNewSpace(Heap* heap,
Address start,
Address end,
ObjectSlotCallback callback);
// Iterate pointers to new space found in memory interval from start to end.
// This interval is considered to belong to the map space.
static void IteratePointersFromMapsToNewSpace(Heap* heap,
Address start,
Address end,
ObjectSlotCallback callback);
// Returns whether the object resides in new space.
inline bool InNewSpace(Object* object);
inline bool InNewSpace(Address addr);
@@ -1442,12 +1427,6 @@ class Heap {
scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
}
bool ShouldWeGiveBackAPageToTheOS() {
last_empty_page_was_given_back_to_the_os_ =
!last_empty_page_was_given_back_to_the_os_;
return last_empty_page_was_given_back_to_the_os_;
}
void QueueMemoryChunkForFree(MemoryChunk* chunk);
void FreeQueuedChunks();
@@ -1818,7 +1797,6 @@ class Heap {
VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
bool last_empty_page_was_given_back_to_the_os_;
MemoryChunk* chunks_queued_for_free_;
friend class Factory;
......
@@ -2777,23 +2777,6 @@ static inline void UpdateSlot(ObjectVisitor* v,
}
static inline void UpdateSlotsInRange(Object** start, Object** end) {
for (Object** slot = start;
slot < end;
slot++) {
Object* obj = *slot;
if (obj->IsHeapObject() &&
MarkCompactCollector::IsOnEvacuationCandidate(obj)) {
MapWord map_word = HeapObject::cast(obj)->map_word();
if (map_word.IsForwardingAddress()) {
*slot = map_word.ToForwardingAddress();
ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*slot));
}
}
}
}
enum SweepingMode {
SWEEP_ONLY,
SWEEP_AND_VISIT_LIVE_OBJECTS
@@ -3160,52 +3143,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
}
INLINE(static uint32_t SweepFree(PagedSpace* space,
Page* p,
uint32_t free_start,
uint32_t region_end,
uint32_t* cells));
static uint32_t SweepFree(PagedSpace* space,
Page* p,
uint32_t free_start,
uint32_t region_end,
uint32_t* cells) {
uint32_t free_cell_index = Bitmap::IndexToCell(free_start);
ASSERT(cells[free_cell_index] == 0);
while (free_cell_index < region_end && cells[free_cell_index] == 0) {
free_cell_index++;
}
if (free_cell_index >= region_end) {
return free_cell_index;
}
uint32_t free_end = Bitmap::CellToIndex(free_cell_index);
space->FreeOrUnmapPage(p,
p->MarkbitIndexToAddress(free_start),
(free_end - free_start) << kPointerSizeLog2);
return free_cell_index;
}
INLINE(static uint32_t NextCandidate(uint32_t cell_index,
uint32_t last_cell_index,
uint32_t* cells));
static uint32_t NextCandidate(uint32_t cell_index,
uint32_t last_cell_index,
uint32_t* cells) {
do {
cell_index++;
} while (cell_index < last_cell_index && cells[cell_index] != 0);
return cell_index;
}
static const int kStartTableEntriesPerLine = 5;
static const int kStartTableLines = 171;
static const int kStartTableInvalidLine = 127;
@@ -3589,6 +3526,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space,
intptr_t freed_bytes = 0;
intptr_t newspace_size = space->heap()->new_space()->Size();
bool lazy_sweeping_active = false;
bool unused_page_present = false;
while (it.has_next()) {
Page* p = it.next();
@@ -3615,6 +3553,19 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space,
continue;
}
// One unused page is kept; all further unused pages are released before sweeping them.
if (p->LiveBytes() == 0) {
if (unused_page_present) {
if (FLAG_gc_verbose) {
PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
reinterpret_cast<intptr_t>(p));
}
space->ReleasePage(p);
continue;
}
unused_page_present = true;
}
if (FLAG_gc_verbose) {
PrintF("Sweeping 0x%" V8PRIxPTR " with sweeper %d.\n",
reinterpret_cast<intptr_t>(p),
@@ -3629,7 +3580,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space,
case LAZY_CONSERVATIVE: {
freed_bytes += SweepConservatively(space, p);
if (freed_bytes >= newspace_size && p != space->LastPage()) {
space->SetPagesToSweep(p->next_page(), space->LastPage());
space->SetPagesToSweep(p->next_page(), space->anchor());
lazy_sweeping_active = true;
}
break;
@@ -3647,6 +3598,9 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space,
}
}
}
// Give pages that are queued to be freed back to the OS.
heap()->FreeQueuedChunks();
}
......
@@ -763,8 +763,30 @@ int PagedSpace::CountTotalPages() {
#endif
void PagedSpace::Shrink() {
// TODO(1614) Not implemented.
void PagedSpace::ReleasePage(Page* page) {
ASSERT(page->LiveBytes() == 0);
page->Unlink();
if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
heap()->isolate()->memory_allocator()->Free(page);
} else {
heap()->QueueMemoryChunkForFree(page);
}
ASSERT(Capacity() > 0);
ASSERT(Capacity() % Page::kObjectAreaSize == 0);
accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
}
void PagedSpace::ReleaseAllUnusedPages() {
PageIterator it(this);
while (it.has_next()) {
Page* page = it.next();
if (page->LiveBytes() == 0) {
ReleasePage(page);
}
}
heap()->FreeQueuedChunks();
}
@@ -1649,25 +1671,6 @@ void FreeList::Reset() {
}
int PagedSpace::FreeOrUnmapPage(Page* page, Address start, int size_in_bytes) {
Heap* heap = page->heap();
// TODO(gc): When we count the live bytes per page we can free empty pages
// instead of sweeping. At that point this if should be turned into an
// ASSERT that the area to be freed cannot be the entire page.
if (size_in_bytes == Page::kObjectAreaSize &&
heap->ShouldWeGiveBackAPageToTheOS()) {
page->Unlink();
if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
heap->isolate()->memory_allocator()->Free(page);
} else {
heap->QueueMemoryChunkForFree(page);
}
return 0;
}
return Free(start, size_in_bytes);
}
int FreeList::Free(Address start, int size_in_bytes) {
if (size_in_bytes == 0) return 0;
FreeListNode* node = FreeListNode::FromAddress(start);
@@ -1920,7 +1923,7 @@ void PagedSpace::PrepareForMarkCompact() {
// Stop lazy sweeping and clear marking bits for unswept pages.
if (first_unswept_page_ != NULL) {
Page* last = last_unswept_page_->next_page();
Page* last = last_unswept_page_;
Page* p = first_unswept_page_;
do {
// Do not use ShouldBeSweptLazily predicate here.
@@ -1977,7 +1980,7 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
if (IsSweepingComplete()) return true;
intptr_t freed_bytes = 0;
Page* last = last_unswept_page_->next_page();
Page* last = last_unswept_page_;
Page* p = first_unswept_page_;
do {
Page* next_page = p->next_page();
......
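
The PagedSpace::ReleasePage hunk above distinguishes two release paths: a page flagged CONTAINS_ONLY_DATA is handed straight to the memory allocator, while any other page is queued and only freed once the store buffer has been filtered (the Filter(MemoryChunk::ABOUT_TO_BE_FREED) call in the Heap::FreeQueuedChunks hunk). The following is a minimal sketch of that split with simplified stand-in types (PageStub, HeapStub); it illustrates the decision, not V8's implementation.

// Sketch of the immediate-free vs. deferred-free split in ReleasePage().
#include <cstdio>
#include <vector>

struct PageStub {
  bool contains_only_data;  // Assumed: no pointer slots, so the store buffer
                            // holds no entries referring into this page.
  bool freed;               // Memory has been returned to the OS.
};

struct HeapStub {
  std::vector<PageStub*> queued_chunks;

  void QueueMemoryChunkForFree(PageStub* p) { queued_chunks.push_back(p); }

  void FreeQueuedChunks() {
    // In V8 this is where store-buffer entries pointing into the queued
    // chunks are filtered out before the chunks are actually released.
    for (PageStub* p : queued_chunks) p->freed = true;
    queued_chunks.clear();
  }
};

static void ReleasePage(HeapStub* heap, PageStub* page) {
  if (page->contains_only_data) {
    page->freed = true;                   // Safe to release right away.
  } else {
    heap->QueueMemoryChunkForFree(page);  // Defer until FreeQueuedChunks().
  }
}

int main() {
  HeapStub heap;
  PageStub data_page{true, false};
  PageStub pointer_page{false, false};
  ReleasePage(&heap, &data_page);
  ReleasePage(&heap, &pointer_page);
  heap.FreeQueuedChunks();
  std::printf("data freed=%d, pointer freed=%d\n",
              data_page.freed, pointer_page.freed);
  return 0;
}
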
@@ -1235,6 +1235,15 @@ class AllocationStats BASE_EMBEDDED {
ASSERT(size_ >= 0);
}
// Shrink the space by removing available bytes. Since shrinking is done
// during sweeping, bytes have been marked as being in use (part of the size)
// and are hereby freed.
void ShrinkSpace(int size_in_bytes) {
capacity_ -= size_in_bytes;
size_ -= size_in_bytes;
ASSERT(size_ >= 0);
}
// Allocate from available bytes (available -> size).
void AllocateBytes(intptr_t size_in_bytes) {
size_ += size_in_bytes;
@@ -1484,8 +1493,6 @@ class PagedSpace : public Space {
return size_in_bytes - wasted;
}
int FreeOrUnmapPage(Page* page, Address start, int size_in_bytes);
// Set space allocation info.
void SetTop(Address top, Address limit) {
ASSERT(top == limit ||
@@ -1502,8 +1509,11 @@ class PagedSpace : public Space {
accounting_stats_.ExpandSpace(size);
}
// Releases half of unused pages.
void Shrink();
// Releases an unused page and shrinks the space.
void ReleasePage(Page* page);
// Releases all of the unused pages.
void ReleaseAllUnusedPages();
// The dummy page that anchors the linked list of pages.
Page* anchor() { return &anchor_; }
......
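
Finally, the AllocationStats hunk: the new ShrinkSpace() removes a released page's bytes from both the capacity and the size counter, because (per its comment) those bytes are still accounted as being in use until the page is swept. Below is a small self-contained sketch of that bookkeeping with a simplified class, an assumed ExpandSpace() counterpart, and an assumed page size; it mirrors the shown ShrinkSpace() but is not the real AllocationStats.

// Sketch of the capacity/size accounting around page release.
#include <cassert>
#include <cstdint>

class AllocationStatsSketch {
 public:
  // Assumed counterpart: a new page's bytes are owned (capacity) and
  // initially counted as in use (size) until sweeping frees them.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    size_ += size_in_bytes;
  }
  // Released page: its bytes leave both counters, as in ShrinkSpace() above.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    size_ -= size_in_bytes;
    assert(size_ >= 0);
  }
  int64_t capacity() const { return capacity_; }
  int64_t size() const { return size_; }

 private:
  int64_t capacity_ = 0;
  int64_t size_ = 0;
};

int main() {
  const int kPageObjectArea = 1 << 20;  // Assumed page object area (1 MB).
  AllocationStatsSketch stats;
  stats.ExpandSpace(kPageObjectArea);   // Space grows by one page.
  stats.ShrinkSpace(kPageObjectArea);   // The empty page is given back.
  assert(stats.capacity() == 0 && stats.size() == 0);
  return 0;
}
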