Commit b8dc77ab authored by vegorov@chromium.org

Fix Heap::Shrink to ensure that it does not free pages that are still in use.

Heap::Shrink is called from EnsureFromSpaceIsCommitted at the very start of the GC. At this moment the live bytes counts on pages are in an inconsistent state: some pages might already have been swept but not yet reached by the incremental marker (or the incremental marker might not be in progress at all), so their live bytes counts are still 0. Thus we cannot rely on LiveBytes alone to determine which pages can be released to the OS.

R=mstarzinger@chromium.org
BUG=100414

Review URL: http://codereview.chromium.org/8507038

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9953 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 33682c6b
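
In short, the patch makes PagedSpace::ReleaseAllUnusedPages treat swept and unswept pages differently before a page is handed back to the OS. Below is a condensed sketch of the new criterion; CanReleasePage is a hypothetical helper used only for illustration, while the real patch inlines this logic in the loop shown in the diff:

// Illustrative sketch only; not part of the patch.
static bool CanReleasePage(FreeList* free_list, Page* page) {
  if (!page->WasSwept()) {
    // An unswept page has not been allocated on since the last mark phase,
    // so a live bytes count of zero means the page really is empty.
    return page->LiveBytes() == 0;
  }
  // A swept page can report LiveBytes() == 0 even though objects were
  // allocated on it after sweeping. Require that the whole object area
  // looks like a single FreeSpace filler...
  HeapObject* obj = HeapObject::FromAddress(page->body());
  if (!obj->IsFreeSpace() ||
      FreeSpace::cast(obj)->size() != Page::kObjectAreaSize) {
    return false;
  }
  // ...and, because memory handed out by the free list may still look like
  // a FreeSpace node before it is initialized, also verify that the page is
  // fully covered by items that are still on the free list.
  FreeList::SizeStats sizes;
  free_list->CountFreeListItems(page, &sizes);
  return sizes.Total() == Page::kObjectAreaSize;
}
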
@@ -5609,8 +5609,11 @@ void Heap::TearDown() {
void Heap::Shrink() {
// Try to shrink all paged spaces.
PagedSpaces spaces;
for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
for (PagedSpace* space = spaces.next();
space != NULL;
space = spaces.next()) {
space->ReleaseAllUnusedPages();
}
}
@@ -3547,7 +3547,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space,
case LAZY_CONSERVATIVE: {
freed_bytes += SweepConservatively(space, p);
if (freed_bytes >= newspace_size && p != space->LastPage()) {
space->SetPagesToSweep(p->next_page(), space->anchor());
space->SetPagesToSweep(p->next_page());
lazy_sweeping_active = true;
}
break;
@@ -658,8 +658,7 @@ PagedSpace::PagedSpace(Heap* heap,
: Space(heap, id, executable),
free_list_(this),
was_swept_conservatively_(false),
first_unswept_page_(Page::FromAddress(NULL)),
last_unswept_page_(Page::FromAddress(NULL)) {
first_unswept_page_(Page::FromAddress(NULL)) {
max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
* Page::kObjectAreaSize;
accounting_stats_.Clear();
@@ -754,6 +753,21 @@ int PagedSpace::CountTotalPages() {
void PagedSpace::ReleasePage(Page* page) {
ASSERT(page->LiveBytes() == 0);
// Adjust the list of unswept pages if the page is its head.
if (first_unswept_page_ == page) {
first_unswept_page_ = page->next_page();
if (first_unswept_page_ == anchor()) {
first_unswept_page_ = Page::FromAddress(NULL);
}
}
if (page->WasSwept()) {
intptr_t size = free_list_.EvictFreeListItems(page);
accounting_stats_.AllocateBytes(size);
ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size));
}
page->Unlink();
if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
heap()->isolate()->memory_allocator()->Free(page);
@@ -771,8 +785,26 @@ void PagedSpace::ReleaseAllUnusedPages() {
PageIterator it(this);
while (it.has_next()) {
Page* page = it.next();
if (page->LiveBytes() == 0) {
ReleasePage(page);
if (!page->WasSwept()) {
if (page->LiveBytes() == 0) ReleasePage(page);
} else {
HeapObject* obj = HeapObject::FromAddress(page->body());
if (obj->IsFreeSpace() &&
FreeSpace::cast(obj)->size() == Page::kObjectAreaSize) {
// Sometimes we allocate memory from free list but don't
// immediately initialize it (e.g. see PagedSpace::ReserveSpace
// called from Heap::ReserveSpace that can cause GC before
// reserved space is actually initialized).
// Thus we can't simply assume that obj represents a valid
// node still owned by a free list. Instead we should verify
// that the page is fully covered by free list items.
FreeList::SizeStats sizes;
free_list_.CountFreeListItems(page, &sizes);
if (sizes.Total() == Page::kObjectAreaSize) {
ReleasePage(page);
}
}
}
}
heap()->FreeQueuedChunks();
@@ -1870,13 +1902,50 @@ static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
}
void FreeList::CountFreeListItems(Page* p, intptr_t* sizes) {
sizes[0] = CountFreeListItemsInList(small_list_, p);
sizes[1] = CountFreeListItemsInList(medium_list_, p);
sizes[2] = CountFreeListItemsInList(large_list_, p);
sizes[3] = CountFreeListItemsInList(huge_list_, p);
void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
if (sizes->huge_size_ < Page::kObjectAreaSize) {
sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
} else {
sizes->small_size_ = 0;
sizes->medium_size_ = 0;
sizes->large_size_ = 0;
}
}
static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
intptr_t sum = 0;
while (*n != NULL) {
if (Page::FromAddress((*n)->address()) == p) {
FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
sum += free_space->Size();
*n = (*n)->next();
} else {
n = (*n)->next_address();
}
}
return sum;
}
intptr_t FreeList::EvictFreeListItems(Page* p) {
intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
if (sum < Page::kObjectAreaSize) {
sum += EvictFreeListItemsInList(&small_list_, p) +
EvictFreeListItemsInList(&medium_list_, p) +
EvictFreeListItemsInList(&large_list_, p);
}
available_ -= sum;
return sum;
}
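
The new eviction helpers walk each free list through a FreeListNode** cursor, so a matching node can be spliced out without tracking a separate previous pointer. A self-contained sketch of the same pattern on a generic singly linked list (illustrative only; the names here are invented and nothing below is V8 code):

#include <cstdint>

struct Node {
  intptr_t size;
  Node* next;
};

// Remove every node for which match() holds and return the total size removed.
// As in EvictFreeListItemsInList, rewriting *cursor splices a matching node out
// of the list, while a non-matching node just advances the cursor into its next
// field. Nodes are not freed here; in V8 they live inside the page being released.
template <typename Pred>
intptr_t EvictMatching(Node** head, Pred match) {
  intptr_t evicted = 0;
  Node** cursor = head;
  while (*cursor != nullptr) {
    if (match(*cursor)) {
      evicted += (*cursor)->size;
      *cursor = (*cursor)->next;   // unlink the matching node
    } else {
      cursor = &(*cursor)->next;   // step to the next link field
    }
  }
  return evicted;
}
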
#ifdef DEBUG
intptr_t FreeList::SumFreeList(FreeListNode* cur) {
intptr_t sum = 0;
@@ -1963,7 +2032,6 @@ void PagedSpace::PrepareForMarkCompact() {
// Stop lazy sweeping and clear marking bits for unswept pages.
if (first_unswept_page_ != NULL) {
Page* last = last_unswept_page_;
Page* p = first_unswept_page_;
do {
// Do not use ShouldBeSweptLazily predicate here.
@@ -1977,9 +2045,9 @@ void PagedSpace::PrepareForMarkCompact() {
}
}
p = p->next_page();
} while (p != last);
} while (p != anchor());
}
first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL);
first_unswept_page_ = Page::FromAddress(NULL);
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_.Reset();
@@ -2020,7 +2088,6 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
if (IsSweepingComplete()) return true;
intptr_t freed_bytes = 0;
Page* last = last_unswept_page_;
Page* p = first_unswept_page_;
do {
Page* next_page = p->next_page();
@@ -2032,10 +2099,10 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
}
p = next_page;
} while (p != last && freed_bytes < bytes_to_sweep);
} while (p != anchor() && freed_bytes < bytes_to_sweep);
if (p == last) {
last_unswept_page_ = first_unswept_page_ = Page::FromAddress(NULL);
if (p == anchor()) {
first_unswept_page_ = Page::FromAddress(NULL);
} else {
first_unswept_page_ = p;
}
@@ -1347,8 +1347,6 @@ class FreeList BASE_EMBEDDED {
// 'wasted_bytes'. The size should be a non-zero multiple of the word size.
MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
void MarkNodes();
#ifdef DEBUG
void Zap();
static intptr_t SumFreeList(FreeListNode* node);
@@ -1357,7 +1355,20 @@
bool IsVeryLong();
#endif
void CountFreeListItems(Page* p, intptr_t* sizes);
struct SizeStats {
intptr_t Total() {
return small_size_ + medium_size_ + large_size_ + huge_size_;
}
intptr_t small_size_;
intptr_t medium_size_;
intptr_t large_size_;
intptr_t huge_size_;
};
void CountFreeListItems(Page* p, SizeStats* sizes);
intptr_t EvictFreeListItems(Page* p);
private:
// The size range of blocks, in bytes.
@@ -1541,9 +1552,8 @@ class PagedSpace : public Space {
!p->WasSweptPrecisely();
}
void SetPagesToSweep(Page* first, Page* last) {
void SetPagesToSweep(Page* first) {
first_unswept_page_ = first;
last_unswept_page_ = last;
}
bool AdvanceSweeper(intptr_t bytes_to_sweep);
@@ -1556,16 +1566,18 @@ class PagedSpace : public Space {
Page* LastPage() { return anchor_.prev_page(); }
bool IsFragmented(Page* p) {
intptr_t sizes[4];
free_list_.CountFreeListItems(p, sizes);
FreeList::SizeStats sizes;
free_list_.CountFreeListItems(p, &sizes);
intptr_t ratio;
intptr_t ratio_threshold;
if (identity() == CODE_SPACE) {
ratio = (sizes[1] * 10 + sizes[2] * 2) * 100 / Page::kObjectAreaSize;
ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
Page::kObjectAreaSize;
ratio_threshold = 10;
} else {
ratio = (sizes[0] * 5 + sizes[1]) * 100 / Page::kObjectAreaSize;
ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
Page::kObjectAreaSize;
ratio_threshold = 15;
}
@@ -1573,19 +1585,23 @@ class PagedSpace : public Space {
PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
reinterpret_cast<void*>(p),
identity(),
static_cast<int>(sizes[0]),
static_cast<double>(sizes[0] * 100) / Page::kObjectAreaSize,
static_cast<int>(sizes[1]),
static_cast<double>(sizes[1] * 100) / Page::kObjectAreaSize,
static_cast<int>(sizes[2]),
static_cast<double>(sizes[2] * 100) / Page::kObjectAreaSize,
static_cast<int>(sizes[3]),
static_cast<double>(sizes[3] * 100) / Page::kObjectAreaSize,
static_cast<int>(sizes.small_size_),
static_cast<double>(sizes.small_size_ * 100) /
Page::kObjectAreaSize,
static_cast<int>(sizes.medium_size_),
static_cast<double>(sizes.medium_size_ * 100) /
Page::kObjectAreaSize,
static_cast<int>(sizes.large_size_),
static_cast<double>(sizes.large_size_ * 100) /
Page::kObjectAreaSize,
static_cast<int>(sizes.huge_size_),
static_cast<double>(sizes.huge_size_ * 100) /
Page::kObjectAreaSize,
(ratio > ratio_threshold) ? "[fragmented]" : "");
}
return (ratio > ratio_threshold) ||
(FLAG_always_compact && sizes[3] != Page::kObjectAreaSize);
(FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize);
}
void EvictEvacuationCandidatesFromFreeLists();
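
For a feel of the weighting in IsFragmented(), here is a worked example with invented numbers for a non-code space; kAreaSize merely stands in for Page::kObjectAreaSize and is assumed to be 1 MB here:

// Hypothetical free-list coverage of one page, for illustration only.
const intptr_t kAreaSize = 1 << 20;       // assumed object area of 1 MB
const intptr_t small_size = 24 * 1024;    // invented small-list coverage
const intptr_t medium_size = 40 * 1024;   // invented medium-list coverage
intptr_t ratio = (small_size * 5 + medium_size) * 100 / kAreaSize;
// ratio == 160 KB * 100 / 1024 KB == 15, which equals the non-code
// ratio_threshold, so slightly more small or medium waste would make the
// page count as [fragmented].
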
@@ -1617,7 +1633,6 @@ class PagedSpace : public Space {
bool was_swept_conservatively_;
Page* first_unswept_page_;
Page* last_unswept_page_;
// Expands the space by allocating a fixed number of pages. Returns false if
// it cannot allocate requested number of pages from OS.
@@ -2334,8 +2349,6 @@ class FixedSpace : public PagedSpace {
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact();
void MarkFreeListNodes() { free_list_.MarkNodes(); }
protected:
void ResetFreeList() {
free_list_.Reset();