Commit 930aa4a5 authored by hpayer@chromium.org

Don't unlink evacuation candidates before sweeping; instead, move them to the end of their space's page list.

BUG=
R=mstarzinger@chromium.org

Review URL: https://codereview.chromium.org/256743004

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@21042 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 1a2d6526
src/mark-compact.cc
@@ -3176,7 +3176,6 @@ void MarkCompactCollector::EvacuatePages() {
slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
page->ClearEvacuationCandidate();
page->SetFlag(Page::RESCAN_ON_EVACUATION);
-page->InsertAfter(static_cast<PagedSpace*>(page->owner())->anchor());
}
return;
}
@@ -3667,14 +3666,14 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
}
-void MarkCompactCollector::UnlinkEvacuationCandidates() {
+void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
int npages = evacuation_candidates_.length();
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
if (!p->IsEvacuationCandidate()) continue;
p->Unlink();
-p->ClearSweptPrecisely();
-p->ClearSweptConservatively();
+PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+p->InsertAfter(space->LastPage());
}
}
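
For readers outside V8: a PagedSpace keeps its pages on a circular, doubly-linked intrusive list threaded through a dummy anchor page, so LastPage() is simply the anchor's prev pointer and "move to the end" is an Unlink() followed by InsertAfter(LastPage()). The sketch below illustrates that operation with simplified stand-in types (PageNode is hypothetical, not V8's Page/MemoryChunk):

#include <cassert>

// Simplified stand-in for V8's intrusive page list; the real code lives in
// src/spaces.h/.cc and carries far more per-page state.
struct PageNode {
  PageNode* next = this;  // a detached node points at itself
  PageNode* prev = this;

  void Unlink() {
    prev->next = next;
    next->prev = prev;
    next = prev = this;
  }

  // Link this node immediately after |other| (caller unlinks first).
  void InsertAfter(PageNode* other) {
    next = other->next;
    prev = other;
    other->next->prev = this;
    other->next = this;
  }
};

int main() {
  // anchor.next is the first page, anchor.prev the last page.
  PageNode anchor, a, b, c;
  a.InsertAfter(&anchor);
  b.InsertAfter(&a);
  c.InsertAfter(&b);  // list: anchor -> a -> b -> c

  // Move |a| to the end of the list, as
  // MoveEvacuationCandidatesToEndOfPagesList() does per candidate.
  a.Unlink();
  a.InsertAfter(anchor.prev);  // list: anchor -> b -> c -> a
  assert(anchor.prev == &a && anchor.next == &b);
  return 0;
}

Because candidates now stay on the list, the aborted-compaction path in EvacuatePages() above no longer needs to re-insert them after the anchor, which is why that InsertAfter call is deleted.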
@@ -3689,7 +3688,7 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
p->set_scan_on_scavenge(false);
slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
p->ResetLiveBytes();
-space->ReleasePage(p, false);
+space->ReleasePage(p);
}
evacuation_candidates_.Rewind(0);
compacting_ = false;
@@ -4113,6 +4112,7 @@ void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
free_list->Concatenate(&private_free_list);
p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
}
+if (p == space->end_of_unswept_pages()) break;
}
}
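
The added break bounds the sweeper loop: SweepInParallel now stops once it has processed the page recorded as end_of_unswept_pages(), so it never walks into the evacuation candidates parked at the tail of the list. A minimal single-threaded sketch of that loop shape, assuming a plain vector in place of the intrusive page list (all names here are illustrative, not V8's):

#include <cstdio>
#include <vector>

// Sequential sketch only; the real SweepInParallel runs on sweeper threads
// and claims pages via atomic state transitions, which is elided here.
struct Page {
  int id;
  bool swept = false;
};

void SweepInParallelSketch(std::vector<Page*>& pages,
                           Page* end_of_unswept_pages) {
  for (Page* p : pages) {
    p->swept = true;  // stand-in for actually sweeping the page
    std::printf("swept page %d\n", p->id);
    // Stop after the last page the main thread queued for sweeping;
    // everything beyond it is an evacuation candidate moved to the tail.
    if (p == end_of_unswept_pages) break;
  }
}

int main() {
  Page a{1}, b{2}, cand{3};
  std::vector<Page*> list = {&a, &b, &cand};  // candidate parked at the end
  SweepInParallelSketch(list, &b);            // sweeps 1 and 2, never 3
  return 0;
}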
@@ -4123,6 +4123,10 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
sweeper == CONCURRENT_CONSERVATIVE);
space->ClearStats();
+// We defensively initialize end_of_unswept_pages_ here with the first page
+// of the pages list.
+space->set_end_of_unswept_pages(space->FirstPage());
PageIterator it(space);
int pages_swept = 0;
@@ -4131,15 +4135,14 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
while (it.has_next()) {
Page* p = it.next();
ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
-ASSERT(!p->IsEvacuationCandidate());
// Clear sweeping flags indicating that marking bits are still intact.
p->ClearSweptPrecisely();
p->ClearSweptConservatively();
-if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
+p->IsEvacuationCandidate()) {
// Will be processed in EvacuateNewSpaceAndCandidates.
ASSERT(evacuation_candidates_.length() > 0);
continue;
@@ -4155,7 +4158,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
// Adjust unswept free bytes because releasing a page expects said
// counter to be accurate for unswept pages.
space->IncreaseUnsweptFreeBytes(p);
-space->ReleasePage(p, true);
+space->ReleasePage(p);
continue;
}
unused_page_present = true;
@@ -4189,6 +4192,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING);
space->IncreaseUnsweptFreeBytes(p);
}
+space->set_end_of_unswept_pages(p);
break;
}
case PRECISE: {
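
On the producer side, SweepSpace first points end_of_unswept_pages at the first page (the defensive initialization above) and then advances it to each page it queues as PARALLEL_SWEEPING_PENDING, so the marker always names the last page sweeper threads may touch; candidates must now be skipped by an explicit flag check, since they remain reachable through the list. A condensed sketch of that control flow, with flags reduced to booleans and all types hypothetical:

#include <vector>

struct Page {
  bool rescan_on_evacuation = false;
  bool evacuation_candidate = false;
  bool pending_parallel_sweep = false;
};

struct Space {
  std::vector<Page*> pages;             // stand-in for the intrusive list
  Page* end_of_unswept_pages = nullptr;
};

// Condensed shape of the CONCURRENT/PARALLEL branch of SweepSpace.
void QueueForParallelSweeping(Space* space) {
  // Defensive initialization: if no page gets queued, sweepers that
  // consult the marker still see a valid page rather than stale garbage.
  space->end_of_unswept_pages = space->pages.front();
  for (Page* p : space->pages) {
    // Candidates stay linked now, so they must be skipped explicitly
    // instead of simply being absent from the list.
    if (p->rescan_on_evacuation || p->evacuation_candidate) continue;
    p->pending_parallel_sweep = true;
    space->end_of_unswept_pages = p;  // last queued page so far
  }
}

int main() {
  Page a, b, cand;
  cand.evacuation_candidate = true;
  Space s{{&a, &b, &cand}};  // candidate already moved to the tail
  QueueForParallelSweeping(&s);
  return s.end_of_unswept_pages == &b ? 0 : 1;
}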
@@ -4238,9 +4242,7 @@ void MarkCompactCollector::SweepSpaces() {
}
if (sweep_precisely_) how_to_sweep = PRECISE;
-// Unlink evacuation candidates before sweeper threads access the list of
-// pages to avoid race condition.
-UnlinkEvacuationCandidates();
+MoveEvacuationCandidatesToEndOfPagesList();
// Noncompacting collections simply sweep the spaces to clear the mark
// bits and free the nonlive blocks (for old and map spaces). We sweep
src/mark-compact.h
@@ -704,9 +704,6 @@ class MarkCompactCollector {
void RemoveDeadInvalidatedCode();
void ProcessInvalidatedCode(ObjectVisitor* visitor);
-void UnlinkEvacuationCandidates();
void ReleaseEvacuationCandidates();
void StartSweeperThreads();
#ifdef DEBUG
@@ -895,6 +892,12 @@ class MarkCompactCollector {
void EvacuateNewSpaceAndCandidates();
void ReleaseEvacuationCandidates();
+// Moves the pages of the evacuation_candidates_ list to the end of their
+// corresponding space's page list.
+void MoveEvacuationCandidatesToEndOfPagesList();
void SweepSpace(PagedSpace* space, SweeperType sweeper);
// Finalizes the parallel sweeping phase. Marks all the pages that were
src/spaces.cc
@@ -559,10 +559,6 @@ void MemoryChunk::InsertAfter(MemoryChunk* other) {
void MemoryChunk::Unlink() {
-if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
-heap_->decrement_scan_on_scavenge_pages();
-ClearFlag(SCAN_ON_SCAVENGE);
-}
MemoryChunk* next_element = next_chunk();
MemoryChunk* prev_element = prev_chunk();
next_element->set_prev_chunk(prev_element);
@@ -930,7 +926,8 @@ PagedSpace::PagedSpace(Heap* heap,
: Space(heap, id, executable),
free_list_(this),
was_swept_conservatively_(false),
-unswept_free_bytes_(0) {
+unswept_free_bytes_(0),
+end_of_unswept_pages_(NULL) {
if (id == CODE_SPACE) {
area_size_ = heap->isolate()->memory_allocator()->
CodePageAreaSize();
@@ -1103,7 +1100,7 @@ void PagedSpace::IncreaseCapacity(int size) {
}
-void PagedSpace::ReleasePage(Page* page, bool unlink) {
+void PagedSpace::ReleasePage(Page* page) {
ASSERT(page->LiveBytes() == 0);
ASSERT(AreaSize() == page->area_size());
@@ -1115,6 +1112,11 @@ void PagedSpace::ReleasePage(Page* page, bool unlink) {
DecreaseUnsweptFreeBytes(page);
}
+if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
+heap()->decrement_scan_on_scavenge_pages();
+page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
+}
ASSERT(!free_list_.ContainsPageFreeListItems(page));
if (Page::FromAllocationTop(allocation_info_.top()) == page) {
@@ -1122,9 +1124,7 @@ void PagedSpace::ReleasePage(Page* page, bool unlink) {
allocation_info_.set_limit(NULL);
}
-if (unlink) {
-page->Unlink();
-}
+page->Unlink();
if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
heap()->isolate()->memory_allocator()->Free(page);
} else {
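
The design point behind these two hunks: Unlink() is now also used on live pages that are merely being moved to the end of the list, so it can no longer be the place where the heap's scan-on-scavenge page counter is decremented; that accounting moves into ReleasePage(), the one true removal path, which now also unlinks unconditionally (hence the dropped bool parameter). A toy model of the invariant, with all types and names hypothetical:

#include <cassert>

// Simplified model: the heap counts pages flagged scan-on-scavenge, and the
// counter must change only when a page is destroyed, not when it is moved.
struct Heap {
  int scan_on_scavenge_pages = 0;
};

struct Page {
  Heap* heap;
  bool scan_on_scavenge = false;
  bool linked = false;

  void Unlink() { linked = false; }          // pure list surgery, no accounting
  void InsertAfter(Page* /*other*/) { linked = true; }
};

// Shape of the new PagedSpace::ReleasePage: accounting plus an
// unconditional Unlink(); the real function then frees the page.
void ReleasePage(Page* page) {
  if (page->scan_on_scavenge) {
    page->heap->scan_on_scavenge_pages--;    // moved here from Unlink()
    page->scan_on_scavenge = false;
  }
  page->Unlink();
}

int main() {
  Heap heap{1};
  Page p{&heap, /*scan_on_scavenge=*/true, /*linked=*/true};
  Page last{&heap};

  p.Unlink();           // moving a live page ...
  p.InsertAfter(&last);
  assert(heap.scan_on_scavenge_pages == 1);  // ... leaves the counter alone

  ReleasePage(&p);      // destroying it is what decrements
  assert(heap.scan_on_scavenge_pages == 0);
  return 0;
}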
src/spaces.h
@@ -1831,7 +1831,7 @@ class PagedSpace : public Space {
void IncreaseCapacity(int size);
// Releases an unused page and shrinks the space.
-void ReleasePage(Page* page, bool unlink);
+void ReleasePage(Page* page);
// The dummy page that anchors the linked list of pages.
Page* anchor() { return &anchor_; }
@@ -1891,6 +1891,20 @@ class PagedSpace : public Space {
unswept_free_bytes_ = 0;
}
+// This function tries to steal size_in_bytes memory from the sweeper threads'
+// free lists. If it does not succeed in stealing enough memory, it will wait
+// for the sweeper threads to finish sweeping.
+// It returns true when sweeping is completed and false otherwise.
+bool EnsureSweeperProgress(intptr_t size_in_bytes);
+void set_end_of_unswept_pages(Page* page) {
+end_of_unswept_pages_ = page;
+}
+Page* end_of_unswept_pages() {
+return end_of_unswept_pages_;
+}
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }
@@ -1935,6 +1949,11 @@ class PagedSpace : public Space {
// sweeping is done conservatively.
intptr_t unswept_free_bytes_;
+// The sweeper threads iterate over the list of pointer and data space pages
+// and sweep these pages concurrently. They will stop sweeping after the
+// end_of_unswept_pages_ page.
+Page* end_of_unswept_pages_;
// Expands the space by allocating a fixed number of pages. Returns false if
// it cannot allocate requested number of pages from OS, or if the hard heap
// size limit has been hit.
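
Putting the pieces together, here is a single-threaded toy run of the new ordering: candidates are moved to the tail (modelled with stable_partition, which preserves relative order like repeated Unlink()+InsertAfter(LastPage())), the main thread records end_of_unswept_pages, and sweeping stops at the marker without any page ever leaving the list. All types here are simplified stand-ins, not V8's:

#include <algorithm>
#include <cassert>
#include <vector>

// Single-threaded toy of the new phase ordering; real V8 does this with an
// intrusive page list and concurrent sweeper threads.
struct Page {
  int id;
  bool evacuation_candidate = false;
  bool swept = false;
};

int main() {
  Page p1{1}, p2{2, /*candidate*/ true}, p3{3}, p4{4, /*candidate*/ true};
  std::vector<Page*> pages = {&p1, &p2, &p3, &p4};

  // 1. MoveEvacuationCandidatesToEndOfPagesList: non-candidates keep their
  //    relative order, candidates end up behind them.
  std::stable_partition(pages.begin(), pages.end(),
                        [](Page* p) { return !p->evacuation_candidate; });

  // 2. SweepSpace queues unswept pages and records the last one.
  Page* end_of_unswept_pages = pages.front();
  for (Page* p : pages) {
    if (p->evacuation_candidate) continue;
    end_of_unswept_pages = p;
  }

  // 3. Sweepers process pages and stop at the marker, never reaching
  //    the candidates parked behind it.
  for (Page* p : pages) {
    p->swept = true;
    if (p == end_of_unswept_pages) break;
  }

  assert(p1.swept && p3.swept && !p2.swept && !p4.swept);
  return 0;
}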