Commit fffadaf9 authored by hpayer@chromium.org

Unlink evacuation candidates from list of pages before starting sweeper threads.

Removed FinalizeSweeping().
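The reordering in SweepSpaces() below: evacuation candidates are unlinked from
the space's page list while the collector is still single-threaded, the sweeper
threads are started only afterwards, and the candidate pages (already off the
list) are released at the end of SweepSpaces() via ReleaseEvacuationCandidates(),
which now also frees the queued chunks. A minimal, self-contained sketch of why
that ordering avoids the race; ToyPage, ToySpace, SweepInBackground and main are
illustrative stand-ins, not V8 code:

  #include <iostream>
  #include <list>
  #include <thread>
  #include <vector>

  struct ToyPage {
    int id;
    bool evacuation_candidate;
  };

  struct ToySpace {
    std::list<ToyPage*> pages;  // stand-in for the space's linked list of pages
  };

  // Runs on the sweeper thread; it only ever sees pages still linked in.
  void SweepInBackground(const ToySpace* space) {
    for (const ToyPage* p : space->pages) {
      std::cout << "sweeping page " << p->id << "\n";
    }
  }

  void SweepSpaces(ToySpace* space, std::vector<ToyPage*>* candidates) {
    // 1. Unlink evacuation candidates while still single-threaded
    //    (UnlinkEvacuationCandidates() in this patch).
    for (ToyPage* p : *candidates) {
      if (p->evacuation_candidate) space->pages.remove(p);
    }

    // 2. Only now start the sweeper; nothing mutates the page list while it
    //    is being walked. (The sketch joins immediately to stay simple; V8
    //    may keep sweeping concurrently.)
    std::thread sweeper(SweepInBackground, space);
    sweeper.join();

    // 3. Release the unlinked candidates (ReleaseEvacuationCandidates() at
    //    the end of SweepSpaces() in this patch).
    for (ToyPage* p : *candidates) delete p;
    candidates->clear();
  }

  int main() {
    ToySpace space;
    std::vector<ToyPage*> candidates;
    for (int i = 0; i < 4; i++) {
      ToyPage* p = new ToyPage{i, i % 2 == 0};
      space.pages.push_back(p);
      if (p->evacuation_candidate) candidates.push_back(p);
    }
    SweepSpaces(&space, &candidates);
    for (ToyPage* p : space.pages) delete p;  // remaining non-candidate pages
    return 0;
  }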

BUG=

Review URL: https://codereview.chromium.org/12499004

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13886 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 38cc926a
@@ -585,13 +585,6 @@ bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
 }
 
-void MarkCompactCollector::FinalizeSweeping() {
-  ASSERT(sweeping_pending_ == false);
-  ReleaseEvacuationCandidates();
-  heap()->FreeQueuedChunks();
-}
-
 void MarkCompactCollector::MarkInParallel() {
   for (int i = 0; i < FLAG_marking_threads; i++) {
     heap()->isolate()->marking_threads()[i]->StartMarking();
@@ -911,7 +904,6 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
   if (IsConcurrentSweepingInProgress()) {
     // Instead of waiting we could also abort the sweeper threads here.
     WaitUntilSweepingCompleted();
-    FinalizeSweeping();
   }
 
   // Clear marking bits if incremental marking is aborted.
@@ -2849,6 +2841,7 @@ void MarkCompactCollector::EvacuatePages() {
         slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
         page->ClearEvacuationCandidate();
         page->SetFlag(Page::RESCAN_ON_EVACUATION);
+        page->InsertAfter(static_cast<PagedSpace*>(page->owner())->anchor());
       }
       return;
     }
@@ -3309,6 +3302,18 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
 }
 
+void MarkCompactCollector::UnlinkEvacuationCandidates() {
+  int npages = evacuation_candidates_.length();
+  for (int i = 0; i < npages; i++) {
+    Page* p = evacuation_candidates_[i];
+    if (!p->IsEvacuationCandidate()) continue;
+    p->Unlink();
+    p->ClearSweptPrecisely();
+    p->ClearSweptConservatively();
+  }
+}
+
 void MarkCompactCollector::ReleaseEvacuationCandidates() {
   int npages = evacuation_candidates_.length();
   for (int i = 0; i < npages; i++) {
@@ -3319,10 +3324,11 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
     p->set_scan_on_scavenge(false);
     slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
     p->ResetLiveBytes();
-    space->ReleasePage(p);
+    space->ReleasePage(p, false);
   }
   evacuation_candidates_.Rewind(0);
   compacting_ = false;
+  heap()->FreeQueuedChunks();
 }
@@ -3794,7 +3800,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
         // Adjust unswept free bytes because releasing a page expects said
        // counter to be accurate for unswept pages.
         space->IncreaseUnsweptFreeBytes(p);
-        space->ReleasePage(p);
+        space->ReleasePage(p, true);
         continue;
       }
       unused_page_present = true;
@@ -3899,6 +3905,10 @@ void MarkCompactCollector::SweepSpaces() {
   SweepSpace(heap()->old_pointer_space(), how_to_sweep);
   SweepSpace(heap()->old_data_space(), how_to_sweep);
 
+  // Unlink evacuation candidates before sweeper threads access the list of
+  // pages to avoid race condition.
+  UnlinkEvacuationCandidates();
+
   if (how_to_sweep == PARALLEL_CONSERVATIVE ||
       how_to_sweep == CONCURRENT_CONSERVATIVE) {
     // TODO(hpayer): fix race with concurrent sweeper
@@ -3924,9 +3934,7 @@ void MarkCompactCollector::SweepSpaces() {
   // Deallocate unmarked objects and clear marked bits for marked objects.
   heap_->lo_space()->FreeUnmarkedObjects();
 
-  if (how_to_sweep != CONCURRENT_CONSERVATIVE) {
-    FinalizeSweeping();
-  }
+  ReleaseEvacuationCandidates();
 }
...
@@ -690,8 +690,6 @@ class MarkCompactCollector {
   bool IsConcurrentSweepingInProgress();
 
-  void FinalizeSweeping();
-
   void set_sequential_sweeping(bool sequential_sweeping) {
     sequential_sweeping_ = sequential_sweeping;
   }
@@ -713,6 +711,7 @@ class MarkCompactCollector {
   void RemoveDeadInvalidatedCode();
   void ProcessInvalidatedCode(ObjectVisitor* visitor);
 
+  void UnlinkEvacuationCandidates();
   void ReleaseEvacuationCandidates();
 
   void StartSweeperThreads();
...
@@ -981,6 +981,7 @@ bool PagedSpace::CanExpand() {
   return true;
 }
 
+
 bool PagedSpace::Expand() {
   if (!CanExpand()) return false;
@@ -1045,7 +1046,7 @@ int PagedSpace::CountTotalPages() {
 }
 
-void PagedSpace::ReleasePage(Page* page) {
+void PagedSpace::ReleasePage(Page* page, bool unlink) {
   ASSERT(page->LiveBytes() == 0);
   ASSERT(AreaSize() == page->area_size());
@@ -1069,7 +1070,9 @@ void PagedSpace::ReleasePage(Page* page) {
     allocation_info_.top = allocation_info_.limit = NULL;
   }
 
-  page->Unlink();
+  if (unlink) {
+    page->Unlink();
+  }
   if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
     heap()->isolate()->memory_allocator()->Free(page);
   } else {
@@ -2555,7 +2558,6 @@ bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
     if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
       if (!collector->sequential_sweeping()) {
         collector->WaitUntilSweepingCompleted();
-        collector->FinalizeSweeping();
         return true;
       }
     }
...
@@ -1697,7 +1697,7 @@ class PagedSpace : public Space {
   }
 
   // Releases an unused page and shrinks the space.
-  void ReleasePage(Page* page);
+  void ReleasePage(Page* page, bool unlink);
 
   // The dummy page that anchors the linked list of pages.
   Page* anchor() { return &anchor_; }
...
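A note on the ReleasePage() change above: the new bool unlink parameter lets each
caller state whether the page is still linked into its space. Reading the two
call sites in this diff (the comments are my gloss, not additional V8 code):

  space->ReleasePage(p, true);   // SweepSpace(): the page is still on the
                                 // space's page list, so ReleasePage() unlinks it.
  space->ReleasePage(p, false);  // ReleaseEvacuationCandidates(): the page was
                                 // already taken off the list by
                                 // UnlinkEvacuationCandidates(), so only the
                                 // memory is released or queued for freeing.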