Commit 94dc1075 authored by hpayer@chromium.org


The sweeper thread should not write the page flags. Added a sweeping-complete phase in which the main thread writes the flags for the pages that were swept in parallel.

BUG=
R=jarin@chromium.org, jochen@chromium.org

Review URL: https://codereview.chromium.org/163683003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@19377 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 9ec8e586
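
For context, the pattern behind this change can be illustrated outside of V8: sweeper threads only advance an atomic per-page sweeping state and never touch the page flags, and the main thread later runs a sweeping-complete pass that writes the flags for every page a sweeper finished. The sketch below is a minimal, self-contained C++ illustration of that hand-off; the types and function names (Page, SweeperThread, ParallelSweepComplete) are made up for the example and are not the V8 code in the diff below.

// Minimal standalone sketch (not the V8 sources) of the hand-off described
// in the commit message: sweepers advance an atomic state word, the main
// thread writes the page flags afterwards. All names are illustrative.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

enum ParallelSweepingState { DONE, IN_PROGRESS, PENDING };

struct Page {
  std::atomic<int> sweeping_state{PENDING};
  bool swept_flag = false;  // Page flag: only the main thread writes this.
};

// Sweeper thread: claim a PENDING page and sweep it, but leave the page
// flag alone; the page stays IN_PROGRESS until the main thread finalizes it.
void SweeperThread(std::vector<Page>* pages) {
  for (Page& p : *pages) {
    int expected = PENDING;
    if (p.sweeping_state.compare_exchange_strong(expected, IN_PROGRESS)) {
      // ... sweep the page here ...
    }
  }
}

// Main thread, after waiting for the sweepers: write the flag for every
// page that a sweeper swept, then move it to DONE.
void ParallelSweepComplete(std::vector<Page>* pages) {
  for (Page& p : *pages) {
    if (p.sweeping_state.load() == IN_PROGRESS) {
      p.swept_flag = true;  // The flag write happens on the main thread only.
      p.sweeping_state.store(DONE);
    }
  }
}

int main() {
  std::vector<Page> pages(8);
  std::thread sweeper(SweeperThread, &pages);
  sweeper.join();               // Analogue of WaitUntilSweepingCompleted().
  ParallelSweepComplete(&pages);
  for (Page& p : pages) std::printf("%d ", p.swept_flag ? 1 : 0);
  std::printf("\n");
}
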
@@ -622,6 +622,7 @@ void MarkCompactCollector::WaitUntilSweepingCompleted() {
     pending_sweeper_jobs_semaphore_.Wait();
     pending_sweeper_jobs_semaphore_.Wait();
   }
+  ParallelSweepSpacesComplete();
   sweeping_pending_ = false;
   RefillFreeLists(heap()->paged_space(OLD_DATA_SPACE));
   RefillFreeLists(heap()->paged_space(OLD_POINTER_SPACE));
@@ -3946,7 +3947,11 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
          (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
           free_list == NULL));
 
-  p->MarkSweptConservatively();
+  // When parallel sweeping is active, the page will be marked after
+  // sweeping by the main thread.
+  if (mode != MarkCompactCollector::SWEEP_IN_PARALLEL) {
+    p->MarkSweptConservatively();
+  }
 
   intptr_t freed_bytes = 0;
   size_t size = 0;
@@ -4058,7 +4063,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   while (it.has_next()) {
     Page* p = it.next();
-    ASSERT(p->parallel_sweeping() == 0);
+    ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
     ASSERT(!p->IsEvacuationCandidate());
     // Clear sweeping flags indicating that marking bits are still intact.
@@ -4131,7 +4136,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
           PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
                  reinterpret_cast<intptr_t>(p));
         }
-        p->set_parallel_sweeping(1);
+        p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING);
         space->IncreaseUnsweptFreeBytes(p);
       }
       break;
@@ -4223,6 +4228,24 @@ void MarkCompactCollector::SweepSpaces() {
 }
 
 
+void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+    if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_IN_PROGRESS) {
+      p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE);
+      p->MarkSweptConservatively();
+    }
+  }
+}
+
+
+void MarkCompactCollector::ParallelSweepSpacesComplete() {
+  ParallelSweepSpaceComplete(heap()->old_pointer_space());
+  ParallelSweepSpaceComplete(heap()->old_data_space());
+}
+
+
 void MarkCompactCollector::EnableCodeFlushing(bool enable) {
 #ifdef ENABLE_DEBUGGER_SUPPORT
   if (isolate()->debug()->IsLoaded() ||
@@ -944,6 +944,12 @@ class MarkCompactCollector {
   void SweepSpace(PagedSpace* space, SweeperType sweeper);
 
+  // Finalizes the parallel sweeping phase. Marks all the pages that were
+  // swept in parallel.
+  void ParallelSweepSpacesComplete();
+
+  void ParallelSweepSpaceComplete(PagedSpace* space);
+
 #ifdef DEBUG
   friend class MarkObjectVisitor;
   static void VisitObject(HeapObject* obj);
@@ -483,7 +483,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_ = static_cast<int>(area_start - base);
-  chunk->parallel_sweeping_ = 0;
+  chunk->set_parallel_sweeping(PARALLEL_SWEEPING_DONE);
   chunk->available_in_small_free_list_ = 0;
   chunk->available_in_medium_free_list_ = 0;
   chunk->available_in_large_free_list_ = 0;
@@ -467,16 +467,32 @@ class MemoryChunk {
   // Return all current flags.
   intptr_t GetFlags() { return flags_; }
 
-  intptr_t parallel_sweeping() const {
-    return parallel_sweeping_;
+
+  // PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping.
+  // PARALLEL_SWEEPING_IN_PROGRESS - This page is currently swept or was
+  // swept by a sweeper thread.
+  // PARALLEL_SWEEPING_DONE - The page state when sweeping is complete or
+  // sweeping must not be performed on that page.
+  enum ParallelSweepingState {
+    PARALLEL_SWEEPING_DONE,
+    PARALLEL_SWEEPING_IN_PROGRESS,
+    PARALLEL_SWEEPING_PENDING
+  };
+
+  ParallelSweepingState parallel_sweeping() {
+    return static_cast<ParallelSweepingState>(
+        NoBarrier_Load(&parallel_sweeping_));
   }
 
-  void set_parallel_sweeping(intptr_t state) {
-    parallel_sweeping_ = state;
+  void set_parallel_sweeping(ParallelSweepingState state) {
+    NoBarrier_Store(&parallel_sweeping_, state);
   }
 
   bool TryParallelSweeping() {
-    return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
+    return NoBarrier_CompareAndSwap(&parallel_sweeping_,
+                                    PARALLEL_SWEEPING_PENDING,
+                                    PARALLEL_SWEEPING_IN_PROGRESS) ==
+        PARALLEL_SWEEPING_PENDING;
   }
 
   // Manage live byte count (count of bytes known to be live,
@@ -711,7 +727,7 @@ class MemoryChunk {
   // count highest number of bytes ever allocated on the page.
   int high_water_mark_;
 
-  intptr_t parallel_sweeping_;
+  AtomicWord parallel_sweeping_;
 
   // PagedSpace free-list statistics.
   intptr_t available_in_small_free_list_;