Commit 05ec0ff0 authored by hpayer's avatar hpayer Committed by Commit bot

Use a lock in pages to synchronize sweeper threads to allow others to wait on concurrently swept pages.

Use a lock in pages to synchronize sweeper threads to allow others to wait on concurrently swept pages.

BUG=

Review URL: https://codereview.chromium.org/1244353002

Cr-Commit-Position: refs/heads/master@{#29804}
parent 47d3bb1c
...@@ -4284,12 +4284,19 @@ int MarkCompactCollector::SweepInParallel(PagedSpace* space, ...@@ -4284,12 +4284,19 @@ int MarkCompactCollector::SweepInParallel(PagedSpace* space,
// Attempts to sweep a single page on a concurrent sweeper thread.
//
// The page's mutex serves as the ownership token: whichever thread holds it
// is the one sweeping the page, and other threads can rendezvous with the
// sweep by locking the same mutex (see MemoryChunk::WaitUntilSweepingCompleted).
//
// Returns the largest freed chunk size in bytes, or 0 if the page was
// already swept (or is being swept) by another thread.
int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
  int max_freed = 0;
  if (page->TryLock()) {
    // If this page was already swept in the meantime, we can return here.
    if (page->parallel_sweeping() != MemoryChunk::SWEEPING_PENDING) {
      page->mutex()->Unlock();
      return 0;
    }
    page->set_parallel_sweeping(MemoryChunk::SWEEPING_IN_PROGRESS);
    // Sweep into a thread-private free list first, then concatenate into the
    // shared old-space free list, to keep contention on the shared list low.
    FreeList* free_list = free_list_old_space_.get();
    FreeList private_free_list(space);
    max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
                      IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
    free_list->Concatenate(&private_free_list);
    // Release the page lock so waiters in WaitUntilSweepingCompleted wake up.
    page->mutex()->Unlock();
  }
  return max_freed;
}
......
...@@ -142,6 +142,7 @@ void MemoryAllocator::UnprotectChunkFromPage(Page* page) { ...@@ -142,6 +142,7 @@ void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable, Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
PagedSpace* owner) { PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk); Page* page = reinterpret_cast<Page*>(chunk);
page->mutex_ = new base::Mutex();
DCHECK(page->area_size() <= kMaxRegularHeapObjectSize); DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
DCHECK(chunk->owner() == owner); DCHECK(chunk->owner() == owner);
owner->IncreaseCapacity(page->area_size()); owner->IncreaseCapacity(page->area_size());
......
...@@ -499,6 +499,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size, ...@@ -499,6 +499,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->progress_bar_ = 0; chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<int>(area_start - base); chunk->high_water_mark_ = static_cast<int>(area_start - base);
chunk->set_parallel_sweeping(SWEEPING_DONE); chunk->set_parallel_sweeping(SWEEPING_DONE);
chunk->mutex_ = NULL;
chunk->available_in_small_free_list_ = 0; chunk->available_in_small_free_list_ = 0;
chunk->available_in_medium_free_list_ = 0; chunk->available_in_medium_free_list_ = 0;
chunk->available_in_large_free_list_ = 0; chunk->available_in_large_free_list_ = 0;
...@@ -765,6 +766,7 @@ void MemoryAllocator::Free(MemoryChunk* chunk) { ...@@ -765,6 +766,7 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
delete chunk->slots_buffer(); delete chunk->slots_buffer();
delete chunk->skip_list(); delete chunk->skip_list();
delete chunk->mutex();
base::VirtualMemory* reservation = chunk->reserved_memory(); base::VirtualMemory* reservation = chunk->reserved_memory();
if (reservation->IsReserved()) { if (reservation->IsReserved()) {
......
...@@ -452,10 +452,17 @@ class MemoryChunk { ...@@ -452,10 +452,17 @@ class MemoryChunk {
base::Release_Store(&parallel_sweeping_, state); base::Release_Store(&parallel_sweeping_, state);
} }
bool TryParallelSweeping() { bool TryLock() { return mutex_->TryLock(); }
return base::Acquire_CompareAndSwap(&parallel_sweeping_, SWEEPING_PENDING,
SWEEPING_IN_PROGRESS) == base::Mutex* mutex() { return mutex_; }
SWEEPING_PENDING;
// WaitUntilSweepingCompleted only works when concurrent sweeping is in
// progress. In particular, when we know that right before this call a
// sweeper thread was sweeping this page.
void WaitUntilSweepingCompleted() {
mutex_->Lock();
mutex_->Unlock();
DCHECK(SweepingCompleted());
} }
bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; } bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; }
...@@ -537,9 +544,15 @@ class MemoryChunk { ...@@ -537,9 +544,15 @@ class MemoryChunk {
static const size_t kWriteBarrierCounterOffset = static const size_t kWriteBarrierCounterOffset =
kSlotsBufferOffset + kPointerSize + kPointerSize; kSlotsBufferOffset + kPointerSize + kPointerSize;
static const size_t kHeaderSize = static const size_t kHeaderSize = kWriteBarrierCounterOffset +
kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize + kPointerSize + // write_barrier_counter_
kPointerSize + 5 * kPointerSize + kPointerSize + kPointerSize; kIntSize + // progress_bar_
kIntSize + // high_water_mark_
kPointerSize + // mutex_ page lock
kPointerSize + // parallel_sweeping_
5 * kPointerSize + // free list statistics
kPointerSize + // next_chunk_
kPointerSize; // prev_chunk_
static const int kBodyOffset = static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
...@@ -675,6 +688,7 @@ class MemoryChunk { ...@@ -675,6 +688,7 @@ class MemoryChunk {
// count highest number of bytes ever allocated on the page. // count highest number of bytes ever allocated on the page.
int high_water_mark_; int high_water_mark_;
base::Mutex* mutex_;
base::AtomicWord parallel_sweeping_; base::AtomicWord parallel_sweeping_;
// PagedSpace free-list statistics. // PagedSpace free-list statistics.
......
...@@ -472,10 +472,9 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) { ...@@ -472,10 +472,9 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
heap_->mark_compact_collector()->SweepInParallel(page, owner); heap_->mark_compact_collector()->SweepInParallel(page, owner);
if (!page->SweepingCompleted()) { if (!page->SweepingCompleted()) {
// We were not able to sweep that page, i.e., a concurrent // We were not able to sweep that page, i.e., a concurrent
// sweeper thread currently owns this page. // sweeper thread currently owns this page. Wait for the sweeper
// TODO(hpayer): This may introduce a huge pause here. We // thread to be done with this page.
// just care about finish sweeping of the scan on scavenge page. page->WaitUntilSweepingCompleted();
heap_->mark_compact_collector()->EnsureSweepingCompleted();
} }
} }
CHECK(page->owner() == heap_->old_space()); CHECK(page->owner() == heap_->old_space());
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment