Commit 44caabb1 authored by hpayer@chromium.org

Parallel and concurrent sweeping.

Sweep the old pointer space and the old data space concurrently with the main mutator thread, and in parallel across the sweeper threads.

BUG=

Review URL: https://codereview.chromium.org/11782028

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13552 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent c027397c
......@@ -417,6 +417,10 @@ DEFINE_bool(trace_incremental_marking, false,
"trace progress of the incremental marking")
DEFINE_bool(track_gc_object_stats, false,
"track object counts and memory usage")
DEFINE_bool(parallel_sweeping, false, "enable parallel sweeping")
DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping")
DEFINE_int(sweeper_threads, 1,
"number of parallel and concurrent sweeping threads")
#ifdef VERIFY_HEAP
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
#endif
......
......@@ -1308,7 +1308,8 @@ void Heap::Scavenge() {
incremental_marking()->PrepareForScavenge();
AdvanceSweepers(static_cast<int>(new_space_.Size()));
paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
......@@ -5420,9 +5421,9 @@ bool Heap::IdleNotification(int hint) {
// 3. many lazy sweep steps.
// Use mark-sweep-compact events to count incremental GCs in a round.
if (incremental_marking()->IsStopped()) {
if (!IsSweepingComplete() &&
if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
!IsSweepingComplete() &&
!AdvanceSweepers(static_cast<int>(step_size))) {
return false;
}
......
......@@ -1636,6 +1636,7 @@ class Heap {
}
bool AdvanceSweepers(int step_size) {
ASSERT(!FLAG_parallel_sweeping && !FLAG_concurrent_sweeping);
bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
return sweeping_complete;
......
......@@ -49,6 +49,7 @@
#include "simulator.h"
#include "spaces.h"
#include "stub-cache.h"
#include "sweeper-thread.h"
#include "version.h"
#include "vm-state-inl.h"
......@@ -1699,6 +1700,7 @@ Isolate::Isolate()
#undef ISOLATE_INIT_ARRAY_EXECUTE
}
void Isolate::TearDown() {
TRACE_ISOLATE(tear_down);
......@@ -1734,6 +1736,14 @@ void Isolate::Deinit() {
if (state_ == INITIALIZED) {
TRACE_ISOLATE(deinit);
if (FLAG_concurrent_sweeping || FLAG_parallel_sweeping) {
for (int i = 0; i < FLAG_sweeper_threads; i++) {
sweeper_thread_[i]->Stop();
delete sweeper_thread_[i];
}
delete[] sweeper_thread_;
}
if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();
if (FLAG_hydrogen_stats) HStatistics::Instance()->Print();
......@@ -2103,6 +2113,17 @@ bool Isolate::Init(Deserializer* des) {
}
if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
if (FLAG_parallel_sweeping || FLAG_concurrent_sweeping) {
if (FLAG_sweeper_threads < 1) {
FLAG_sweeper_threads = 1;
}
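// The helper threads are created and started eagerly here, but each one
// immediately blocks in Run() on its start-sweeping semaphore until the
// collector signals work (see sweeper-thread.cc below).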
sweeper_thread_ = new SweeperThread*[FLAG_sweeper_threads];
for (int i = 0; i < FLAG_sweeper_threads; i++) {
sweeper_thread_[i] = new SweeperThread(this);
sweeper_thread_[i]->Start();
}
}
return true;
}
......
......@@ -78,6 +78,7 @@ class UnicodeCache;
class ConsStringIteratorOp;
class StringTracker;
class StubCache;
class SweeperThread;
class ThreadManager;
class ThreadState;
class ThreadVisitor; // Defined in v8threads.h
......@@ -1078,6 +1079,10 @@ class Isolate {
// TODO(svenpanne) This method is on death row...
static v8::Isolate* GetDefaultIsolateForLocking();
SweeperThread** sweeper_threads() {
return sweeper_thread_;
}
private:
Isolate();
......@@ -1301,11 +1306,13 @@ class Isolate {
DeferredHandles* deferred_handles_head_;
OptimizingCompilerThread optimizing_compiler_thread_;
SweeperThread** sweeper_thread_;
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class IsolateInitializer;
friend class OptimizingCompilerThread;
friend class SweeperThread;
friend class ThreadManager;
friend class Simulator;
friend class StackGuard;
......
This diff is collapsed.
......@@ -594,9 +594,15 @@ class MarkCompactCollector {
enum SweeperType {
CONSERVATIVE,
LAZY_CONSERVATIVE,
PARALLEL_CONSERVATIVE,
PRECISE
};
enum SweepingParallelism {
SWEEP_SEQUENTIALLY,
SWEEP_IN_PARALLEL
};
#ifdef VERIFY_HEAP
void VerifyMarkbitsAreClean();
static void VerifyMarkbitsAreClean(PagedSpace* space);
......@@ -605,7 +611,10 @@ class MarkCompactCollector {
// Sweep a single page from the given space conservatively.
// Return a number of reclaimed bytes.
static intptr_t SweepConservatively(PagedSpace* space, Page* p);
template<SweepingParallelism type>
static intptr_t SweepConservatively(PagedSpace* space,
FreeList* free_list,
Page* p);
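// The template parameter selects where reclaimed memory is accumulated.
// The SWEEP_SEQUENTIALLY instantiation passes free_list == NULL and
// rebuilds the owner's free list directly, as PagedSpace::AdvanceSweeper
// does below. The parallel call site lives in the collapsed
// mark-compact.cc diff, so the second call here is an assumed
// illustration based on the SweepInParallel signature:
//
//   SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, page);
//   SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, page);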
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
return Page::FromAddress(reinterpret_cast<Address>(anchor))->
......@@ -671,6 +680,16 @@ class MarkCompactCollector {
MarkingParity marking_parity() { return marking_parity_; }
void SweepInParallel(PagedSpace* space,
FreeList* private_free_list,
FreeList* free_list);
void WaitUntilSweepingCompleted();
intptr_t StealMemoryFromSweeperThreads(PagedSpace* space);
bool AreSweeperThreadsActivated();
private:
MarkCompactCollector();
~MarkCompactCollector();
......@@ -679,6 +698,7 @@ class MarkCompactCollector {
void RemoveDeadInvalidatedCode();
void ProcessInvalidatedCode(ObjectVisitor* visitor);
void StartSweeperThreads();
#ifdef DEBUG
enum CollectorState {
......
......@@ -466,6 +466,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<int>(area_start - base);
chunk->parallel_sweeping_ = 0;
chunk->ResetLiveBytes();
Bitmap::Clear(chunk);
chunk->initialize_scan_on_scavenge(false);
......@@ -2041,6 +2042,29 @@ void FreeListNode::set_next(FreeListNode* next) {
}
intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
intptr_t free_bytes = 0;
if (category->top_ != NULL) {
ASSERT(category->end_ != NULL);
// Taking both locks is safe (it cannot deadlock) because Concatenate
// operations are never performed on the same pair of free lists at the
// same time in opposite lock order.
ScopedLock lock_target(mutex_);
ScopedLock lock_source(category->mutex());
free_bytes = category->available();
if (end_ == NULL) {
end_ = category->end();
} else {
category->end()->set_next(top_);
}
top_ = category->top();
available_ += category->available();
category->Reset();
}
return free_bytes;
}
void FreeListCategory::Reset() {
top_ = NULL;
end_ = NULL;
......@@ -2139,6 +2163,16 @@ FreeList::FreeList(PagedSpace* owner)
}
intptr_t FreeList::Concatenate(FreeList* free_list) {
intptr_t free_bytes = 0;
free_bytes += small_list_.Concatenate(free_list->small_list());
free_bytes += medium_list_.Concatenate(free_list->medium_list());
free_bytes += large_list_.Concatenate(free_list->large_list());
free_bytes += huge_list_.Concatenate(free_list->huge_list());
return free_bytes;
}
void FreeList::Reset() {
small_list_.Reset();
medium_list_.Reset();
......@@ -2503,7 +2537,10 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
reinterpret_cast<intptr_t>(p));
}
DecreaseUnsweptFreeBytes(p);
freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
freed_bytes +=
MarkCompactCollector::
SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
this, NULL, p);
}
p = next_page;
} while (p != anchor() && freed_bytes < bytes_to_sweep);
......@@ -2535,6 +2572,21 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
}
bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->AreSweeperThreadsActivated()) {
if (FLAG_concurrent_sweeping &&
collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
collector->WaitUntilSweepingCompleted();
return true;
}
return false;
} else {
return AdvanceSweeper(size_in_bytes);
}
}
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Allocation in this space has failed.
......@@ -2544,7 +2596,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
bool sweeping_complete = false;
for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
sweeping_complete = AdvanceSweeper(size_in_bytes);
sweeping_complete = EnsureSweeperProgress(size_in_bytes);
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
......@@ -2567,7 +2619,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Last ditch, sweep all the remaining pages to try to find space. This may
// cause a pause.
if (!IsSweepingComplete()) {
AdvanceSweeper(kMaxInt);
EnsureSweeperProgress(kMaxInt);
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
......
......@@ -454,6 +454,18 @@ class MemoryChunk {
// Return all current flags.
intptr_t GetFlags() { return flags_; }
intptr_t parallel_sweeping() const {
return parallel_sweeping_;
}
void set_parallel_sweeping(intptr_t state) {
parallel_sweeping_ = state;
}
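// Assumed encoding, inferred from the zero-initialization in
// MemoryChunk::Initialize and the compare-and-swap below: 1 means the
// page is pending parallel sweeping, 0 means it is unscheduled or
// already claimed, so exactly one sweeper thread can claim a page.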
bool TryParallelSweeping() {
return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
}
// Manage live byte count (count of bytes known to be live,
// because they are marked black).
void ResetLiveBytes() {
......@@ -533,8 +545,8 @@ class MemoryChunk {
static const size_t kWriteBarrierCounterOffset =
kSlotsBufferOffset + kPointerSize + kPointerSize;
static const size_t kHeaderSize =
kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize;
static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
kIntSize + kIntSize + kPointerSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
......@@ -686,6 +698,8 @@ class MemoryChunk {
// count highest number of bytes ever allocated on the page.
int high_water_mark_;
intptr_t parallel_sweeping_;
static MemoryChunk* Initialize(Heap* heap,
Address base,
size_t size,
......@@ -1395,7 +1409,17 @@ class FreeListNode: public HeapObject {
// the end element of the linked list of free memory blocks.
class FreeListCategory {
public:
FreeListCategory() : top_(NULL), end_(NULL), available_(0) {}
FreeListCategory() :
top_(NULL),
end_(NULL),
mutex_(OS::CreateMutex()),
available_(0) {}
~FreeListCategory() {
delete mutex_;
}
intptr_t Concatenate(FreeListCategory* category);
void Reset();
......@@ -1421,6 +1445,8 @@ class FreeListCategory {
int available() const { return available_; }
void set_available(int available) { available_ = available; }
Mutex* mutex() { return mutex_; }
#ifdef DEBUG
intptr_t SumFreeList();
int FreeListLength();
......@@ -1429,6 +1455,7 @@ class FreeListCategory {
private:
FreeListNode* top_;
FreeListNode* end_;
Mutex* mutex_;
// Total available bytes in all blocks of this free list category.
int available_;
......@@ -1462,6 +1489,8 @@ class FreeList BASE_EMBEDDED {
public:
explicit FreeList(PagedSpace* owner);
intptr_t Concatenate(FreeList* free_list);
// Clear the free list.
void Reset();
......@@ -1509,6 +1538,11 @@ class FreeList BASE_EMBEDDED {
intptr_t EvictFreeListItems(Page* p);
FreeListCategory* small_list() { return &small_list_; }
FreeListCategory* medium_list() { return &medium_list_; }
FreeListCategory* large_list() { return &large_list_; }
FreeListCategory* huge_list() { return &huge_list_; }
private:
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
......@@ -1723,6 +1757,11 @@ class PagedSpace : public Space {
bool AdvanceSweeper(intptr_t bytes_to_sweep);
// When parallel sweeper threads are active, this function waits for them
// to complete; otherwise AdvanceSweeper is called with size_in_bytes.
bool EnsureSweeperProgress(intptr_t size_in_bytes);
bool IsSweepingComplete() {
return !first_unswept_page_->is_valid();
}
......@@ -1747,6 +1786,12 @@ class PagedSpace : public Space {
}
protected:
FreeList* free_list() { return &free_list_; }
void AddToAccountingStats(intptr_t bytes) {
accounting_stats_.DeallocateBytes(bytes);
}
int area_size_;
// Maximum capacity of this space.
......@@ -1796,6 +1841,7 @@ class PagedSpace : public Space {
MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
friend class PageIterator;
friend class SweeperThread;
};
......
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "sweeper-thread.h"
#include "v8.h"
#include "isolate.h"
#include "v8threads.h"
namespace v8 {
namespace internal {
SweeperThread::SweeperThread(Isolate* isolate)
: Thread("SweeperThread"),
isolate_(isolate),
heap_(isolate->heap()),
collector_(heap_->mark_compact_collector()),
start_sweeping_semaphore_(OS::CreateSemaphore(0)),
end_sweeping_semaphore_(OS::CreateSemaphore(0)),
stop_semaphore_(OS::CreateSemaphore(0)),
free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
free_list_old_pointer_space_(heap_->paged_space(OLD_POINTER_SPACE)),
private_free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
private_free_list_old_pointer_space_(
heap_->paged_space(OLD_POINTER_SPACE)) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
}
bool SweeperThread::sweeping_pending_ = false;
void SweeperThread::Run() {
Isolate::SetIsolateThreadLocals(isolate_, NULL);
while (true) {
start_sweeping_semaphore_->Wait();
if (Acquire_Load(&stop_thread_)) {
stop_semaphore_->Signal();
return;
}
collector_->SweepInParallel(heap_->old_data_space(),
&private_free_list_old_data_space_,
&free_list_old_data_space_);
collector_->SweepInParallel(heap_->old_pointer_space(),
&private_free_list_old_pointer_space_,
&free_list_old_pointer_space_);
end_sweeping_semaphore_->Signal();
}
}
intptr_t SweeperThread::StealMemory(PagedSpace* space) {
intptr_t free_bytes = 0;
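// Move whatever has been swept for this space from the shared free list
// into the space's own free list, and credit the reclaimed bytes to the
// space's accounting stats.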
if (space->identity() == OLD_POINTER_SPACE) {
free_bytes = space->free_list()->Concatenate(&free_list_old_pointer_space_);
space->AddToAccountingStats(free_bytes);
} else if (space->identity() == OLD_DATA_SPACE) {
free_bytes = space->free_list()->Concatenate(&free_list_old_data_space_);
space->AddToAccountingStats(free_bytes);
}
return free_bytes;
}
void SweeperThread::Stop() {
Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
start_sweeping_semaphore_->Signal();
stop_semaphore_->Wait();
}
void SweeperThread::StartSweeping() {
start_sweeping_semaphore_->Signal();
}
void SweeperThread::WaitForSweeperThread() {
end_sweeping_semaphore_->Wait();
}
} } // namespace v8::internal
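For orientation, a sketch of the collector-side driver implied by the thread API above; the real StartSweeperThreads and WaitUntilSweepingCompleted bodies are in the collapsed mark-compact.cc diff, so this reconstruction is an assumption:
// Broadcast the start signal to every sweeper thread, then join them all
// once the mutator needs sweeping to be finished; mirrors the semaphore
// protocol in SweeperThread::Run above.
void MarkCompactCollector::StartSweeperThreads() {
  SweeperThread::set_sweeping_pending(true);
  for (int i = 0; i < FLAG_sweeper_threads; i++) {
    heap()->isolate()->sweeper_threads()[i]->StartSweeping();
  }
}
void MarkCompactCollector::WaitUntilSweepingCompleted() {
  if (SweeperThread::sweeping_pending()) {
    for (int i = 0; i < FLAG_sweeper_threads; i++) {
      heap()->isolate()->sweeper_threads()[i]->WaitForSweeperThread();
    }
    SweeperThread::set_sweeping_pending(false);
  }
}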
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_SWEEPER_THREAD_H_
#define V8_SWEEPER_THREAD_H_
#include "atomicops.h"
#include "flags.h"
#include "platform.h"
#include "v8utils.h"
#include "spaces.h"
#include "heap.h"
namespace v8 {
namespace internal {
class SweeperThread : public Thread {
public:
explicit SweeperThread(Isolate* isolate);
void Run();
void Stop();
void StartSweeping();
void WaitForSweeperThread();
intptr_t StealMemory(PagedSpace* space);
static bool sweeping_pending() { return sweeping_pending_; }
static void set_sweeping_pending(bool sweeping_pending) {
sweeping_pending_ = sweeping_pending;
}
~SweeperThread() {
delete start_sweeping_semaphore_;
delete end_sweeping_semaphore_;
delete stop_semaphore_;
}
private:
Isolate* isolate_;
Heap* heap_;
MarkCompactCollector* collector_;
Semaphore* start_sweeping_semaphore_;
Semaphore* end_sweeping_semaphore_;
Semaphore* stop_semaphore_;
FreeList free_list_old_data_space_;
FreeList free_list_old_pointer_space_;
FreeList private_free_list_old_data_space_;
FreeList private_free_list_old_pointer_space_;
volatile AtomicWord stop_thread_;
static bool sweeping_pending_;
};
} } // namespace v8::internal
#endif // V8_SWEEPER_THREAD_H_
......@@ -461,6 +461,8 @@
'../../src/strtod.h',
'../../src/stub-cache.cc',
'../../src/stub-cache.h',
'../../src/sweeper-thread.h',
'../../src/sweeper-thread.cc',
'../../src/token.cc',
'../../src/token.h',
'../../src/transitions-inl.h',
......