Commit 6b1ae170 authored by jochen@chromium.org

Move the management of the already swept pages to MarkCompactCollector

That way, the MarkCompactCollector doesn't need to know what state the sweeper
threads are in when it steals free memory from them.

BUG=v8:3104
R=hpayer@chromium.org
LOG=n

Review URL: https://codereview.chromium.org/143283002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18706 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 342b534e
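
Below is a minimal, self-contained C++ sketch (not V8 code) of the ownership change the message describes: after this commit the collector owns the shared free lists, so reclaiming already-swept memory is a single Concatenate per space and the collector never has to inspect the sweeper threads. All names here (ToyFreeList, ToyCollector, Space) are hypothetical stand-ins for FreeList, MarkCompactCollector and PagedSpace.

// Sketch only: models the post-commit design under the assumptions above.
#include <cstdint>
#include <iostream>
#include <mutex>

// Stand-in for FreeList: a thread-safe byte counter with Concatenate
// semantics (move everything from |other| into this list, report the bytes).
class ToyFreeList {
 public:
  void Free(int64_t bytes) {
    std::lock_guard<std::mutex> lock(mutex_);
    available_ += bytes;
  }
  int64_t Concatenate(ToyFreeList* other) {
    int64_t moved;
    {
      std::lock_guard<std::mutex> lock(other->mutex_);
      moved = other->available_;
      other->available_ = 0;
    }
    std::lock_guard<std::mutex> lock(mutex_);
    available_ += moved;
    return moved;
  }

 private:
  std::mutex mutex_;
  int64_t available_ = 0;
};

enum Space { kOldData, kOldPointer };

// Stand-in for MarkCompactCollector after the change: one collector-owned
// free list per space. Sweeper threads deposit freed memory here; the main
// thread steals it with a plain Concatenate into the space's own list.
class ToyCollector {
 public:
  // Called from sweeper threads while sweeping pages of |space|.
  void SweepInParallel(Space space, int64_t freed_bytes) {
    SharedList(space)->Free(freed_bytes);
  }
  // Called from the main thread; no knowledge of sweeper-thread state needed.
  int64_t StealMemoryFromSweeperThreads(Space space, ToyFreeList* space_list) {
    return space_list->Concatenate(SharedList(space));
  }

 private:
  ToyFreeList* SharedList(Space space) {
    return space == kOldPointer ? &old_pointer_list_ : &old_data_list_;
  }
  ToyFreeList old_data_list_;
  ToyFreeList old_pointer_list_;
};

int main() {
  ToyCollector collector;
  ToyFreeList old_data_space_list;
  collector.SweepInParallel(kOldData, 4096);  // as if from sweeper thread 1
  collector.SweepInParallel(kOldData, 8192);  // as if from sweeper thread 2
  int64_t stolen =
      collector.StealMemoryFromSweeperThreads(kOldData, &old_data_space_list);
  std::cout << "stole " << stolen << " bytes\n";  // prints: stole 12288 bytes
  return 0;
}
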
@@ -6531,6 +6531,8 @@ bool Heap::SetUp() {
   store_buffer()->SetUp();
+  mark_compact_collector()->SetUp();
   if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
   return true;
@@ -348,6 +348,12 @@ static void VerifyNativeContextSeparation(Heap* heap) {
 #endif
+void MarkCompactCollector::SetUp() {
+  free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
+  free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
+}
 void MarkCompactCollector::TearDown() {
   AbortCompaction();
 }
@@ -586,10 +592,10 @@ void MarkCompactCollector::WaitUntilSweepingCompleted() {
 intptr_t MarkCompactCollector::
     StealMemoryFromSweeperThreads(PagedSpace* space) {
-  intptr_t freed_bytes = 0;
-  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
-    freed_bytes += isolate()->sweeper_threads()[i]->StealMemory(space);
-  }
+  FreeList* free_list = space == heap()->old_pointer_space()
+                            ? free_list_old_pointer_space_.get()
+                            : free_list_old_data_space_.get();
+  intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
   space->AddToAccountingStats(freed_bytes);
   space->DecrementUnsweptFreeBytes(freed_bytes);
   return freed_bytes;
@@ -3971,9 +3977,11 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
 void MarkCompactCollector::SweepInParallel(PagedSpace* space,
-                                           FreeList* private_free_list,
-                                           FreeList* free_list) {
+                                           FreeList* private_free_list) {
   PageIterator it(space);
+  FreeList* free_list = space == heap()->old_pointer_space()
+                            ? free_list_old_pointer_space_.get()
+                            : free_list_old_data_space_.get();
   while (it.has_next()) {
     Page* p = it.next();
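
The new SweepInParallel signature drops the shared free-list parameter: each sweeper thread still passes its own private list, while the shared, collector-owned list is looked up by space inside the function. A minimal multi-threaded sketch of that calling convention follows; it is not V8 code, the small/large split is purely illustrative, and the mutex in ToySharedList is this sketch's assumption about how concurrent hand-offs might be guarded.

// Sketch only: private per-thread accumulation, one synchronized hand-off
// into a shared list that several sweeper threads feed.
#include <cstdint>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

// Shared, collector-owned list: guarded by a mutex because several sweeper
// threads concatenate into it while the main thread may drain it.
class ToySharedList {
 public:
  void Concatenate(int64_t bytes) {
    std::lock_guard<std::mutex> lock(mutex_);
    available_ += bytes;
  }
  int64_t Drain() {
    std::lock_guard<std::mutex> lock(mutex_);
    int64_t bytes = available_;
    available_ = 0;
    return bytes;
  }

 private:
  std::mutex mutex_;
  int64_t available_ = 0;
};

// One sweeper thread's work: sweep some "pages", keep small chunks on a
// private tally (no locking needed), push the rest to the shared list.
void SweepPages(const std::vector<int64_t>& pages, int64_t* private_bytes,
                ToySharedList* shared) {
  int64_t batch = 0;
  for (int64_t freed : pages) {
    if (freed < 256) {
      *private_bytes += freed;  // only this thread reads or writes it
    } else {
      batch += freed;
    }
  }
  shared->Concatenate(batch);  // one synchronized hand-off per thread
}

int main() {
  ToySharedList shared;
  int64_t private_a = 0, private_b = 0;
  std::thread a(SweepPages, std::vector<int64_t>{100, 4096, 200}, &private_a,
                &shared);
  std::thread b(SweepPages, std::vector<int64_t>{8192, 50}, &private_b,
                &shared);
  a.join();
  b.join();
  // Main thread "steals" whatever has been handed off so far.
  std::cout << "shared: " << shared.Drain() << " bytes, private: "
            << private_a + private_b << " bytes\n";  // 12288 and 350
  return 0;
}
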
@@ -571,6 +571,8 @@ class MarkCompactCollector {
   static void Initialize();
+  void SetUp();
   void TearDown();
   void CollectEvacuationCandidates(PagedSpace* space);
@@ -716,8 +718,7 @@ class MarkCompactCollector {
   // Concurrent and parallel sweeping support.
   void SweepInParallel(PagedSpace* space,
-                       FreeList* private_free_list,
-                       FreeList* free_list);
+                       FreeList* private_free_list);
   void WaitUntilSweepingCompleted();
@@ -957,6 +958,9 @@ class MarkCompactCollector {
   List<Page*> evacuation_candidates_;
   List<Code*> invalidated_code_;
+  SmartPointer<FreeList> free_list_old_data_space_;
+  SmartPointer<FreeList> free_list_old_pointer_space_;
   friend class Heap;
 };
@@ -1576,7 +1576,7 @@ class FreeListCategory {
 // These spaces are call large.
 // At least 16384 words. This list is for objects of 2048 words or larger.
 // Empty pages are added to this list. These spaces are called huge.
-class FreeList BASE_EMBEDDED {
+class FreeList {
  public:
   explicit FreeList(PagedSpace* owner);
@@ -1945,7 +1945,7 @@ class PagedSpace : public Space {
   MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
   friend class PageIterator;
-  friend class SweeperThread;
+  friend class MarkCompactCollector;
 };
@@ -45,8 +45,6 @@ SweeperThread::SweeperThread(Isolate* isolate)
       start_sweeping_semaphore_(0),
       end_sweeping_semaphore_(0),
       stop_semaphore_(0),
-      free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
-      free_list_old_pointer_space_(heap_->paged_space(OLD_POINTER_SPACE)),
       private_free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
       private_free_list_old_pointer_space_(
           heap_->paged_space(OLD_POINTER_SPACE)) {
@@ -69,26 +67,14 @@ void SweeperThread::Run() {
     }
     collector_->SweepInParallel(heap_->old_data_space(),
-                                &private_free_list_old_data_space_,
-                                &free_list_old_data_space_);
+                                &private_free_list_old_data_space_);
     collector_->SweepInParallel(heap_->old_pointer_space(),
-                                &private_free_list_old_pointer_space_,
-                                &free_list_old_pointer_space_);
+                                &private_free_list_old_pointer_space_);
     end_sweeping_semaphore_.Signal();
   }
 }
-intptr_t SweeperThread::StealMemory(PagedSpace* space) {
-  if (space->identity() == OLD_POINTER_SPACE) {
-    return space->free_list()->Concatenate(&free_list_old_pointer_space_);
-  } else if (space->identity() == OLD_DATA_SPACE) {
-    return space->free_list()->Concatenate(&free_list_old_data_space_);
-  }
-  return 0;
-}
 void SweeperThread::Stop() {
   Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
   start_sweeping_semaphore_.Signal();
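
With the shared lists owned by the collector, SweeperThread::StealMemory and the per-thread shared FreeList members become dead weight, which is what this hunk removes. The sketch below (not V8 code; OldStyleWorker and NewStyleCollector are hypothetical) contrasts the two main-thread paths: before, the collector had to walk every sweeper thread and ask each one for its list; after, a single drain of the collector-owned list suffices.

// Sketch only: old thread-walking steal vs. new single-owner steal.
#include <cstdint>
#include <iostream>
#include <vector>

struct OldStyleWorker {
  int64_t reclaimed_bytes = 0;
  // The collector had to know about each worker to call this.
  int64_t StealMemory() {
    int64_t bytes = reclaimed_bytes;
    reclaimed_bytes = 0;
    return bytes;
  }
};

// Old path: iterate over all sweeper threads.
int64_t StealOld(std::vector<OldStyleWorker>* workers) {
  int64_t freed = 0;
  for (OldStyleWorker& w : *workers) freed += w.StealMemory();
  return freed;
}

// New path: the collector owns the list, so one drain suffices and the
// workers never need to be consulted.
struct NewStyleCollector {
  int64_t shared_free_bytes = 0;
  int64_t Steal() {
    int64_t bytes = shared_free_bytes;
    shared_free_bytes = 0;
    return bytes;
  }
};

int main() {
  std::vector<OldStyleWorker> workers(2);
  workers[0].reclaimed_bytes = 4096;
  workers[1].reclaimed_bytes = 8192;
  std::cout << "old path: " << StealOld(&workers) << " bytes\n";

  NewStyleCollector collector;
  collector.shared_free_bytes = 12288;  // filled by sweeps as they happen
  std::cout << "new path: " << collector.Steal() << " bytes\n";
  return 0;
}
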
@@ -49,7 +49,6 @@ class SweeperThread : public Thread {
   void Stop();
   void StartSweeping();
   void WaitForSweeperThread();
-  intptr_t StealMemory(PagedSpace* space);
   static int NumberOfThreads(int max_available);
@@ -60,8 +59,6 @@ class SweeperThread : public Thread {
   Semaphore start_sweeping_semaphore_;
   Semaphore end_sweeping_semaphore_;
   Semaphore stop_semaphore_;
-  FreeList free_list_old_data_space_;
-  FreeList free_list_old_pointer_space_;
   FreeList private_free_list_old_data_space_;
   FreeList private_free_list_old_pointer_space_;
   volatile AtomicWord stop_thread_;