Commit ce916ff9 authored by hpayer@chromium.org

Allow main thread to contribute to the sweeping phase.

BUG=
R=jarin@chromium.org

Review URL: https://codereview.chromium.org/380653003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22318 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 4057ed91
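
In outline, the patch lets the allocating (main) thread make sweeping progress itself instead of only blocking on the sweeper threads: the allocation slow path first sweeps unswept pages sized to the current request, and only waits for the sweeper threads if that did not free a large enough contiguous block. Below is a minimal, self-contained sketch of that control flow; `Collector` and the free-standing `EnsureSweepingProgress` are simplified stand-ins, not V8's `MarkCompactCollector` or `PagedSpace::EnsureSweepingProgress` (the real code appears in the diff below).

```cpp
// Hypothetical, simplified model of the allocation slow path after this
// change (not V8 code): sweep on the main thread first, sized to the
// current request, and only block on the sweeper threads if that did not
// free a large-enough contiguous block.
#include <cstdio>

struct Collector {
  bool sweeping_in_progress = true;

  // Stand-in for MarkCompactCollector::SweepInParallel(space, bytes); the
  // real version sweeps unswept pages and returns the largest block freed.
  int SweepInParallel(int /*required_freed_bytes*/) { return 4096; }

  void WaitUntilSweepingCompleted() { sweeping_in_progress = false; }
};

// Mirrors the shape of PagedSpace::EnsureSweepingProgress(size_in_bytes).
bool EnsureSweepingProgress(Collector* collector, int size_in_bytes) {
  if (collector->sweeping_in_progress) {
    // 1. Contribute to sweeping on this (the main) thread.
    if (collector->SweepInParallel(size_in_bytes) >= size_in_bytes) {
      return true;  // enough contiguous space freed; retry the free list
    }
    // 2. Otherwise fall back to the old behavior: wait for the sweepers.
    collector->WaitUntilSweepingCompleted();
  }
  return false;  // caller re-tries free-list allocation; it may still fail
}

int main() {
  Collector collector;
  std::printf("freed enough on the main thread: %s\n",
              EnsureSweepingProgress(&collector, 1024) ? "yes" : "no");
}
```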
@@ -559,7 +559,7 @@ class MarkCompactCollector::SweeperTask : public v8::Task {
  private:
   // v8::Task overrides.
   virtual void Run() V8_OVERRIDE {
-    heap_->mark_compact_collector()->SweepInParallel(space_);
+    heap_->mark_compact_collector()->SweepInParallel(space_, 0);
     heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
   }
@@ -3544,7 +3544,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
       switch (space->identity()) {
         case OLD_DATA_SPACE:
-          SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
+          SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
           break;
         case OLD_POINTER_SPACE:
           SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
@@ -3939,7 +3939,7 @@ static intptr_t Free(PagedSpace* space,
                      FreeList* free_list,
                      Address start,
                      int size) {
-  if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
+  if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
     return space->Free(start, size);
   } else {
     return size - free_list->Free(start, size);
@@ -3948,9 +3948,9 @@ static intptr_t Free(PagedSpace* space,
 // Force instantiation of templatized SweepConservatively method for
-// SWEEP_SEQUENTIALLY mode.
+// SWEEP_ON_MAIN_THREAD mode.
 template intptr_t MarkCompactCollector::
-    SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
+    SweepConservatively<MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(
         PagedSpace*, FreeList*, Page*);
@@ -3975,16 +3975,19 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
   ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
   ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
           free_list != NULL) ||
-         (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
+         (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD &&
           free_list == NULL));
   // When parallel sweeping is active, the page will be marked after
   // sweeping by the main thread.
-  if (mode != MarkCompactCollector::SWEEP_IN_PARALLEL) {
+  if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
+    p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
+  } else {
     p->MarkSweptConservatively();
   }
   intptr_t freed_bytes = 0;
+  intptr_t max_freed_bytes = 0;
   size_t size = 0;
   // Skip over all the dead objects at the start of the page and mark them free.
@@ -3999,8 +4002,9 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
   if (it.Done()) {
     size = p->area_end() - p->area_start();
-    freed_bytes += Free<mode>(space, free_list, p->area_start(),
-                              static_cast<int>(size));
+    freed_bytes = Free<mode>(space, free_list, p->area_start(),
+                             static_cast<int>(size));
+    max_freed_bytes = Max(freed_bytes, max_freed_bytes);
     ASSERT_EQ(0, p->LiveBytes());
     return freed_bytes;
   }
@@ -4010,8 +4014,9 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
   Address free_end = StartOfLiveObject(cell_base, *cell);
   // Free the first free space.
   size = free_end - p->area_start();
-  freed_bytes += Free<mode>(space, free_list, p->area_start(),
-                            static_cast<int>(size));
+  freed_bytes = Free<mode>(space, free_list, p->area_start(),
+                           static_cast<int>(size));
+  max_freed_bytes = Max(freed_bytes, max_freed_bytes);
   // The start of the current free area is represented in undigested form by
   // the address of the last 32-word section that contained a live object and
@@ -4036,8 +4041,9 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
       // so now we need to find the start of the first live object at the
       // end of the free space.
       free_end = StartOfLiveObject(cell_base, *cell);
-      freed_bytes += Free<mode>(space, free_list, free_start,
-                                static_cast<int>(free_end - free_start));
+      freed_bytes = Free<mode>(space, free_list, free_start,
+                               static_cast<int>(free_end - free_start));
+      max_freed_bytes = Max(freed_bytes, max_freed_bytes);
     }
   }
   // Update our undigested record of where the current free area started.
@@ -4051,31 +4057,40 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
   // Handle the free space at the end of the page.
   if (cell_base - free_start > 32 * kPointerSize) {
     free_start = DigestFreeStart(free_start, free_start_cell);
-    freed_bytes += Free<mode>(space, free_list, free_start,
-                              static_cast<int>(p->area_end() - free_start));
+    freed_bytes = Free<mode>(space, free_list, free_start,
+                             static_cast<int>(p->area_end() - free_start));
+    max_freed_bytes = Max(freed_bytes, max_freed_bytes);
   }
   p->ResetLiveBytes();
-  return freed_bytes;
+  return max_freed_bytes;
 }
-void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
+int MarkCompactCollector::SweepInParallel(PagedSpace* space,
+                                          int required_freed_bytes) {
   PageIterator it(space);
   FreeList* free_list = space == heap()->old_pointer_space()
                             ? free_list_old_pointer_space_.get()
                             : free_list_old_data_space_.get();
   FreeList private_free_list(space);
+  int max_freed = 0;
+  int max_freed_overall = 0;
   while (it.has_next()) {
     Page* p = it.next();
     if (p->TryParallelSweeping()) {
-      SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p);
+      max_freed = SweepConservatively<SWEEP_IN_PARALLEL>(
+          space, &private_free_list, p);
       free_list->Concatenate(&private_free_list);
-      p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
+      if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
+        return max_freed;
+      }
+      max_freed_overall = Max(max_freed, max_freed_overall);
     }
     if (p == space->end_of_unswept_pages()) break;
   }
+  return max_freed_overall;
 }
@@ -4131,7 +4146,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
           PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                  reinterpret_cast<intptr_t>(p));
         }
-        SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
+        SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
         pages_swept++;
         break;
       }
@@ -4142,7 +4157,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
           PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                  reinterpret_cast<intptr_t>(p));
         }
-        SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
+        SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
         pages_swept++;
         parallel_sweeping_active = true;
       } else {
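
The SweepConservatively changes above also change what the function reports: instead of the total number of reclaimed bytes, it now returns the size of the biggest contiguous freed block on the page, which is what a caller trying to satisfy one allocation actually needs. A toy sketch of that bookkeeping, assuming for illustration that a page is just a list of free-range sizes (`SweepPage` and the numbers are made up, not V8 code):

```cpp
// Toy model of the max_freed_bytes bookkeeping added to SweepConservatively:
// each free range on the page is released individually, and the largest one
// is remembered and returned (instead of the running total).
#include <algorithm>
#include <cstdio>
#include <vector>

// Returns the biggest contiguous freed chunk, in bytes, for one "page"
// described here simply as the sizes of its dead (free) ranges.
int SweepPage(const std::vector<int>& free_range_sizes) {
  int max_freed_bytes = 0;
  for (int freed_bytes : free_range_sizes) {
    // In the real code each range goes through Free<mode>(space, free_list,
    // start, size); here we only track the largest block seen so far.
    max_freed_bytes = std::max(freed_bytes, max_freed_bytes);
  }
  return max_freed_bytes;
}

int main() {
  // Three free ranges; the answer is the largest one, not their sum.
  std::printf("%d\n", SweepPage({128, 4096, 512}));  // prints 4096
}
```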
...
@@ -577,7 +577,7 @@ class MarkCompactCollector {
   };
   enum SweepingParallelism {
-    SWEEP_SEQUENTIALLY,
+    SWEEP_ON_MAIN_THREAD,
     SWEEP_IN_PARALLEL
   };
@@ -590,7 +590,7 @@ class MarkCompactCollector {
 #endif
   // Sweep a single page from the given space conservatively.
-  // Return a number of reclaimed bytes.
+  // Returns the size of the biggest continuous freed memory chunk in bytes.
   template<SweepingParallelism type>
   static intptr_t SweepConservatively(PagedSpace* space,
                                       FreeList* free_list,
@@ -659,8 +659,11 @@ class MarkCompactCollector {
   MarkingParity marking_parity() { return marking_parity_; }
-  // Concurrent and parallel sweeping support.
-  void SweepInParallel(PagedSpace* space);
+  // Concurrent and parallel sweeping support. If required_freed_bytes was set
+  // to a value larger than 0, then sweeping returns after a block of at least
+  // required_freed_bytes was freed. If required_freed_bytes was set to zero
+  // then the whole given space is swept.
+  int SweepInParallel(PagedSpace* space, int required_freed_bytes);
   void WaitUntilSweepingCompleted();
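
Per the new SweepInParallel comment above, the required_freed_bytes argument selects between two behaviours: zero sweeps the whole space, while a positive value returns as soon as a block of at least that size has been freed. A small stand-alone stub illustrating that contract (the per-page numbers are invented; this is not the V8 implementation):

```cpp
// Illustration of the required_freed_bytes contract (stub, not V8 code):
// 0 sweeps every page and reports the largest block freed overall; a
// positive value returns as soon as one page yields a block that big.
#include <cstdio>

int SweepInParallel(int required_freed_bytes) {
  const int per_page_max_freed[] = {512, 8192, 1024};  // assumed page results
  int max_freed_overall = 0;
  for (int max_freed : per_page_max_freed) {
    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
      return max_freed;  // early exit, as the allocating main thread wants
    }
    if (max_freed > max_freed_overall) max_freed_overall = max_freed;
  }
  return max_freed_overall;  // whole-space sweep, as the sweeper threads use
}

int main() {
  std::printf("sweeper thread (0):      %d\n", SweepInParallel(0));
  std::printf("main thread (need 4096): %d\n", SweepInParallel(4096));
}
```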
...
@@ -2581,12 +2581,23 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
 }
-HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
+HeapObject* PagedSpace::EnsureSweepingProgress(
     int size_in_bytes) {
   MarkCompactCollector* collector = heap()->mark_compact_collector();
-  // If sweeper threads are still running, wait for them.
   if (collector->IsConcurrentSweepingInProgress(this)) {
+    // If sweeping is still in progress try to sweep pages on the main thread.
+    int free_chunk =
+        collector->SweepInParallel(this, size_in_bytes);
+    if (free_chunk >= size_in_bytes) {
+      HeapObject* object = free_list_.Allocate(size_in_bytes);
+      // We should be able to allocate an object here since we just freed that
+      // much memory.
+      ASSERT(object != NULL);
+      if (object != NULL) return object;
+    }
+    // Wait for the sweeper threads here and complete the sweeping phase.
     collector->WaitUntilSweepingCompleted();
     // After waiting for the sweeper threads, there may be new free-list
@@ -2617,7 +2628,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
       && heap()->OldGenerationAllocationLimitReached()) {
     // If sweeper threads are active, wait for them at that point and steal
     // elements form their free-lists.
-    HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+    HeapObject* object = EnsureSweepingProgress(size_in_bytes);
     if (object != NULL) return object;
   }
@@ -2630,7 +2641,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // If sweeper threads are active, wait for them at that point and steal
   // elements form their free-lists. Allocation may still fail their which
   // would indicate that there is not enough memory for the given allocation.
-  return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+  return EnsureSweepingProgress(size_in_bytes);
 }
...
@@ -2014,8 +2014,10 @@ class PagedSpace : public Space {
   // address denoted by top in allocation_info_.
   inline HeapObject* AllocateLinearly(int size_in_bytes);
-  MUST_USE_RESULT HeapObject*
-      WaitForSweeperThreadsAndRetryAllocation(int size_in_bytes);
+  // If sweeping is still in progress try to sweep unswept pages. If that is
+  // not successful, wait for the sweeper threads and re-try free-list
+  // allocation.
+  MUST_USE_RESULT HeapObject* EnsureSweepingProgress(int size_in_bytes);
   // Slow path of AllocateRaw. This function is space-dependent.
   MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
...
@@ -41,8 +41,8 @@ void SweeperThread::Run() {
       return;
     }
-    collector_->SweepInParallel(heap_->old_data_space());
-    collector_->SweepInParallel(heap_->old_pointer_space());
+    collector_->SweepInParallel(heap_->old_data_space(), 0);
+    collector_->SweepInParallel(heap_->old_pointer_space(), 0);
     end_sweeping_semaphore_.Signal();
   }
 }
...