Commit ce916ff9 authored by hpayer@chromium.org

Allow main thread to contribute to the sweeping phase.

BUG=
R=jarin@chromium.org

Review URL: https://codereview.chromium.org/380653003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22318 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 4057ed91
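In short, the patch lets the allocating (main) thread perform sweeping work itself when it needs memory while concurrent sweeping is still running, instead of only blocking on the sweeper threads. Below is a minimal standalone sketch of that slow-path shape; the types and helpers are hypothetical stand-ins, not the V8 API, and the real change is in PagedSpace::EnsureSweepingProgress further down in the diff.

// Hypothetical stand-ins (not V8 types); only the control flow matters here.
#include <cstddef>

struct FreeListStub {
  void* TryAllocate(std::size_t /*bytes*/) { return nullptr; }  // stub
};

struct CollectorStub {
  bool sweeping_in_progress = true;
  // Sweep pages until a contiguous block of at least `required` bytes has
  // been freed (0 means "sweep everything"); returns the largest block freed.
  std::size_t SweepSomePages(std::size_t required) { return required; }
  void WaitUntilSweepingCompleted() { sweeping_in_progress = false; }
};

void* AllocateSlowPath(CollectorStub& gc, FreeListStub& free_list,
                       std::size_t size_in_bytes) {
  if (gc.sweeping_in_progress) {
    // New in this patch: the main thread contributes to sweeping first.
    if (gc.SweepSomePages(size_in_bytes) >= size_in_bytes) {
      if (void* object = free_list.TryAllocate(size_in_bytes)) return object;
    }
    // Previous behaviour, now only the fallback: block until sweeping is done.
    gc.WaitUntilSweepingCompleted();
  }
  return free_list.TryAllocate(size_in_bytes);
}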
@@ -559,7 +559,7 @@ class MarkCompactCollector::SweeperTask : public v8::Task {
  private:
   // v8::Task overrides.
   virtual void Run() V8_OVERRIDE {
-    heap_->mark_compact_collector()->SweepInParallel(space_);
+    heap_->mark_compact_collector()->SweepInParallel(space_, 0);
     heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
   }
@@ -3544,7 +3544,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
       switch (space->identity()) {
         case OLD_DATA_SPACE:
-          SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
+          SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
           break;
         case OLD_POINTER_SPACE:
           SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
@@ -3939,7 +3939,7 @@ static intptr_t Free(PagedSpace* space,
                      FreeList* free_list,
                      Address start,
                      int size) {
-  if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
+  if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
     return space->Free(start, size);
   } else {
     return size - free_list->Free(start, size);
@@ -3948,9 +3948,9 @@ static intptr_t Free(PagedSpace* space,
 // Force instantiation of templatized SweepConservatively method for
-// SWEEP_SEQUENTIALLY mode.
+// SWEEP_ON_MAIN_THREAD mode.
 template intptr_t MarkCompactCollector::
-    SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
+    SweepConservatively<MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(
         PagedSpace*, FreeList*, Page*);
@@ -3975,16 +3975,19 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
   ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
   ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
           free_list != NULL) ||
-         (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
+         (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD &&
           free_list == NULL));
   // When parallel sweeping is active, the page will be marked after
   // sweeping by the main thread.
-  if (mode != MarkCompactCollector::SWEEP_IN_PARALLEL) {
+  if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
+    p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
+  } else {
     p->MarkSweptConservatively();
   }
   intptr_t freed_bytes = 0;
+  intptr_t max_freed_bytes = 0;
   size_t size = 0;
   // Skip over all the dead objects at the start of the page and mark them free.
@@ -3999,8 +4002,9 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
   if (it.Done()) {
     size = p->area_end() - p->area_start();
-    freed_bytes += Free<mode>(space, free_list, p->area_start(),
-                              static_cast<int>(size));
+    freed_bytes = Free<mode>(space, free_list, p->area_start(),
+                             static_cast<int>(size));
+    max_freed_bytes = Max(freed_bytes, max_freed_bytes);
     ASSERT_EQ(0, p->LiveBytes());
     return freed_bytes;
   }
@@ -4010,8 +4014,9 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
   Address free_end = StartOfLiveObject(cell_base, *cell);
   // Free the first free space.
   size = free_end - p->area_start();
-  freed_bytes += Free<mode>(space, free_list, p->area_start(),
-                            static_cast<int>(size));
+  freed_bytes = Free<mode>(space, free_list, p->area_start(),
+                           static_cast<int>(size));
+  max_freed_bytes = Max(freed_bytes, max_freed_bytes);
   // The start of the current free area is represented in undigested form by
   // the address of the last 32-word section that contained a live object and
@@ -4036,8 +4041,9 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
         // so now we need to find the start of the first live object at the
         // end of the free space.
         free_end = StartOfLiveObject(cell_base, *cell);
-        freed_bytes += Free<mode>(space, free_list, free_start,
-                                  static_cast<int>(free_end - free_start));
+        freed_bytes = Free<mode>(space, free_list, free_start,
+                                 static_cast<int>(free_end - free_start));
+        max_freed_bytes = Max(freed_bytes, max_freed_bytes);
       }
     }
     // Update our undigested record of where the current free area started.
@@ -4051,31 +4057,40 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
   // Handle the free space at the end of the page.
   if (cell_base - free_start > 32 * kPointerSize) {
     free_start = DigestFreeStart(free_start, free_start_cell);
-    freed_bytes += Free<mode>(space, free_list, free_start,
-                              static_cast<int>(p->area_end() - free_start));
+    freed_bytes = Free<mode>(space, free_list, free_start,
+                             static_cast<int>(p->area_end() - free_start));
+    max_freed_bytes = Max(freed_bytes, max_freed_bytes);
   }
   p->ResetLiveBytes();
-  return freed_bytes;
+  return max_freed_bytes;
 }
-void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
+int MarkCompactCollector::SweepInParallel(PagedSpace* space,
+                                          int required_freed_bytes) {
   PageIterator it(space);
   FreeList* free_list = space == heap()->old_pointer_space()
                             ? free_list_old_pointer_space_.get()
                             : free_list_old_data_space_.get();
   FreeList private_free_list(space);
+  int max_freed = 0;
+  int max_freed_overall = 0;
   while (it.has_next()) {
     Page* p = it.next();
     if (p->TryParallelSweeping()) {
-      SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p);
+      max_freed = SweepConservatively<SWEEP_IN_PARALLEL>(
+          space, &private_free_list, p);
       free_list->Concatenate(&private_free_list);
-      p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
+      if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
+        return max_freed;
+      }
+      max_freed_overall = Max(max_freed, max_freed_overall);
     }
     if (p == space->end_of_unswept_pages()) break;
   }
+  return max_freed_overall;
 }
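The effect of the two changes above: SweepConservatively now reports the largest contiguous block it freed on a page (max_freed_bytes), and SweepInParallel stops as soon as a page yields a block of at least required_freed_bytes, so an allocating caller sweeps no more than it needs. A small self-contained model of that bookkeeping (plain C++ with hypothetical data shapes, not V8 code):

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

using Page = std::vector<std::size_t>;  // sizes of the dead regions on a page

// Sweeps one page and returns the largest contiguous region it freed
// (mirrors the new return value of SweepConservatively).
std::size_t SweepPage(const Page& page) {
  std::size_t max_freed = 0;
  for (std::size_t dead_region : page) {
    // ... the real code hands the region to a free list here ...
    max_freed = std::max(max_freed, dead_region);
  }
  return max_freed;
}

// Sweeps pages until a block of at least `required_freed_bytes` was freed;
// with 0 it sweeps everything (mirrors the new SweepInParallel contract).
std::size_t SweepPages(const std::vector<Page>& pages,
                       std::size_t required_freed_bytes) {
  std::size_t max_freed_overall = 0;
  for (const Page& p : pages) {
    std::size_t max_freed = SweepPage(p);
    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
      return max_freed;  // early exit: the caller already has enough
    }
    max_freed_overall = std::max(max_freed, max_freed_overall);
  }
  return max_freed_overall;
}

int main() {
  std::vector<Page> pages = {{64, 32}, {256, 16}, {1024}};
  std::printf("largest block freed: %zu\n", SweepPages(pages, 128));
}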
@@ -4131,7 +4146,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
           PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                  reinterpret_cast<intptr_t>(p));
         }
-        SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
+        SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
         pages_swept++;
         break;
       }
@@ -4142,7 +4157,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
             PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                    reinterpret_cast<intptr_t>(p));
           }
-          SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
+          SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
           pages_swept++;
          parallel_sweeping_active = true;
        } else {
@@ -577,7 +577,7 @@ class MarkCompactCollector {
   };
   enum SweepingParallelism {
-    SWEEP_SEQUENTIALLY,
+    SWEEP_ON_MAIN_THREAD,
     SWEEP_IN_PARALLEL
   };
@@ -590,7 +590,7 @@ class MarkCompactCollector {
 #endif
   // Sweep a single page from the given space conservatively.
-  // Return a number of reclaimed bytes.
+  // Returns the size of the biggest continuous freed memory chunk in bytes.
   template<SweepingParallelism type>
   static intptr_t SweepConservatively(PagedSpace* space,
                                       FreeList* free_list,
@@ -659,8 +659,11 @@ class MarkCompactCollector {
   MarkingParity marking_parity() { return marking_parity_; }
-  // Concurrent and parallel sweeping support.
-  void SweepInParallel(PagedSpace* space);
+  // Concurrent and parallel sweeping support. If required_freed_bytes was set
+  // to a value larger than 0, then sweeping returns after a block of at least
+  // required_freed_bytes was freed. If required_freed_bytes was set to zero
+  // then the whole given space is swept.
+  int SweepInParallel(PagedSpace* space, int required_freed_bytes);
   void WaitUntilSweepingCompleted();
@@ -2581,12 +2581,23 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
 }
-HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
+HeapObject* PagedSpace::EnsureSweepingProgress(
     int size_in_bytes) {
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   // If sweeper threads are still running, wait for them.
   if (collector->IsConcurrentSweepingInProgress(this)) {
+    // If sweeping is still in progress try to sweep pages on the main thread.
+    int free_chunk =
+        collector->SweepInParallel(this, size_in_bytes);
+    if (free_chunk >= size_in_bytes) {
+      HeapObject* object = free_list_.Allocate(size_in_bytes);
+      // We should be able to allocate an object here since we just freed that
+      // much memory.
+      ASSERT(object != NULL);
+      if (object != NULL) return object;
+    }
+    // Wait for the sweeper threads here and complete the sweeping phase.
     collector->WaitUntilSweepingCompleted();
     // After waiting for the sweeper threads, there may be new free-list
@@ -2617,7 +2628,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
       && heap()->OldGenerationAllocationLimitReached()) {
     // If sweeper threads are active, wait for them at that point and steal
     // elements form their free-lists.
-    HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+    HeapObject* object = EnsureSweepingProgress(size_in_bytes);
     if (object != NULL) return object;
   }
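Note why EnsureSweepingProgress compares the return value of SweepInParallel against size_in_bytes: the call reports the largest single freed block, not the total number of freed bytes, because a free-list allocation needs one contiguous chunk. A total-bytes check could look sufficient while every individual block is still too small, as this small illustration (plain C++, not V8 code) shows:

#include <cstddef>
#include <cstdio>
#include <numeric>
#include <vector>

// Allocation succeeds only if some single free block fits the request.
bool CanAllocate(const std::vector<std::size_t>& free_blocks,
                 std::size_t size_in_bytes) {
  for (std::size_t block : free_blocks) {
    if (block >= size_in_bytes) return true;  // one contiguous block fits
  }
  return false;
}

int main() {
  std::vector<std::size_t> free_blocks = {96, 96, 96};  // 288 bytes in total
  std::size_t request = 128;
  std::size_t total =
      std::accumulate(free_blocks.begin(), free_blocks.end(), std::size_t{0});
  // Total freed (288) exceeds the request, yet no single block fits,
  // so an allocation of 128 bytes would still fail.
  std::printf("total freed: %zu, can allocate %zu: %d\n", total, request,
              CanAllocate(free_blocks, request) ? 1 : 0);
}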
@@ -2630,7 +2641,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // If sweeper threads are active, wait for them at that point and steal
   // elements form their free-lists. Allocation may still fail their which
   // would indicate that there is not enough memory for the given allocation.
-  return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+  return EnsureSweepingProgress(size_in_bytes);
 }
@@ -2014,8 +2014,10 @@ class PagedSpace : public Space {
   // address denoted by top in allocation_info_.
   inline HeapObject* AllocateLinearly(int size_in_bytes);
-  MUST_USE_RESULT HeapObject*
-      WaitForSweeperThreadsAndRetryAllocation(int size_in_bytes);
+  // If sweeping is still in progress try to sweep unswept pages. If that is
+  // not successful, wait for the sweeper threads and re-try free-list
+  // allocation.
+  MUST_USE_RESULT HeapObject* EnsureSweepingProgress(int size_in_bytes);
   // Slow path of AllocateRaw. This function is space-dependent.
   MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
@@ -41,8 +41,8 @@ void SweeperThread::Run() {
       return;
     }
-    collector_->SweepInParallel(heap_->old_data_space());
-    collector_->SweepInParallel(heap_->old_pointer_space());
+    collector_->SweepInParallel(heap_->old_data_space(), 0);
+    collector_->SweepInParallel(heap_->old_pointer_space(), 0);
     end_sweeping_semaphore_.Signal();
   }
 }