Commit 0924d6d5 authored by hpayer, committed by Commit bot

Reland of land concurrent sweeping of code space. (patchset #1 id:1 of https://codereview.chromium.org/1263343002/)

Reason for revert:
Bogus revert.

Original issue's description:
> Revert of Reland concurrent sweeping of code space. (patchset #6 id:100001 of https://codereview.chromium.org/1242333002/)
>
> Reason for revert:
> Reverted because bug 507840 came back in recent Chrome crash reports. Should not have committed this CL.
>
> Original issue's description:
> > Reland concurrent sweeping of code space.
> >
> > BUG=
> >
> > Committed: https://crrev.com/8516dccf6a561020441773c93c564dd4aa6ee59e
> > Cr-Commit-Position: refs/heads/master@{#29967}
>
> TBR=jochen@chromium.org
> NOPRESUBMIT=true
> NOTREECHECKS=true
> NOTRY=true
> BUG=
>
> Committed: https://crrev.com/5c6e7d00438cc82a5584e3178d7dadf36e4a34f8
> Cr-Commit-Position: refs/heads/master@{#29975}

TBR=jochen@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=

Review URL: https://codereview.chromium.org/1271773002

Cr-Commit-Position: refs/heads/master@{#29976}
parent 5c6e7d00
@@ -1446,6 +1446,9 @@ Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
// after the inner pointer.
Page* page = Page::FromAddress(inner_pointer);
DCHECK_EQ(page->owner(), heap->code_space());
heap->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(page);
Address addr = page->skip_list()->StartFor(inner_pointer);
Address top = heap->code_space()->top();
......
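The call added above guarantees the page is fully swept before its skip list is consulted. A hypothetical, simplified sketch of the lookup that follows (the helper ObjectSizeAt and all names here are invented for illustration, not V8's API): on a swept page every free region has been turned into an iterable filler object, so the scan can walk object by object from the skip-list hint.

#include <cstddef>

// Find the start of the object containing inner_pointer by walking forward
// from the skip-list hint. Only safe once the page is fully swept.
char* FindObjectStart(char* hint, char* page_end, char* inner_pointer,
                      size_t (*ObjectSizeAt)(char*)) {
  for (char* addr = hint; addr < page_end;) {
    size_t size = ObjectSizeAt(addr);  // Assumed helper: object size at addr.
    if (inner_pointer < addr + size) return addr;  // addr <= inner_pointer.
    addr += size;
  }
  return nullptr;  // inner_pointer is past the last object on the page.
}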
@@ -509,6 +509,7 @@ const char* Heap::GetSpaceName(int idx) {
void Heap::ClearAllICsByKind(Code::Kind kind) {
// TODO(mvstanton): Do not iterate the heap.
HeapObjectIterator it(code_space());
for (Object* object = it.Next(); object != NULL; object = it.Next()) {
@@ -5195,6 +5196,11 @@ void Heap::Verify() {
code_space_->Verify(&no_dirty_regions_visitor);
lo_space_->Verify();
mark_compact_collector_.VerifyWeakEmbeddedObjectsInCode();
if (FLAG_omit_map_checks_for_leaf_maps) {
mark_compact_collector_.VerifyOmittedMapChecks();
}
}
#endif
......
@@ -226,6 +226,7 @@ static void VerifyEvacuation(Heap* heap) {
void MarkCompactCollector::SetUp() {
free_list_old_space_.Reset(new FreeList(heap_->old_space()));
free_list_code_space_.Reset(new FreeList(heap_->code_space()));
EnsureMarkingDequeIsReserved();
EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
}
@@ -368,13 +369,6 @@ void MarkCompactCollector::CollectGarbage() {
SweepSpaces();
#ifdef VERIFY_HEAP
VerifyWeakEmbeddedObjectsInCode();
if (FLAG_omit_map_checks_for_leaf_maps) {
VerifyOmittedMapChecks();
}
#endif
Finish();
if (marking_parity_ == EVEN_MARKING_PARITY) {
@@ -501,9 +495,27 @@ class MarkCompactCollector::SweeperTask : public v8::Task {
void MarkCompactCollector::StartSweeperThreads() {
DCHECK(free_list_old_space_.get()->IsEmpty());
DCHECK(free_list_code_space_.get()->IsEmpty());
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new SweeperTask(heap(), heap()->old_space()),
v8::Platform::kShortRunningTask);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new SweeperTask(heap(), heap()->code_space()),
v8::Platform::kShortRunningTask);
}
void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
if (!page->SweepingCompleted()) {
SweepInParallel(page, owner);
if (!page->SweepingCompleted()) {
// We were not able to sweep that page, i.e., a concurrent
// sweeper thread currently owns this page. Wait for the sweeper
// thread to be done with this page.
page->WaitUntilSweepingCompleted();
}
}
}
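A standalone sketch of the help-or-wait idea this new helper implements (all names and the spin-wait are assumptions for illustration, not V8 code): the thread that needs the page first tries to claim and sweep it itself, and only blocks if a concurrent sweeper already owns it.

#include <atomic>
#include <thread>

enum SweepState { kSweepPending, kSweepInProgress, kSweepDone };
std::atomic<SweepState> page_state{kSweepPending};

void SweepOrWait() {
  SweepState expected = kSweepPending;
  if (page_state.compare_exchange_strong(expected, kSweepInProgress)) {
    // We claimed the page, so we sweep it on this thread ("help").
    // ... sweep the page ...
    page_state.store(kSweepDone);
  } else {
    // A concurrent sweeper owns the page; wait until it is done.
    while (page_state.load() != kSweepDone) std::this_thread::yield();
  }
}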
@@ -514,15 +526,19 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
// here.
if (!heap()->concurrent_sweeping_enabled() || !IsSweepingCompleted()) {
SweepInParallel(heap()->paged_space(OLD_SPACE), 0);
SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
}
// Wait twice: the old space job and the code space job each signal once.
if (heap()->concurrent_sweeping_enabled()) {
pending_sweeper_jobs_semaphore_.Wait();
pending_sweeper_jobs_semaphore_.Wait();
}
ParallelSweepSpacesComplete();
sweeping_in_progress_ = false;
RefillFreeList(heap()->paged_space(OLD_SPACE));
RefillFreeList(heap()->paged_space(CODE_SPACE));
heap()->paged_space(OLD_SPACE)->ResetUnsweptFreeBytes();
heap()->paged_space(CODE_SPACE)->ResetUnsweptFreeBytes();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && !evacuation()) {
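With a second sweeper job posted in StartSweeperThreads, the completion semaphore must also be waited on twice, once per job. A minimal standalone sketch of that pattern (C++20, assumed names, not V8's platform API):

#include <semaphore>
#include <thread>

std::counting_semaphore<2> pending_jobs(0);

void SweeperJob() {
  // ... sweep all pages of one space ...
  pending_jobs.release();  // Each job signals exactly once when done.
}

int main() {
  std::thread old_space_job(SweeperJob);
  std::thread code_space_job(SweeperJob);
  pending_jobs.acquire();  // Wait for the first job to finish.
  pending_jobs.acquire();  // Wait for the second job to finish.
  old_space_job.join();
  code_space_job.join();
}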
@@ -547,6 +563,8 @@ void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
if (space == heap()->old_space()) {
free_list = free_list_old_space_.get();
} else if (space == heap()->code_space()) {
free_list = free_list_code_space_.get();
} else {
// Any PagedSpace might invoke RefillFreeList, so we need to make sure
// to only refill free lists for the old space and the code space.
@@ -3479,14 +3497,17 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
int offsets[16];
// If we use the skip list for code space pages, we have to lock the skip
// list because it could be accessed concurrently by the runtime or the
// deoptimizer.
SkipList* skip_list = p->skip_list();
int curr_region = -1;
if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
skip_list->Clear();
}
intptr_t freed_bytes = 0;
intptr_t max_freed_bytes = 0;
int curr_region = -1;
for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
Address cell_base = it.CurrentCellBase();
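For code pages the skip list must be rebuilt while sweeping, because the runtime and the deoptimizer resolve inner pointers through it concurrently. A hypothetical, simplified skip list for illustration (region size, page size, and all names are assumptions, not V8's actual constants or API):

#include <cstddef>
#include <cstdint>

class SimpleSkipList {
 public:
  static const int kRegionSizeLog2 = 13;  // Assumed 8 KB regions.
  static const int kRegionCount = 64;     // Covers an assumed 512 KB page.

  void Clear() {
    for (int i = 0; i < kRegionCount; i++) starts_[i] = nullptr;
  }

  // Record a live object [start, start + size) while rebuilding the list.
  void AddObject(char* start, size_t size) {
    int first = RegionNumber(start);
    int last = RegionNumber(start + size - 1);
    for (int i = first; i <= last; i++) {
      if (starts_[i] == nullptr || start < starts_[i]) starts_[i] = start;
    }
  }

  // A safe address at or below the object containing addr, to scan from.
  char* StartFor(char* addr) { return starts_[RegionNumber(addr)]; }

 private:
  static int RegionNumber(char* addr) {
    return static_cast<int>(
        (reinterpret_cast<uintptr_t>(addr) >> kRegionSizeLog2) % kRegionCount);
  }

  char* starts_[kRegionCount] = {};
};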
@@ -4247,10 +4268,19 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
return 0;
}
page->set_parallel_sweeping(MemoryChunk::SWEEPING_IN_PROGRESS);
FreeList* free_list = free_list_old_space_.get();
FreeList* free_list;
FreeList private_free_list(space);
max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
if (space->identity() == CODE_SPACE) {
free_list = free_list_code_space_.get();
max_freed =
Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
} else {
free_list = free_list_old_space_.get();
max_freed =
Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
}
free_list->Concatenate(&private_free_list);
page->mutex()->Unlock();
}
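The hunk above gives each parallel sweep a private free list that is concatenated into the space-specific shared list in one step, so the lock on the shared list is held only briefly. A standalone sketch of that pattern (assumed types and names, not V8's FreeList API):

#include <cstddef>
#include <mutex>
#include <vector>

struct FreeBlock {
  char* start;
  size_t size;
};

class SharedFreeList {
 public:
  // Merge a sweeper's thread-local list in one short critical section.
  void Concatenate(std::vector<FreeBlock>* private_list) {
    std::lock_guard<std::mutex> guard(mutex_);
    blocks_.insert(blocks_.end(), private_list->begin(), private_list->end());
    private_list->clear();
  }

 private:
  std::mutex mutex_;
  std::vector<FreeBlock> blocks_;
};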
@@ -4308,8 +4338,19 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
PrintF("Sweeping 0x%" V8PRIxPTR ".\n",
reinterpret_cast<intptr_t>(p));
}
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, NULL, p, NULL);
if (space->identity() == CODE_SPACE) {
if (FLAG_zap_code_space) {
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
ZAP_FREE_SPACE>(space, NULL, p, NULL);
} else {
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
IGNORE_FREE_SPACE>(space, NULL, p, NULL);
}
} else {
DCHECK(space->identity() == OLD_SPACE);
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, NULL, p, NULL);
}
pages_swept++;
parallel_sweeping_active = true;
} else {
@@ -4326,13 +4367,17 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
if (FLAG_gc_verbose) {
PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p));
}
if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
ZAP_FREE_SPACE>(space, NULL, p, NULL);
} else if (space->identity() == CODE_SPACE) {
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
IGNORE_FREE_SPACE>(space, NULL, p, NULL);
if (space->identity() == CODE_SPACE) {
if (FLAG_zap_code_space) {
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
ZAP_FREE_SPACE>(space, NULL, p, NULL);
} else {
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
IGNORE_FREE_SPACE>(space, NULL, p, NULL);
}
} else {
DCHECK(space->identity() == OLD_SPACE ||
space->identity() == MAP_SPACE);
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, NULL, p, NULL);
}
@@ -4372,21 +4417,24 @@ void MarkCompactCollector::SweepSpaces() {
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
{
GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_OLDSPACE);
{ SweepSpace(heap()->old_space(), CONCURRENT_SWEEPING); }
{
GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_OLDSPACE);
SweepSpace(heap()->old_space(), CONCURRENT_SWEEPING);
}
{
GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_CODE);
SweepSpace(heap()->code_space(), CONCURRENT_SWEEPING);
}
sweeping_in_progress_ = true;
if (heap()->concurrent_sweeping_enabled()) {
StartSweeperThreads();
}
}
RemoveDeadInvalidatedCode();
{
GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_CODE);
SweepSpace(heap()->code_space(), SEQUENTIAL_SWEEPING);
}
RemoveDeadInvalidatedCode();
EvacuateNewSpaceAndCandidates();
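Code space is now swept under its own tracer scope before the sweeper threads start. The extra braces in the hunk above exist because GCTracer::Scope is an RAII timer; a minimal sketch of that pattern (names and output are assumptions for illustration):

#include <chrono>
#include <cstdio>

class PhaseScope {
 public:
  explicit PhaseScope(const char* name)
      : name_(name), start_(std::chrono::steady_clock::now()) {}
  // Leaving the enclosing block attributes the elapsed time to the phase.
  ~PhaseScope() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start_).count();
    std::printf("%s: %lld us\n", name_, static_cast<long long>(us));
  }

 private:
  const char* name_;
  std::chrono::steady_clock::time_point start_;
};

// Usage mirroring the hunk above:
//   { PhaseScope scope("MC_SWEEP_CODE"); /* sweep code space */ }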
@@ -4439,6 +4487,7 @@ void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
void MarkCompactCollector::ParallelSweepSpacesComplete() {
ParallelSweepSpaceComplete(heap()->old_space());
ParallelSweepSpaceComplete(heap()->code_space());
}
......
@@ -699,6 +699,8 @@ class MarkCompactCollector {
void EnsureSweepingCompleted();
void SweepOrWaitUntilSweepingCompleted(Page* page);
// If sweeper threads are not active, this method returns true. If that
// early return ever becomes a latency issue, we should be smarter here.
// Otherwise, it returns true once the sweeper threads are done processing
// the pages.
@@ -979,6 +981,7 @@ class MarkCompactCollector {
List<Code*> invalidated_code_;
base::SmartPointer<FreeList> free_list_old_space_;
base::SmartPointer<FreeList> free_list_code_space_;
friend class Heap;
};
......
@@ -75,6 +75,8 @@ bool HeapObjectIterator::AdvanceToNextPage() {
}
cur_page = cur_page->next_page();
if (cur_page == space_->anchor()) return false;
cur_page->heap()->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
cur_page);
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
......
@@ -468,16 +468,8 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
}
}
} else {
if (!page->SweepingCompleted()) {
heap_->mark_compact_collector()->SweepInParallel(page, owner);
if (!page->SweepingCompleted()) {
// We were not able to sweep that page, i.e., a concurrent
// sweeper thread currently owns this page. Wait for the sweeper
// thread to be done with this page.
page->WaitUntilSweepingCompleted();
}
}
CHECK(page->owner() == heap_->old_space());
heap_->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
page);
HeapObjectIterator iterator(page, NULL);
for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
heap_object = iterator.Next()) {
......