Commit 68a72399 authored by Michael Achenbach, committed by Commit Bot

Revert "[heap] Move pointers updating to ItemParallelJob"

This reverts commit 81d3427b.

Reason for revert: Several gc related failures, e.g.:
https://build.chromium.org/p/client.v8/builders/V8%20Mac%20GC%20Stress/builds/13488

Original change's description:
> [heap] Move pointers updating to ItemParallelJob
> 
> Furthermore avoid lock-step between pointer updating phases as they
> should execute in parallel without synchronization restrictions.
> 
> Bug: chromium:726040
> Change-Id: I26ce0d1f2a4637ff5610cae556113e3d736788e2
> Reviewed-on: https://chromium-review.googlesource.com/518103
> Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
> Reviewed-by: Hannes Payer <hpayer@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#45574}

NOPRESUBMIT=true
NOTRY=true
NOTREECHECKS=true

TBR=ulan@chromium.org,hpayer@chromium.org,mlippautz@chromium.org
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: chromium:726040

Change-Id: I60860aef390605d07bc520141cab9d5be9b712b3
Reviewed-on: https://chromium-review.googlesource.com/518106
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#45575}
parent 81d3427b
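For context on the change being reverted: the original patch moved pointer updating from V8's page-based PageParallelJob onto the ItemParallelJob abstraction, in which a fixed set of worker tasks repeatedly pulls work items off a shared list until it is empty, so differently sized updating phases need no lock-step barrier between them. A minimal sketch of that scheduling pattern follows (stand-in types and names, not V8's actual API):

// Illustrative item/task pattern, loosely modeled on ItemParallelJob.
// Item, ItemParallelJob, and TryAcquire are simplified stand-ins.
#include <atomic>
#include <memory>
#include <thread>
#include <vector>

class Item {
 public:
  virtual ~Item() = default;
  virtual void Process() = 0;
  // True if the calling thread won the right to process this item.
  bool TryAcquire() { return !taken_.exchange(true); }

 private:
  std::atomic<bool> taken_{false};
};

class ItemParallelJob {
 public:
  void AddItem(std::unique_ptr<Item> item) {
    items_.push_back(std::move(item));
  }

  // Each worker scans the shared list and processes the items it acquires;
  // no barrier separates items belonging to different updating phases.
  void Run(int num_tasks) {
    std::vector<std::thread> workers;
    for (int i = 0; i < num_tasks; i++) {
      workers.emplace_back([this] {
        for (auto& item : items_) {
          if (item->TryAcquire()) item->Process();
        }
      });
    }
    for (auto& worker : workers) worker.join();
  }

 private:
  std::vector<std::unique_ptr<Item>> items_;
};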
src/heap/gc-tracer.cc
@@ -538,8 +538,10 @@ void GCTracer::PrintNVP() const {
           "evacuate=%.2f "
           "evacuate.copy=%.2f "
           "evacuate.update_pointers=%.2f "
-          "evacuate.update_pointers.to_new_roots=%.2f "
-          "evacuate.update_pointers.slots=%.2f "
+          "evacuate.update_pointers.to_new=%.2f "
+          "evacuate.update_pointers.to_new.tospace=%.2f "
+          "evacuate.update_pointers.to_new.roots=%.2f "
+          "evacuate.update_pointers.to_new.old=%.2f "
           "update_marking_deque=%.2f "
           "reset_liveness=%.2f\n",
           duration, spent_in_mutator, "mmc", current_.reduce_memory,
@@ -556,9 +558,12 @@ void GCTracer::PrintNVP() const {
           current_.scopes[Scope::MINOR_MC_EVACUATE],
           current_.scopes[Scope::MINOR_MC_EVACUATE_COPY],
           current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS],
+          current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
           current_
-              .scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS],
-          current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS],
+              .scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_TOSPACE],
+          current_
+              .scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS],
+          current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_OLD],
           current_.scopes[Scope::MINOR_MC_MARKING_DEQUE],
           current_.scopes[Scope::MINOR_MC_RESET_LIVENESS]);
       break;
@@ -593,8 +598,8 @@ void GCTracer::PrintNVP() const {
           "evacuate.epilogue=%.1f "
           "evacuate.rebalance=%.1f "
           "evacuate.update_pointers=%.1f "
-          "evacuate.update_pointers.to_new_roots=%.1f "
-          "evacuate.update_pointers.slots=%.1f "
+          "evacuate.update_pointers.to_evacuated=%.1f "
+          "evacuate.update_pointers.to_new=%.1f "
           "evacuate.update_pointers.weak=%.1f "
           "finish=%.1f "
           "mark=%.1f "
@@ -677,8 +682,8 @@ void GCTracer::PrintNVP() const {
           current_.scopes[Scope::MC_EVACUATE_EPILOGUE],
           current_.scopes[Scope::MC_EVACUATE_REBALANCE],
           current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS],
-          current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS],
-          current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS],
+          current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED],
+          current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
           current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK],
           current_.scopes[Scope::MC_FINISH], current_.scopes[Scope::MC_MARK],
           current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
src/heap/gc-tracer.h
@@ -34,79 +34,81 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
   F(MC_INCREMENTAL_EXTERNAL_EPILOGUE) \
   F(MC_INCREMENTAL_EXTERNAL_PROLOGUE)
 
-#define TRACER_SCOPES(F) \
-  INCREMENTAL_SCOPES(F) \
-  F(HEAP_EPILOGUE) \
-  F(HEAP_EPILOGUE_REDUCE_NEW_SPACE) \
-  F(HEAP_EXTERNAL_EPILOGUE) \
-  F(HEAP_EXTERNAL_PROLOGUE) \
-  F(HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) \
-  F(HEAP_PROLOGUE) \
-  F(MC_CLEAR) \
-  F(MC_CLEAR_DEPENDENT_CODE) \
-  F(MC_CLEAR_MAPS) \
-  F(MC_CLEAR_SLOTS_BUFFER) \
-  F(MC_CLEAR_STORE_BUFFER) \
-  F(MC_CLEAR_STRING_TABLE) \
-  F(MC_CLEAR_WEAK_CELLS) \
-  F(MC_CLEAR_WEAK_COLLECTIONS) \
-  F(MC_CLEAR_WEAK_LISTS) \
-  F(MC_EPILOGUE) \
-  F(MC_EVACUATE) \
-  F(MC_EVACUATE_CANDIDATES) \
-  F(MC_EVACUATE_CLEAN_UP) \
-  F(MC_EVACUATE_COPY) \
-  F(MC_EVACUATE_EPILOGUE) \
-  F(MC_EVACUATE_PROLOGUE) \
-  F(MC_EVACUATE_REBALANCE) \
-  F(MC_EVACUATE_UPDATE_POINTERS) \
-  F(MC_EVACUATE_UPDATE_POINTERS_SLOTS) \
-  F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
-  F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
-  F(MC_FINISH) \
-  F(MC_MARK) \
-  F(MC_MARK_FINISH_INCREMENTAL) \
-  F(MC_MARK_ROOTS) \
-  F(MC_MARK_WEAK_CLOSURE) \
-  F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
-  F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
-  F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
-  F(MC_MARK_WEAK_CLOSURE_HARMONY) \
-  F(MC_MARK_WRAPPER_EPILOGUE) \
-  F(MC_MARK_WRAPPER_PROLOGUE) \
-  F(MC_MARK_WRAPPER_TRACING) \
-  F(MC_PROLOGUE) \
-  F(MC_SWEEP) \
-  F(MC_SWEEP_CODE) \
-  F(MC_SWEEP_MAP) \
-  F(MC_SWEEP_OLD) \
-  F(MINOR_MC) \
-  F(MINOR_MC_CLEAR) \
-  F(MINOR_MC_CLEAR_STRING_TABLE) \
-  F(MINOR_MC_CLEAR_WEAK_LISTS) \
-  F(MINOR_MC_EVACUATE) \
-  F(MINOR_MC_EVACUATE_CLEAN_UP) \
-  F(MINOR_MC_EVACUATE_COPY) \
-  F(MINOR_MC_EVACUATE_EPILOGUE) \
-  F(MINOR_MC_EVACUATE_PROLOGUE) \
-  F(MINOR_MC_EVACUATE_REBALANCE) \
-  F(MINOR_MC_EVACUATE_UPDATE_POINTERS) \
-  F(MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS) \
-  F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
-  F(MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK) \
-  F(MINOR_MC_MARK) \
-  F(MINOR_MC_MARK_GLOBAL_HANDLES) \
-  F(MINOR_MC_MARK_SEED) \
-  F(MINOR_MC_MARK_ROOTS) \
-  F(MINOR_MC_MARK_WEAK) \
-  F(MINOR_MC_MARKING_DEQUE) \
-  F(MINOR_MC_RESET_LIVENESS) \
-  F(MINOR_MC_SWEEPING) \
-  F(SCAVENGER_EVACUATE) \
-  F(SCAVENGER_OLD_TO_NEW_POINTERS) \
-  F(SCAVENGER_ROOTS) \
-  F(SCAVENGER_SCAVENGE) \
-  F(SCAVENGER_SEMISPACE) \
+#define TRACER_SCOPES(F) \
+  INCREMENTAL_SCOPES(F) \
+  F(HEAP_EPILOGUE) \
+  F(HEAP_EPILOGUE_REDUCE_NEW_SPACE) \
+  F(HEAP_EXTERNAL_EPILOGUE) \
+  F(HEAP_EXTERNAL_PROLOGUE) \
+  F(HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) \
+  F(HEAP_PROLOGUE) \
+  F(MC_CLEAR) \
+  F(MC_CLEAR_DEPENDENT_CODE) \
+  F(MC_CLEAR_MAPS) \
+  F(MC_CLEAR_SLOTS_BUFFER) \
+  F(MC_CLEAR_STORE_BUFFER) \
+  F(MC_CLEAR_STRING_TABLE) \
+  F(MC_CLEAR_WEAK_CELLS) \
+  F(MC_CLEAR_WEAK_COLLECTIONS) \
+  F(MC_CLEAR_WEAK_LISTS) \
+  F(MC_EPILOGUE) \
+  F(MC_EVACUATE) \
+  F(MC_EVACUATE_CANDIDATES) \
+  F(MC_EVACUATE_CLEAN_UP) \
+  F(MC_EVACUATE_COPY) \
+  F(MC_EVACUATE_EPILOGUE) \
+  F(MC_EVACUATE_PROLOGUE) \
+  F(MC_EVACUATE_REBALANCE) \
+  F(MC_EVACUATE_UPDATE_POINTERS) \
+  F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED) \
+  F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
+  F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
+  F(MC_FINISH) \
+  F(MC_MARK) \
+  F(MC_MARK_FINISH_INCREMENTAL) \
+  F(MC_MARK_ROOTS) \
+  F(MC_MARK_WEAK_CLOSURE) \
+  F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
+  F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
+  F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
+  F(MC_MARK_WEAK_CLOSURE_HARMONY) \
+  F(MC_MARK_WRAPPER_EPILOGUE) \
+  F(MC_MARK_WRAPPER_PROLOGUE) \
+  F(MC_MARK_WRAPPER_TRACING) \
+  F(MC_PROLOGUE) \
+  F(MC_SWEEP) \
+  F(MC_SWEEP_CODE) \
+  F(MC_SWEEP_MAP) \
+  F(MC_SWEEP_OLD) \
+  F(MINOR_MC) \
+  F(MINOR_MC_CLEAR) \
+  F(MINOR_MC_CLEAR_STRING_TABLE) \
+  F(MINOR_MC_CLEAR_WEAK_LISTS) \
+  F(MINOR_MC_EVACUATE) \
+  F(MINOR_MC_EVACUATE_CLEAN_UP) \
+  F(MINOR_MC_EVACUATE_COPY) \
+  F(MINOR_MC_EVACUATE_EPILOGUE) \
+  F(MINOR_MC_EVACUATE_PROLOGUE) \
+  F(MINOR_MC_EVACUATE_REBALANCE) \
+  F(MINOR_MC_EVACUATE_UPDATE_POINTERS) \
+  F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
+  F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
+  F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_TOSPACE) \
+  F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_OLD) \
+  F(MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK) \
+  F(MINOR_MC_MARK) \
+  F(MINOR_MC_MARK_GLOBAL_HANDLES) \
+  F(MINOR_MC_MARK_SEED) \
+  F(MINOR_MC_MARK_ROOTS) \
+  F(MINOR_MC_MARK_WEAK) \
+  F(MINOR_MC_MARKING_DEQUE) \
+  F(MINOR_MC_RESET_LIVENESS) \
+  F(MINOR_MC_SWEEPING) \
+  F(SCAVENGER_EVACUATE) \
+  F(SCAVENGER_OLD_TO_NEW_POINTERS) \
+  F(SCAVENGER_ROOTS) \
+  F(SCAVENGER_SCAVENGE) \
+  F(SCAVENGER_SEMISPACE) \
   F(SCAVENGER_WEAK)
 
 #define TRACE_GC(tracer, scope_id) \
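The F(...) lists above are an X-macro: TRACER_SCOPES(F) is expanded with different definitions of F to generate the tracer's Scope enum and its string names from a single list, which is likely why the diff shows the whole list removed and re-added (the backslash continuation columns shift when entries change). A hedged sketch of the expansion pattern (DEMO_SCOPES and the helper macros below are illustrative, not the real gc-tracer.h helpers):

// X-macro expansion: one list, two generated artifacts.
#define DEMO_SCOPES(F) \
  F(MC_EVACUATE)       \
  F(MC_EVACUATE_UPDATE_POINTERS)

enum ScopeId {
#define DEFINE_SCOPE(scope) scope,
  DEMO_SCOPES(DEFINE_SCOPE)
#undef DEFINE_SCOPE
  NUMBER_OF_SCOPES
};

const char* ScopeName(ScopeId id) {
  switch (id) {
#define CASE_SCOPE(scope) \
  case scope:             \
    return #scope;
    DEMO_SCOPES(CASE_SCOPE)
#undef CASE_SCOPE
    default:
      return "unknown";
  }
}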
src/heap/mark-compact.cc
@@ -4235,91 +4235,58 @@ void MarkCompactCollector::Evacuate() {
 #endif
 }
 
-class UpdatingItem : public ItemParallelJob::Item {
- public:
-  virtual ~UpdatingItem() {}
-  virtual void Process() = 0;
-};
-
-class PointersUpatingTask : public ItemParallelJob::Task {
+template <RememberedSetType type>
+class PointerUpdateJobTraits {
  public:
-  explicit PointersUpatingTask(Isolate* isolate)
-      : ItemParallelJob::Task(isolate) {}
-
-  void RunInParallel() override {
-    UpdatingItem* item = nullptr;
-    while ((item = GetItem<UpdatingItem>()) != nullptr) {
-      item->Process();
-      item->MarkFinished();
-    }
-  };
-};
+  typedef int PerPageData;  // Per page data is not used in this job.
+  typedef const MarkCompactCollectorBase* PerTaskData;
 
-class ToSpaceUpdatingItem : public UpdatingItem {
- public:
-  explicit ToSpaceUpdatingItem(MemoryChunk* chunk, Address start, Address end,
-                               MarkingState marking_state)
-      : chunk_(chunk),
-        start_(start),
-        end_(end),
-        marking_state_(marking_state) {}
-  virtual ~ToSpaceUpdatingItem() {}
-
-  void Process() override {
-    if (chunk_->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
-      // New->new promoted pages contain garbage so they require iteration
-      // using markbits.
-      ProcessVisitLive();
-    } else {
-      ProcessVisitAll();
-    }
+  static void ProcessPageInParallel(Heap* heap, PerTaskData task_data,
+                                    MemoryChunk* chunk, PerPageData) {
+    base::LockGuard<base::RecursiveMutex> guard(chunk->mutex());
+    UpdateUntypedPointers(heap, chunk, task_data);
+    UpdateTypedPointers(heap, chunk, task_data);
   }
 
  private:
-  void ProcessVisitAll() {
-    PointersUpdatingVisitor visitor;
-    for (Address cur = start_; cur < end_;) {
-      HeapObject* object = HeapObject::FromAddress(cur);
-      Map* map = object->map();
-      int size = object->SizeFromMap(map);
-      object->IterateBody(map->instance_type(), size, &visitor);
-      cur += size;
+  static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk,
+                                    const MarkCompactCollectorBase* collector) {
+    if (type == OLD_TO_NEW) {
+      RememberedSet<OLD_TO_NEW>::Iterate(
+          chunk, [heap, collector](Address slot) {
+            return CheckAndUpdateOldToNewSlot(heap, slot, collector);
+          });
+    } else {
+      RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
+        return UpdateSlot(reinterpret_cast<Object**>(slot));
+      });
     }
   }
 
-  void ProcessVisitLive() {
-    // For young generation evacuations we want to visit grey objects, for
-    // full MC, we need to visit black objects.
-    PointersUpdatingVisitor visitor;
-    LiveObjectIterator<kAllLiveObjects> it(chunk_, marking_state_);
-    HeapObject* object = nullptr;
-    while ((object = it.Next()) != nullptr) {
-      object->IterateBodyFast(&visitor);
+  static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk,
+                                  const MarkCompactCollectorBase* collector) {
+    if (type == OLD_TO_OLD) {
+      Isolate* isolate = heap->isolate();
+      RememberedSet<OLD_TO_OLD>::IterateTyped(
+          chunk,
+          [isolate](SlotType slot_type, Address host_addr, Address slot) {
+            return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, slot_type,
+                                                          slot, UpdateSlot);
+          });
+    } else {
+      Isolate* isolate = heap->isolate();
+      RememberedSet<OLD_TO_NEW>::IterateTyped(
+          chunk, [isolate, heap, collector](SlotType slot_type,
+                                            Address host_addr, Address slot) {
+            return UpdateTypedSlotHelper::UpdateTypedSlot(
+                isolate, slot_type, slot, [heap, collector](Object** slot) {
+                  return CheckAndUpdateOldToNewSlot(
+                      heap, reinterpret_cast<Address>(slot), collector);
+                });
+          });
     }
   }
 
-  MemoryChunk* chunk_;
-  Address start_;
-  Address end_;
-  MarkingState marking_state_;
-};
-
-template <RememberedSetType type>
-class RememberedSetUpdatingItem : public UpdatingItem {
- public:
-  explicit RememberedSetUpdatingItem(Heap* heap,
-                                     MarkCompactCollectorBase* collector,
-                                     MemoryChunk* chunk)
-      : heap_(heap), collector_(collector), chunk_(chunk) {}
-  virtual ~RememberedSetUpdatingItem() {}
-
-  void Process() override {
-    base::LockGuard<base::RecursiveMutex> guard(chunk_->mutex());
-    UpdateUntypedPointers();
-    UpdateTypedPointers();
-  }
-
- private:
   static SlotCallbackResult CheckAndUpdateOldToNewSlot(
       Heap* heap, Address slot_address,
       const MarkCompactCollectorBase* collector) {
@@ -4356,105 +4323,109 @@ class RememberedSetUpdatingItem : public UpdatingItem {
     }
     return REMOVE_SLOT;
   }
+};
 
-  void UpdateUntypedPointers() {
-    if (type == OLD_TO_NEW) {
-      RememberedSet<OLD_TO_NEW>::Iterate(chunk_, [this](Address slot) {
-        return CheckAndUpdateOldToNewSlot(heap_, slot, collector_);
-      });
-    } else {
-      RememberedSet<OLD_TO_OLD>::Iterate(chunk_, [](Address slot) {
-        return UpdateSlot(reinterpret_cast<Object**>(slot));
-      });
-    }
-  }
+template <RememberedSetType type>
+void MarkCompactCollectorBase::UpdatePointersInParallel(
+    base::Semaphore* semaphore) {
+  PageParallelJob<PointerUpdateJobTraits<type> > job(
+      heap(), heap()->isolate()->cancelable_task_manager(), semaphore);
+  RememberedSet<type>::IterateMemoryChunks(
+      heap(), [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
+  const int num_pages = job.NumberOfPages();
+  if (num_pages == 0) return;
+  const int num_tasks =
+      NumberOfParallelPointerUpdateTasks(num_pages, old_to_new_slots_);
+  job.Run(num_tasks, [this](int i) { return this; });
+}
 
-  void UpdateTypedPointers() {
-    Isolate* isolate = heap_->isolate();
-    if (type == OLD_TO_NEW) {
-      RememberedSet<OLD_TO_NEW>::IterateTyped(
-          chunk_,
-          [isolate, this](SlotType slot_type, Address host_addr, Address slot) {
-            return UpdateTypedSlotHelper::UpdateTypedSlot(
-                isolate, slot_type, slot, [this](Object** slot) {
-                  return CheckAndUpdateOldToNewSlot(
-                      heap_, reinterpret_cast<Address>(slot), collector_);
-                });
-          });
-    } else {
-      RememberedSet<OLD_TO_OLD>::IterateTyped(
-          chunk_,
-          [isolate](SlotType slot_type, Address host_addr, Address slot) {
-            return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, slot_type,
-                                                          slot, UpdateSlot);
-          });
-    }
-  }
-
-  Heap* heap_;
-  MarkCompactCollectorBase* collector_;
-  MemoryChunk* chunk_;
-};
+class ToSpacePointerUpdateJobTraits {
+ public:
+  struct PageData {
+    Address start;
+    Address end;
+    MarkingState marking_state;
+  };
+  typedef PageData PerPageData;
+  typedef PointersUpdatingVisitor* PerTaskData;
+
+  static void ProcessPageInParallel(Heap* heap, PerTaskData visitor,
+                                    MemoryChunk* chunk, PerPageData page_data) {
+    if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
+      // New->new promoted pages contain garbage so they require iteration
+      // using markbits.
+      ProcessPageInParallelVisitLive(heap, visitor, chunk, page_data);
+    } else {
+      ProcessPageInParallelVisitAll(heap, visitor, chunk, page_data);
+    }
+  }
+
+ private:
+  static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
+                                            MemoryChunk* chunk,
+                                            PerPageData page_data) {
+    for (Address cur = page_data.start; cur < page_data.end;) {
+      HeapObject* object = HeapObject::FromAddress(cur);
+      Map* map = object->map();
+      int size = object->SizeFromMap(map);
+      object->IterateBody(map->instance_type(), size, visitor);
+      cur += size;
+    }
+  }
+
+  static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
+                                             MemoryChunk* chunk,
+                                             PerPageData page_data) {
+    // For young generation evacuations we want to visit grey objects, for
+    // full MC, we need to visit black objects.
+    LiveObjectIterator<kAllLiveObjects> it(chunk, page_data.marking_state);
+    HeapObject* object = NULL;
+    while ((object = it.Next()) != NULL) {
+      Map* map = object->map();
+      int size = object->SizeFromMap(map);
+      object->IterateBody(map->instance_type(), size, visitor);
+    }
+  }
+};
 
-int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
-    ItemParallelJob* job) {
-  // Seed to space pages.
+void MarkCompactCollectorBase::UpdateToSpacePointersInParallel(
+    base::Semaphore* semaphore) {
+  PageParallelJob<ToSpacePointerUpdateJobTraits> job(
+      heap(), isolate()->cancelable_task_manager(), semaphore);
   const Address space_start = heap()->new_space()->bottom();
   const Address space_end = heap()->new_space()->top();
-  int pages = 0;
   for (Page* page : PageRange(space_start, space_end)) {
     Address start =
         page->Contains(space_start) ? space_start : page->area_start();
     Address end = page->Contains(space_end) ? space_end : page->area_end();
-    job->AddItem(
-        new ToSpaceUpdatingItem(page, start, end, marking_state(page)));
-    pages++;
+    job.AddPage(page, {start, end, marking_state(page)});
   }
-  if (pages == 0) return 0;
-  return NumberOfParallelToSpacePointerUpdateTasks(pages);
-}
-
-template <RememberedSetType type>
-int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
-    ItemParallelJob* job) {
-  int pages = 0;
-  RememberedSet<type>::IterateMemoryChunks(
-      heap(), [this, &job, &pages](MemoryChunk* chunk) {
-        job->AddItem(new RememberedSetUpdatingItem<type>(heap(), this, chunk));
-        pages++;
-      });
-  if (pages == 0) return 0;
-  return NumberOfParallelPointerUpdateTasks(
-      pages, type == OLD_TO_NEW ? old_to_new_slots_ : -1);
+  PointersUpdatingVisitor visitor;
+  const int num_tasks =
+      NumberOfParallelToSpacePointerUpdateTasks(job.NumberOfPages());
+  job.Run(num_tasks, [&visitor](int i) { return &visitor; });
 }
 
 void MarkCompactCollector::UpdatePointersAfterEvacuation() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
 
-  PointersUpdatingVisitor updating_visitor;
-  ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
-                               &page_parallel_job_semaphore_);
-
-  const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
-  const int remembered_set_tasks_old_new =
-      CollectRememberedSetUpdatingItems<OLD_TO_NEW>(&updating_job);
-  const int remembered_set_tasks_old_old =
-      CollectRememberedSetUpdatingItems<OLD_TO_OLD>(&updating_job);
-  const int num_tasks = Max(to_space_tasks, Max(remembered_set_tasks_old_new,
-                                                remembered_set_tasks_old_old));
-  for (int i = 0; i < num_tasks; i++) {
-    updating_job.AddTask(new PointersUpatingTask(isolate()));
-  }
-
   {
     TRACE_GC(heap()->tracer(),
-             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
+             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
+    UpdateToSpacePointersInParallel(&page_parallel_job_semaphore_);
     // Update roots.
+    PointersUpdatingVisitor updating_visitor;
     heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
+    UpdatePointersInParallel<OLD_TO_NEW>(&page_parallel_job_semaphore_);
   }
   {
-    TRACE_GC(heap()->tracer(),
-             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS);
-    updating_job.Run();
+    Heap* heap = this->heap();
+    TRACE_GC(heap->tracer(),
+             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
+    UpdatePointersInParallel<OLD_TO_OLD>(&page_parallel_job_semaphore_);
   }
   {
@@ -4474,26 +4445,26 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
            GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
   PointersUpdatingVisitor updating_visitor;
-  ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
-                               &page_parallel_job_semaphore_);
-
-  const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
-  const int remembered_set_tasks =
-      CollectRememberedSetUpdatingItems<OLD_TO_NEW>(&updating_job);
-  const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
-  for (int i = 0; i < num_tasks; i++) {
-    updating_job.AddTask(new PointersUpatingTask(isolate()));
-  }
 
   {
     TRACE_GC(heap()->tracer(),
-             GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
-    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
-  }
-  {
-    TRACE_GC(heap()->tracer(),
-             GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
-    updating_job.Run();
+             GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
+    {
+      TRACE_GC(
+          heap()->tracer(),
+          GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_TOSPACE);
+      UpdateToSpacePointersInParallel(&page_parallel_job_semaphore_);
+    }
+    {
+      TRACE_GC(heap()->tracer(),
+               GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
+      heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
+    }
+    {
+      TRACE_GC(heap()->tracer(),
+               GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_OLD);
+      UpdatePointersInParallel<OLD_TO_NEW>(&page_parallel_job_semaphore_);
+    }
   }
   {
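The mark-compact changes above restore the PageParallelJob traits pattern: a traits class fixes a PerPageData payload type, a PerTaskData state type, and a static ProcessPageInParallel callback, and the job distributes the added pages across a fixed number of tasks. A rough sketch of that contract (the sequential Run is a stand-in for the real parallel scheduler, and all names besides the traits members are illustrative):

#include <utility>
#include <vector>

struct MemoryChunk {};  // stand-in for V8's page type

template <typename JobTraits>
class PageParallelJob {
 public:
  void AddPage(MemoryChunk* chunk, typename JobTraits::PerPageData data) {
    pages_.push_back(PageEntry{chunk, std::move(data)});
  }
  int NumberOfPages() const { return static_cast<int>(pages_.size()); }

  // The real job shards pages across background tasks plus the calling
  // thread; this sketch walks them serially with task 0's PerTaskData.
  template <typename PerTaskDataFactory>
  void Run(int num_tasks, PerTaskDataFactory per_task_data) {
    typename JobTraits::PerTaskData data = per_task_data(0);
    for (PageEntry& page : pages_) {
      JobTraits::ProcessPageInParallel(data, page.chunk, page.data);
    }
  }

 private:
  struct PageEntry {
    MemoryChunk* chunk;
    typename JobTraits::PerPageData data;
  };
  std::vector<PageEntry> pages_;
};

// Example traits mirroring the shape of PointerUpdateJobTraits above.
struct CountingJobTraits {
  typedef int PerPageData;   // payload attached to each page
  typedef int* PerTaskData;  // state handed to the callback
  static void ProcessPageInParallel(PerTaskData counter, MemoryChunk* chunk,
                                    PerPageData weight) {
    *counter += weight;  // "visit" the page; here we only accumulate
  }
};

Driving it the way UpdatePointersInParallel does above would amount to building a PageParallelJob<CountingJobTraits>, calling AddPage for each chunk, then job.Run(num_tasks, [&](int) { return &total; });.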
src/heap/mark-compact.h
@@ -22,7 +22,6 @@ namespace internal {
 // Forward declarations.
 class EvacuationJobTraits;
 class HeapObjectVisitor;
-class ItemParallelJob;
 class LocalWorkStealingMarkingDeque;
 class MarkCompactCollector;
 class MinorMarkCompactCollector;
@@ -277,9 +276,9 @@ class MarkCompactCollectorBase {
   // Returns whether this page should be moved according to heuristics.
   bool ShouldMovePage(Page* p, intptr_t live_bytes);
 
-  int CollectToSpaceUpdatingItems(ItemParallelJob* job);
   template <RememberedSetType type>
-  int CollectRememberedSetUpdatingItems(ItemParallelJob* job);
+  void UpdatePointersInParallel(base::Semaphore* semaphore);
+  void UpdateToSpacePointersInParallel(base::Semaphore* semaphore);
 
   int NumberOfParallelCompactionTasks(int pages);
   int NumberOfParallelPointerUpdateTasks(int pages, int slots);