Commit 4b7ce144 authored by Michael Lippautz, committed by Commit Bot

Reland "[heap] Move pointers updating to ItemParallelJob"

Furthermore avoid lock-step between pointer updating phases as they
should execute in parallel without synchronization restrictions.

This reverts commit bc6adb86.

Bug: chromium:726040
Change-Id: I1713d4333f0ce1604ff51c02461f3ef91e4bdaed
Reviewed-on: https://chromium-review.googlesource.com/521062
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#45657}
parent a99c26b7
......@@ -538,10 +538,8 @@ void GCTracer::PrintNVP() const {
"evacuate=%.2f "
"evacuate.copy=%.2f "
"evacuate.update_pointers=%.2f "
"evacuate.update_pointers.to_new=%.2f "
"evacuate.update_pointers.to_new.tospace=%.2f "
"evacuate.update_pointers.to_new.roots=%.2f "
"evacuate.update_pointers.to_new.old=%.2f "
"evacuate.update_pointers.to_new_roots=%.2f "
"evacuate.update_pointers.slots=%.2f "
"update_marking_deque=%.2f "
"reset_liveness=%.2f\n",
duration, spent_in_mutator, "mmc", current_.reduce_memory,
......@@ -558,12 +556,9 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MINOR_MC_EVACUATE],
current_.scopes[Scope::MINOR_MC_EVACUATE_COPY],
current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS],
current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
current_
.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_TOSPACE],
current_
.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS],
current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_OLD],
current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS],
current_.scopes[Scope::MINOR_MC_MARKING_DEQUE],
current_.scopes[Scope::MINOR_MC_RESET_LIVENESS]);
break;
......@@ -598,8 +593,8 @@ void GCTracer::PrintNVP() const {
"evacuate.epilogue=%.1f "
"evacuate.rebalance=%.1f "
"evacuate.update_pointers=%.1f "
"evacuate.update_pointers.to_evacuated=%.1f "
"evacuate.update_pointers.to_new=%.1f "
"evacuate.update_pointers.to_new_roots=%.1f "
"evacuate.update_pointers.slots=%.1f "
"evacuate.update_pointers.weak=%.1f "
"finish=%.1f "
"mark=%.1f "
......@@ -682,8 +677,8 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_EVACUATE_EPILOGUE],
current_.scopes[Scope::MC_EVACUATE_REBALANCE],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK],
current_.scopes[Scope::MC_FINISH], current_.scopes[Scope::MC_MARK],
current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
......
......@@ -34,81 +34,79 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(MC_INCREMENTAL_EXTERNAL_EPILOGUE) \
F(MC_INCREMENTAL_EXTERNAL_PROLOGUE)
#define TRACER_SCOPES(F) \
INCREMENTAL_SCOPES(F) \
F(HEAP_EPILOGUE) \
F(HEAP_EPILOGUE_REDUCE_NEW_SPACE) \
F(HEAP_EXTERNAL_EPILOGUE) \
F(HEAP_EXTERNAL_PROLOGUE) \
F(HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) \
F(HEAP_PROLOGUE) \
F(MC_CLEAR) \
F(MC_CLEAR_DEPENDENT_CODE) \
F(MC_CLEAR_MAPS) \
F(MC_CLEAR_SLOTS_BUFFER) \
F(MC_CLEAR_STORE_BUFFER) \
F(MC_CLEAR_STRING_TABLE) \
F(MC_CLEAR_WEAK_CELLS) \
F(MC_CLEAR_WEAK_COLLECTIONS) \
F(MC_CLEAR_WEAK_LISTS) \
F(MC_EPILOGUE) \
F(MC_EVACUATE) \
F(MC_EVACUATE_CANDIDATES) \
F(MC_EVACUATE_CLEAN_UP) \
F(MC_EVACUATE_COPY) \
F(MC_EVACUATE_EPILOGUE) \
F(MC_EVACUATE_PROLOGUE) \
F(MC_EVACUATE_REBALANCE) \
F(MC_EVACUATE_UPDATE_POINTERS) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
F(MC_FINISH) \
F(MC_MARK) \
F(MC_MARK_FINISH_INCREMENTAL) \
F(MC_MARK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE) \
F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE_HARMONY) \
F(MC_MARK_WRAPPER_EPILOGUE) \
F(MC_MARK_WRAPPER_PROLOGUE) \
F(MC_MARK_WRAPPER_TRACING) \
F(MC_PROLOGUE) \
F(MC_SWEEP) \
F(MC_SWEEP_CODE) \
F(MC_SWEEP_MAP) \
F(MC_SWEEP_OLD) \
F(MINOR_MC) \
F(MINOR_MC_CLEAR) \
F(MINOR_MC_CLEAR_STRING_TABLE) \
F(MINOR_MC_CLEAR_WEAK_LISTS) \
F(MINOR_MC_EVACUATE) \
F(MINOR_MC_EVACUATE_CLEAN_UP) \
F(MINOR_MC_EVACUATE_COPY) \
F(MINOR_MC_EVACUATE_EPILOGUE) \
F(MINOR_MC_EVACUATE_PROLOGUE) \
F(MINOR_MC_EVACUATE_REBALANCE) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_TOSPACE) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_OLD) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK) \
F(MINOR_MC_MARK) \
F(MINOR_MC_MARK_GLOBAL_HANDLES) \
F(MINOR_MC_MARK_SEED) \
F(MINOR_MC_MARK_ROOTS) \
F(MINOR_MC_MARK_WEAK) \
F(MINOR_MC_MARKING_DEQUE) \
F(MINOR_MC_RESET_LIVENESS) \
F(MINOR_MC_SWEEPING) \
F(SCAVENGER_EVACUATE) \
F(SCAVENGER_OLD_TO_NEW_POINTERS) \
F(SCAVENGER_ROOTS) \
F(SCAVENGER_SCAVENGE) \
F(SCAVENGER_SEMISPACE) \
#define TRACER_SCOPES(F) \
INCREMENTAL_SCOPES(F) \
F(HEAP_EPILOGUE) \
F(HEAP_EPILOGUE_REDUCE_NEW_SPACE) \
F(HEAP_EXTERNAL_EPILOGUE) \
F(HEAP_EXTERNAL_PROLOGUE) \
F(HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) \
F(HEAP_PROLOGUE) \
F(MC_CLEAR) \
F(MC_CLEAR_DEPENDENT_CODE) \
F(MC_CLEAR_MAPS) \
F(MC_CLEAR_SLOTS_BUFFER) \
F(MC_CLEAR_STORE_BUFFER) \
F(MC_CLEAR_STRING_TABLE) \
F(MC_CLEAR_WEAK_CELLS) \
F(MC_CLEAR_WEAK_COLLECTIONS) \
F(MC_CLEAR_WEAK_LISTS) \
F(MC_EPILOGUE) \
F(MC_EVACUATE) \
F(MC_EVACUATE_CANDIDATES) \
F(MC_EVACUATE_CLEAN_UP) \
F(MC_EVACUATE_COPY) \
F(MC_EVACUATE_EPILOGUE) \
F(MC_EVACUATE_PROLOGUE) \
F(MC_EVACUATE_REBALANCE) \
F(MC_EVACUATE_UPDATE_POINTERS) \
F(MC_EVACUATE_UPDATE_POINTERS_SLOTS) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
F(MC_FINISH) \
F(MC_MARK) \
F(MC_MARK_FINISH_INCREMENTAL) \
F(MC_MARK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE) \
F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE_HARMONY) \
F(MC_MARK_WRAPPER_EPILOGUE) \
F(MC_MARK_WRAPPER_PROLOGUE) \
F(MC_MARK_WRAPPER_TRACING) \
F(MC_PROLOGUE) \
F(MC_SWEEP) \
F(MC_SWEEP_CODE) \
F(MC_SWEEP_MAP) \
F(MC_SWEEP_OLD) \
F(MINOR_MC) \
F(MINOR_MC_CLEAR) \
F(MINOR_MC_CLEAR_STRING_TABLE) \
F(MINOR_MC_CLEAR_WEAK_LISTS) \
F(MINOR_MC_EVACUATE) \
F(MINOR_MC_EVACUATE_CLEAN_UP) \
F(MINOR_MC_EVACUATE_COPY) \
F(MINOR_MC_EVACUATE_EPILOGUE) \
F(MINOR_MC_EVACUATE_PROLOGUE) \
F(MINOR_MC_EVACUATE_REBALANCE) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK) \
F(MINOR_MC_MARK) \
F(MINOR_MC_MARK_GLOBAL_HANDLES) \
F(MINOR_MC_MARK_SEED) \
F(MINOR_MC_MARK_ROOTS) \
F(MINOR_MC_MARK_WEAK) \
F(MINOR_MC_MARKING_DEQUE) \
F(MINOR_MC_RESET_LIVENESS) \
F(MINOR_MC_SWEEPING) \
F(SCAVENGER_EVACUATE) \
F(SCAVENGER_OLD_TO_NEW_POINTERS) \
F(SCAVENGER_ROOTS) \
F(SCAVENGER_SCAVENGE) \
F(SCAVENGER_SEMISPACE) \
F(SCAVENGER_WEAK)
#define TRACE_GC(tracer, scope_id) \
......
......@@ -4236,197 +4236,238 @@ void MarkCompactCollector::Evacuate() {
#endif
}
template <RememberedSetType type>
class PointerUpdateJobTraits {
class UpdatingItem : public ItemParallelJob::Item {
public:
typedef int PerPageData; // Per page data is not used in this job.
typedef const MarkCompactCollectorBase* PerTaskData;
virtual ~UpdatingItem() {}
virtual void Process() = 0;
};
static void ProcessPageInParallel(Heap* heap, PerTaskData task_data,
MemoryChunk* chunk, PerPageData) {
base::LockGuard<base::RecursiveMutex> guard(chunk->mutex());
UpdateUntypedPointers(heap, chunk, task_data);
UpdateTypedPointers(heap, chunk, task_data);
class PointersUpatingTask : public ItemParallelJob::Task {
public:
explicit PointersUpatingTask(Isolate* isolate)
: ItemParallelJob::Task(isolate) {}
void RunInParallel() override {
UpdatingItem* item = nullptr;
while ((item = GetItem<UpdatingItem>()) != nullptr) {
item->Process();
item->MarkFinished();
}
};
};
class ToSpaceUpdatingItem : public UpdatingItem {
public:
explicit ToSpaceUpdatingItem(MemoryChunk* chunk, Address start, Address end,
MarkingState marking_state)
: chunk_(chunk),
start_(start),
end_(end),
marking_state_(marking_state) {}
virtual ~ToSpaceUpdatingItem() {}
void Process() override {
if (chunk_->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
// New->new promoted pages contain garbage so they require iteration using
// markbits.
ProcessVisitLive();
} else {
ProcessVisitAll();
}
}
private:
static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk,
const MarkCompactCollectorBase* collector) {
if (type == OLD_TO_NEW) {
RememberedSet<OLD_TO_NEW>::Iterate(
chunk, [heap, collector](Address slot) {
return CheckAndUpdateOldToNewSlot(heap, slot, collector);
});
} else {
RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
return UpdateSlot(reinterpret_cast<Object**>(slot));
});
void ProcessVisitAll() {
PointersUpdatingVisitor visitor;
for (Address cur = start_; cur < end_;) {
HeapObject* object = HeapObject::FromAddress(cur);
Map* map = object->map();
int size = object->SizeFromMap(map);
object->IterateBody(map->instance_type(), size, &visitor);
cur += size;
}
}
static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk,
const MarkCompactCollectorBase* collector) {
if (type == OLD_TO_OLD) {
Isolate* isolate = heap->isolate();
RememberedSet<OLD_TO_OLD>::IterateTyped(
chunk,
[isolate](SlotType slot_type, Address host_addr, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, slot_type,
slot, UpdateSlot);
});
} else {
Isolate* isolate = heap->isolate();
RememberedSet<OLD_TO_NEW>::IterateTyped(
chunk, [isolate, heap, collector](SlotType slot_type,
Address host_addr, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
isolate, slot_type, slot, [heap, collector](Object** slot) {
return CheckAndUpdateOldToNewSlot(
heap, reinterpret_cast<Address>(slot), collector);
});
});
void ProcessVisitLive() {
// For young generation evacuations we want to visit grey objects, for
// full MC, we need to visit black objects.
PointersUpdatingVisitor visitor;
LiveObjectIterator<kAllLiveObjects> it(chunk_, marking_state_);
HeapObject* object = nullptr;
while ((object = it.Next()) != nullptr) {
object->IterateBodyFast(&visitor);
}
}
static SlotCallbackResult CheckAndUpdateOldToNewSlot(
Heap* heap, Address slot_address,
const MarkCompactCollectorBase* collector) {
MemoryChunk* chunk_;
Address start_;
Address end_;
MarkingState marking_state_;
};
template <RememberedSetType type>
class RememberedSetUpdatingItem : public UpdatingItem {
public:
explicit RememberedSetUpdatingItem(Heap* heap,
MarkCompactCollectorBase* collector,
MemoryChunk* chunk)
: heap_(heap), collector_(collector), chunk_(chunk) {}
virtual ~RememberedSetUpdatingItem() {}
void Process() override {
base::LockGuard<base::RecursiveMutex> guard(chunk_->mutex());
UpdateUntypedPointers();
UpdateTypedPointers();
}
private:
inline SlotCallbackResult CheckAndUpdateOldToNewSlot(Address slot_address) {
Object** slot = reinterpret_cast<Object**>(slot_address);
if (heap->InFromSpace(*slot)) {
if (heap_->InFromSpace(*slot)) {
HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
DCHECK(heap_object->IsHeapObject());
MapWord map_word = heap_object->map_word();
// There could still be stale pointers in large object space, map space,
// and old space for pages that have been promoted.
if (map_word.IsForwardingAddress()) {
*slot = map_word.ToForwardingAddress();
// The write is guarded by the page lock, but still needs to be atomic
// as the slot could be required for actually iterating objects, e.g.,
// if it is part of a Map and thus be read concurrently by some other
// task.
base::Relaxed_Store(
reinterpret_cast<base::AtomicWord*>(slot_address),
reinterpret_cast<base::AtomicWord>(map_word.ToForwardingAddress()));
}
// If the object was in from space before and is after executing the
// callback in to space, the object is still live.
// Unfortunately, we do not know about the slot. It could be in a
// just freed free space object.
if (heap->InToSpace(*slot)) {
if (heap_->InToSpace(*slot)) {
return KEEP_SLOT;
}
} else if (heap->InToSpace(*slot)) {
} else if (heap_->InToSpace(*slot)) {
// Slots can point to "to" space if the page has been moved, or if the
// slot has been recorded multiple times in the remembered set. Since
// there is no forwarding information present we need to check the
// markbits to determine liveness.
// slot has been recorded multiple times in the remembered set, or
// if the slot was already updated during old->old updating.
// In case the page has been moved, check markbits to determine liveness
// of the slot. In the other case, the slot can just be kept.
HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
// IsBlackOrGrey is required because objects are marked as grey for
// the young generation collector while they are black for the full MC.
if (ObjectMarking::IsBlackOrGrey(heap_object,
collector->marking_state(heap_object)))
return KEEP_SLOT;
// the young generation collector while they are black for the full MC.);
if (Page::FromAddress(heap_object->address())
->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
if (ObjectMarking::IsBlackOrGrey(
heap_object, collector_->marking_state(heap_object))) {
return KEEP_SLOT;
} else {
return REMOVE_SLOT;
}
}
return KEEP_SLOT;
} else {
DCHECK(!heap->InNewSpace(*slot));
DCHECK(!heap_->InNewSpace(*slot));
}
return REMOVE_SLOT;
}
};
template <RememberedSetType type>
void MarkCompactCollectorBase::UpdatePointersInParallel(
base::Semaphore* semaphore) {
PageParallelJob<PointerUpdateJobTraits<type> > job(
heap(), heap()->isolate()->cancelable_task_manager(), semaphore);
RememberedSet<type>::IterateMemoryChunks(
heap(), [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
const int num_pages = job.NumberOfPages();
if (num_pages == 0) return;
const int num_tasks =
NumberOfParallelPointerUpdateTasks(num_pages, old_to_new_slots_);
job.Run(num_tasks, [this](int i) { return this; });
}
class ToSpacePointerUpdateJobTraits {
public:
struct PageData {
Address start;
Address end;
MarkingState marking_state;
};
typedef PageData PerPageData;
typedef PointersUpdatingVisitor* PerTaskData;
static void ProcessPageInParallel(Heap* heap, PerTaskData visitor,
MemoryChunk* chunk, PerPageData page_data) {
if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
// New->new promoted pages contain garbage so they require iteration
// using markbits.
ProcessPageInParallelVisitLive(heap, visitor, chunk, page_data);
void UpdateUntypedPointers() {
if (type == OLD_TO_NEW) {
RememberedSet<OLD_TO_NEW>::Iterate(chunk_, [this](Address slot) {
return CheckAndUpdateOldToNewSlot(slot);
});
} else {
ProcessPageInParallelVisitAll(heap, visitor, chunk, page_data);
RememberedSet<OLD_TO_OLD>::Iterate(chunk_, [](Address slot) {
return UpdateSlot(reinterpret_cast<Object**>(slot));
});
}
}
private:
static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
MemoryChunk* chunk,
PerPageData page_data) {
for (Address cur = page_data.start; cur < page_data.end;) {
HeapObject* object = HeapObject::FromAddress(cur);
Map* map = object->map();
int size = object->SizeFromMap(map);
object->IterateBody(map->instance_type(), size, visitor);
cur += size;
void UpdateTypedPointers() {
Isolate* isolate = heap_->isolate();
if (type == OLD_TO_NEW) {
RememberedSet<OLD_TO_NEW>::IterateTyped(
chunk_,
[isolate, this](SlotType slot_type, Address host_addr, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
isolate, slot_type, slot, [this](Object** slot) {
return CheckAndUpdateOldToNewSlot(
reinterpret_cast<Address>(slot));
});
});
} else {
RememberedSet<OLD_TO_OLD>::IterateTyped(
chunk_,
[isolate](SlotType slot_type, Address host_addr, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, slot_type,
slot, UpdateSlot);
});
}
}
static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
MemoryChunk* chunk,
PerPageData page_data) {
// For young generation evacuations we want to visit grey objects, for
// full MC, we need to visit black objects.
LiveObjectIterator<kAllLiveObjects> it(chunk, page_data.marking_state);
HeapObject* object = NULL;
while ((object = it.Next()) != NULL) {
Map* map = object->map();
int size = object->SizeFromMap(map);
object->IterateBody(map->instance_type(), size, visitor);
}
}
Heap* heap_;
MarkCompactCollectorBase* collector_;
MemoryChunk* chunk_;
};
void MarkCompactCollectorBase::UpdateToSpacePointersInParallel(
base::Semaphore* semaphore) {
PageParallelJob<ToSpacePointerUpdateJobTraits> job(
heap(), isolate()->cancelable_task_manager(), semaphore);
int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
ItemParallelJob* job) {
// Seed to space pages.
const Address space_start = heap()->new_space()->bottom();
const Address space_end = heap()->new_space()->top();
int pages = 0;
for (Page* page : PageRange(space_start, space_end)) {
Address start =
page->Contains(space_start) ? space_start : page->area_start();
Address end = page->Contains(space_end) ? space_end : page->area_end();
job.AddPage(page, {start, end, marking_state(page)});
job->AddItem(
new ToSpaceUpdatingItem(page, start, end, marking_state(page)));
pages++;
}
PointersUpdatingVisitor visitor;
const int num_tasks =
NumberOfParallelToSpacePointerUpdateTasks(job.NumberOfPages());
job.Run(num_tasks, [&visitor](int i) { return &visitor; });
if (pages == 0) return 0;
return NumberOfParallelToSpacePointerUpdateTasks(pages);
}
template <RememberedSetType type>
int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
ItemParallelJob* job) {
int pages = 0;
RememberedSet<type>::IterateMemoryChunks(
heap(), [this, &job, &pages](MemoryChunk* chunk) {
job->AddItem(new RememberedSetUpdatingItem<type>(heap(), this, chunk));
pages++;
});
if (pages == 0) return 0;
return NumberOfParallelPointerUpdateTasks(
pages, type == OLD_TO_NEW ? old_to_new_slots_ : -1);
}
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
PointersUpdatingVisitor updating_visitor;
ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
&page_parallel_job_semaphore_);
const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
const int remembered_set_tasks_old_new =
CollectRememberedSetUpdatingItems<OLD_TO_NEW>(&updating_job);
const int remembered_set_tasks_old_old =
CollectRememberedSetUpdatingItems<OLD_TO_OLD>(&updating_job);
const int num_tasks = Max(to_space_tasks, Max(remembered_set_tasks_old_new,
remembered_set_tasks_old_old));
for (int i = 0; i < num_tasks; i++) {
updating_job.AddTask(new PointersUpatingTask(isolate()));
}
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
UpdateToSpacePointersInParallel(&page_parallel_job_semaphore_);
// Update roots.
PointersUpdatingVisitor updating_visitor;
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
UpdatePointersInParallel<OLD_TO_NEW>(&page_parallel_job_semaphore_);
}
{
Heap* heap = this->heap();
TRACE_GC(heap->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
UpdatePointersInParallel<OLD_TO_OLD>(&page_parallel_job_semaphore_);
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS);
updating_job.Run();
}
{
......@@ -4446,26 +4487,26 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
PointersUpdatingVisitor updating_visitor;
ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
&page_parallel_job_semaphore_);
const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
const int remembered_set_tasks =
CollectRememberedSetUpdatingItems<OLD_TO_NEW>(&updating_job);
const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
for (int i = 0; i < num_tasks; i++) {
updating_job.AddTask(new PointersUpatingTask(isolate()));
}
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
{
TRACE_GC(
heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_TOSPACE);
UpdateToSpacePointersInParallel(&page_parallel_job_semaphore_);
}
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
}
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_OLD);
UpdatePointersInParallel<OLD_TO_NEW>(&page_parallel_job_semaphore_);
}
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
}
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
updating_job.Run();
}
{
......
......@@ -22,6 +22,7 @@ namespace internal {
// Forward declarations.
class EvacuationJobTraits;
class HeapObjectVisitor;
class ItemParallelJob;
class LocalWorkStealingMarkingDeque;
class MarkCompactCollector;
class MinorMarkCompactCollector;
......@@ -276,9 +277,9 @@ class MarkCompactCollectorBase {
// Returns whether this page should be moved according to heuristics.
bool ShouldMovePage(Page* p, intptr_t live_bytes);
int CollectToSpaceUpdatingItems(ItemParallelJob* job);
template <RememberedSetType type>
void UpdatePointersInParallel(base::Semaphore* semaphore);
void UpdateToSpacePointersInParallel(base::Semaphore* semaphore);
int CollectRememberedSetUpdatingItems(ItemParallelJob* job);
int NumberOfParallelCompactionTasks(int pages);
int NumberOfParallelPointerUpdateTasks(int pages, int slots);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment