Commit 561e1629 authored by ulan, committed by Commit bot

Implement parallel pointer updates after evacuation.

BUG=chromium:568495
LOG=NO

Review URL: https://codereview.chromium.org/1775003003

Cr-Commit-Position: refs/heads/master@{#34665}
parent 5c73b25f
BUILD.gn
@@ -1074,6 +1074,7 @@ source_set("v8_base") {
"src/heap/objects-visiting-inl.h",
"src/heap/objects-visiting.cc",
"src/heap/objects-visiting.h",
"src/heap/page-parallel-job.h",
"src/heap/remembered-set.cc",
"src/heap/remembered-set.h",
"src/heap/scavenge-job.h",
src/flag-definitions.h
@@ -702,6 +702,8 @@ DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
"at most try this many times to finalize incremental marking")
DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
DEFINE_BOOL(parallel_compaction, true, "use parallel compaction")
DEFINE_BOOL(parallel_pointer_update, false,
"use parallel pointer update during compaction")
DEFINE_BOOL(trace_incremental_marking, false,
"trace progress of the incremental marking")
DEFINE_BOOL(track_gc_object_stats, false,
src/heap/mark-compact.cc
@@ -21,6 +21,7 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/page-parallel-job.h"
#include "src/heap/spaces-inl.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
@@ -2788,22 +2789,6 @@ class PointersUpdatingVisitor : public ObjectVisitor {
Heap* heap_;
};
-static void UpdatePointer(HeapObject** address, HeapObject* object) {
-  MapWord map_word = object->map_word();
-  // Since we only filter invalid slots in old space, the store buffer can
-  // still contain stale pointers in large object and in map spaces. Ignore
-  // these pointers here.
-  DCHECK(map_word.IsForwardingAddress() ||
-         !object->GetHeap()->old_space()->Contains(
-             reinterpret_cast<Address>(address)));
-  if (map_word.IsForwardingAddress()) {
-    // Update the corresponding slot.
-    *address = map_word.ToForwardingAddress();
-  }
-}
static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
Object** p) {
MapWord map_word = HeapObject::cast(*p)->map_word();
@@ -3158,8 +3143,10 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
intptr_t compaction_speed =
heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
-  const int available_cores =
-      Max(1, base::SysInfo::NumberOfProcessors() - kNumSweepingTasks - 1);
+  const int available_cores = Max(
+      1, static_cast<int>(
+             V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) -
+             kNumSweepingTasks - 1);
int tasks;
if (compaction_speed > 0) {
tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
@@ -3585,6 +3572,81 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
#endif
}
template <PointerDirection direction>
class PointerUpdateJobTraits {
public:
typedef int PerPageData; // Per page data is not used in this job.
typedef PointersUpdatingVisitor* PerTaskData;
static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
MemoryChunk* chunk, PerPageData) {
UpdateUntypedPointers(heap, chunk);
UpdateTypedPointers(heap, chunk, visitor);
return true;
}
static const bool NeedSequentialFinalization = false;
static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
}
private:
static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
if (direction == OLD_TO_NEW) {
RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap, chunk,
UpdateOldToNewSlot);
} else {
RememberedSet<OLD_TO_OLD>::Iterate(chunk, [heap](Address slot) {
PointersUpdatingVisitor::UpdateSlot(heap,
reinterpret_cast<Object**>(slot));
return REMOVE_SLOT;
});
}
}
static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk,
PointersUpdatingVisitor* visitor) {
if (direction == OLD_TO_OLD) {
Isolate* isolate = heap->isolate();
RememberedSet<OLD_TO_OLD>::IterateTyped(
chunk, [isolate, visitor](SlotType type, Address slot) {
UpdateTypedSlot(isolate, visitor, type, slot);
return REMOVE_SLOT;
});
}
}
static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
MapWord map_word = object->map_word();
// Since we only filter invalid slots in old space, the store buffer can
// still contain stale pointers in large object and in map spaces. Ignore
// these pointers here.
DCHECK(map_word.IsForwardingAddress() ||
!object->GetHeap()->old_space()->Contains(
reinterpret_cast<Address>(address)));
if (map_word.IsForwardingAddress()) {
// Update the corresponding slot.
*address = map_word.ToForwardingAddress();
}
}
};
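The traits class fixes the remembered-set direction at compile time: the OLD_TO_NEW instantiation visits untyped slots through the (HeapObject**, HeapObject*) wrapper so that UpdateOldToNewSlot can consult the moved object's map word, while OLD_TO_OLD updates raw slot addresses in place and is the only direction with typed-slot work.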
int NumberOfPointerUpdateTasks(int pages) {
if (!FLAG_parallel_pointer_update) return 1;
const int kMaxTasks = 4;
const int kPagesPerTask = 4;
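  // E.g. 10 pages: Min(kMaxTasks, (10 + 3) / 4) = Min(4, 3) = 3 tasks.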
return Min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask);
}
template <PointerDirection direction>
void UpdatePointersInParallel(Heap* heap) {
PageParallelJob<PointerUpdateJobTraits<direction> > job(
heap, heap->isolate()->cancelable_task_manager());
RememberedSet<direction>::IterateMemoryChunks(
heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
PointersUpdatingVisitor visitor(heap);
int num_pages = job.NumberOfPages();
int num_tasks = NumberOfPointerUpdateTasks(num_pages);
job.Run(num_tasks, [&visitor](int i) { return &visitor; });
}
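Note that the parallel path is gated on --parallel_pointer_update, which defaults to false above; with the flag off, NumberOfPointerUpdateTasks returns 1 and the same PageParallelJob machinery processes every page on the main thread.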
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
GCTracer::Scope gc_scope(heap()->tracer(),
@@ -3606,7 +3668,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
// Update roots.
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
-  RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap_, UpdatePointer);
+  UpdatePointersInParallel<OLD_TO_NEW>(heap_);
}
{
@@ -3614,19 +3676,7 @@
GCTracer::Scope gc_scope(
heap->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
-    RememberedSet<OLD_TO_OLD>::Iterate(heap, [heap](Address slot) {
-      PointersUpdatingVisitor::UpdateSlot(heap,
-                                          reinterpret_cast<Object**>(slot));
-      return REMOVE_SLOT;
-    });
-    Isolate* isolate = heap->isolate();
-    PointersUpdatingVisitor* visitor = &updating_visitor;
-    RememberedSet<OLD_TO_OLD>::IterateTyped(
-        heap, [isolate, visitor](SlotType type, Address slot) {
-          UpdateTypedSlot(isolate, visitor, type, slot);
-          return REMOVE_SLOT;
-        });
+    UpdatePointersInParallel<OLD_TO_OLD>(heap_);
}
{
src/heap/page-parallel-job.h (new file)
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_PAGE_PARALLEL_JOB_
#define V8_HEAP_PAGE_PARALLEL_JOB_
#include "src/allocation.h"
#include "src/cancelable-task.h"
#include "src/utils.h"
#include "src/v8.h"
namespace v8 {
namespace internal {
class Heap;
class Isolate;
// This class manages background tasks that process a set of pages in parallel.
// The JobTraits class needs to define:
// - PerPageData type - state associated with each page.
// - PerTaskData type - state associated with each task.
// - static bool ProcessPageInParallel(Heap* heap,
// PerTaskData task_data,
// MemoryChunk* page,
// PerPageData page_data)
// The function should return true iff processing succeeded.
// - static const bool NeedSequentialFinalization
// - static void FinalizePageSequentially(Heap* heap,
// bool processing_succeeded,
// MemoryChunk* page,
// PerPageData page_data)
template <typename JobTraits>
class PageParallelJob {
public:
PageParallelJob(Heap* heap, CancelableTaskManager* cancelable_task_manager)
: heap_(heap),
cancelable_task_manager_(cancelable_task_manager),
items_(nullptr),
num_items_(0),
pending_tasks_(0) {}
~PageParallelJob() {
Item* item = items_;
while (item != nullptr) {
Item* next = item->next;
delete item;
item = next;
}
}
void AddPage(MemoryChunk* chunk, typename JobTraits::PerPageData data) {
Item* item = new Item(chunk, data, items_);
items_ = item;
++num_items_;
}
int NumberOfPages() { return num_items_; }
// Runs the given number of tasks in parallel and processes the previously
// added pages. This function blocks until all tasks finish.
// The callback takes the index of a task and returns data for that task.
template <typename Callback>
void Run(int num_tasks, Callback per_task_data_callback) {
if (num_items_ == 0) return;
DCHECK_GE(num_tasks, 1);
uint32_t task_ids[kMaxNumberOfTasks];
const int max_num_tasks = Min(
kMaxNumberOfTasks,
static_cast<int>(
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
num_tasks = Max(1, Min(num_tasks, max_num_tasks));
int items_per_task = (num_items_ + num_tasks - 1) / num_tasks;
int start_index = 0;
Task* main_task = nullptr;
for (int i = 0; i < num_tasks; i++, start_index += items_per_task) {
if (start_index >= num_items_) {
start_index -= num_items_;
}
Task* task = new Task(heap_, items_, num_items_, start_index,
&pending_tasks_, per_task_data_callback(i));
task_ids[i] = task->id();
if (i > 0) {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
task, v8::Platform::kShortRunningTask);
} else {
main_task = task;
}
}
// Contribute on main thread.
main_task->Run();
delete main_task;
// Wait for background tasks.
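    // Every task that was not aborted signals the semaphore exactly once,
    // so one Wait per unaborted task balances the count.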
for (int i = 0; i < num_tasks; i++) {
if (!cancelable_task_manager_->TryAbort(task_ids[i])) {
pending_tasks_.Wait();
}
}
if (JobTraits::NeedSequentialFinalization) {
Item* item = items_;
while (item != nullptr) {
bool success = (item->state.Value() == kFinished);
JobTraits::FinalizePageSequentially(heap_, item->chunk, success,
item->data);
item = item->next;
}
}
}
private:
static const int kMaxNumberOfTasks = 10;
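  // An item's state moves from kAvailable to kProcessing when a task claims
  // it with TrySetValue, and then to kFinished or kFailed with the result.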
enum ProcessingState { kAvailable, kProcessing, kFinished, kFailed };
struct Item : public Malloced {
Item(MemoryChunk* chunk, typename JobTraits::PerPageData data, Item* next)
: chunk(chunk), state(kAvailable), data(data), next(next) {}
MemoryChunk* chunk;
AtomicValue<ProcessingState> state;
typename JobTraits::PerPageData data;
Item* next;
};
class Task : public CancelableTask {
public:
Task(Heap* heap, Item* items, int num_items, int start_index,
base::Semaphore* on_finish, typename JobTraits::PerTaskData data)
: CancelableTask(heap->isolate()),
heap_(heap),
items_(items),
num_items_(num_items),
start_index_(start_index),
on_finish_(on_finish),
data_(data) {}
virtual ~Task() {}
private:
// v8::internal::CancelableTask overrides.
void RunInternal() override {
// Each task starts at a different index to improve parallelization.
Item* current = items_;
int skip = start_index_;
while (skip-- > 0) {
current = current->next;
}
for (int i = 0; i < num_items_; i++) {
if (current->state.TrySetValue(kAvailable, kProcessing)) {
bool success = JobTraits::ProcessPageInParallel(
heap_, data_, current->chunk, current->data);
current->state.SetValue(success ? kFinished : kFailed);
}
current = current->next;
// Wrap around if needed.
if (current == nullptr) {
current = items_;
}
}
on_finish_->Signal();
}
Heap* heap_;
Item* items_;
int num_items_;
int start_index_;
base::Semaphore* on_finish_;
typename JobTraits::PerTaskData data_;
DISALLOW_COPY_AND_ASSIGN(Task);
};
Heap* heap_;
CancelableTaskManager* cancelable_task_manager_;
Item* items_;
int num_items_;
base::Semaphore pending_tasks_;
DISALLOW_COPY_AND_ASSIGN(PageParallelJob);
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_PAGE_PARALLEL_JOB_
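For readers outside V8, the scheduling scheme in Run() above boils down to staggered start indices plus atomic claiming. The following is a minimal, self-contained sketch of that scheme in plain C++11 using std::thread and std::atomic; every name in it (Item, RunTask, the doubled payload) is illustrative, and none of it is V8 API:

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

enum State { kAvailable, kProcessing, kFinished };

struct Item {
  int payload = 0;
  std::atomic<State> state{kAvailable};
};

// Walk every item once, starting at a staggered offset and wrapping around,
// so tasks begin on disjoint ranges but can pick up another task's leftovers.
void RunTask(std::vector<Item>* items, int start_index) {
  const int n = static_cast<int>(items->size());
  for (int i = 0; i < n; i++) {
    Item& item = (*items)[(start_index + i) % n];
    State expected = kAvailable;
    // Only the task that wins this compare-and-swap processes the item.
    if (item.state.compare_exchange_strong(expected, kProcessing)) {
      item.payload *= 2;  // Stand-in for ProcessPageInParallel.
      item.state.store(kFinished);
    }
  }
}

int main() {
  const int kNumItems = 10;
  const int kNumTasks = 3;
  std::vector<Item> items(kNumItems);
  for (int i = 0; i < kNumItems; i++) items[i].payload = i;
  const int items_per_task = (kNumItems + kNumTasks - 1) / kNumTasks;
  std::vector<std::thread> tasks;
  // PageParallelJob runs task 0 on the main thread; here all are threads.
  for (int t = 0; t < kNumTasks; t++) {
    tasks.emplace_back(RunTask, &items, (t * items_per_task) % kNumItems);
  }
  for (std::thread& t : tasks) t.join();
  for (const Item& item : items) std::printf("%d ", item.payload);
  std::printf("\n");  // Prints 0 2 4 ... 18: each item claimed exactly once.
  return 0;
}

The real class layers cancelation (CancelableTaskManager::TryAbort) and optional sequential finalization on top of this core, which the sketch leaves out.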
src/heap/remembered-set.h
@@ -59,21 +59,41 @@ class RememberedSet {
// The callback should take (Address slot) and return SlotCallbackResult.
template <typename Callback>
static void Iterate(Heap* heap, Callback callback) {
IterateMemoryChunks(
heap, [callback](MemoryChunk* chunk) { Iterate(chunk, callback); });
}
// Iterates over all memory chunks that contain non-empty slot sets.
// The callback should take (MemoryChunk* chunk) and return void.
template <typename Callback>
static void IterateMemoryChunks(Heap* heap, Callback callback) {
MemoryChunkIterator it(heap, direction == OLD_TO_OLD
? MemoryChunkIterator::ALL
: MemoryChunkIterator::ALL_BUT_CODE_SPACE);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
SlotSet* slots = GetSlotSet(chunk);
-      if (slots != nullptr) {
-        size_t pages =
-            (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
-        int new_count = 0;
-        for (size_t page = 0; page < pages; page++) {
-          new_count += slots[page].Iterate(callback);
-        }
-        if (new_count == 0) {
-          ReleaseSlotSet(chunk);
-        }
-      }
+      TypedSlotSet* typed_slots = GetTypedSlotSet(chunk);
+      if (slots != nullptr || typed_slots != nullptr) {
+        callback(chunk);
+      }
}
}
// Iterates and filters the remembered set in the given memory chunk with
// the given callback. The callback should take (Address slot) and return
// SlotCallbackResult.
template <typename Callback>
static void Iterate(MemoryChunk* chunk, Callback callback) {
SlotSet* slots = GetSlotSet(chunk);
if (slots != nullptr) {
size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
int new_count = 0;
for (size_t page = 0; page < pages; page++) {
new_count += slots[page].Iterate(callback);
}
if (new_count == 0) {
ReleaseSlotSet(chunk);
}
}
}
@@ -91,6 +111,14 @@
});
}
template <typename Callback>
static void IterateWithWrapper(Heap* heap, MemoryChunk* chunk,
Callback callback) {
Iterate(chunk, [heap, callback](Address addr) {
return Wrapper(heap, addr, callback);
});
}
// Given a page and a typed slot in that page, this function adds the slot
// to the remembered set.
static void InsertTyped(Page* page, SlotType slot_type, Address slot_addr) {
@@ -116,20 +144,16 @@
}
}
-  // Iterates and filters typed old to old pointers with the given callback.
-  // The callback should take (SlotType slot_type, Address slot_addr) and
-  // return SlotCallbackResult.
+  // Iterates and filters typed old to old pointers in the given memory chunk
+  // with the given callback. The callback should take (SlotType slot_type,
+  // Address slot_addr) and return SlotCallbackResult.
template <typename Callback>
-  static void IterateTyped(Heap* heap, Callback callback) {
-    MemoryChunkIterator it(heap, MemoryChunkIterator::ALL_BUT_MAP_SPACE);
-    MemoryChunk* chunk;
-    while ((chunk = it.next()) != nullptr) {
-      TypedSlotSet* slots = chunk->typed_old_to_old_slots();
-      if (slots != nullptr) {
-        int new_count = slots->Iterate(callback);
-        if (new_count == 0) {
-          chunk->ReleaseTypedOldToOldSlots();
-        }
-      }
-    }
-  }
+  static void IterateTyped(MemoryChunk* chunk, Callback callback) {
+    TypedSlotSet* slots = chunk->typed_old_to_old_slots();
+    if (slots != nullptr) {
+      int new_count = slots->Iterate(callback);
+      if (new_count == 0) {
+        chunk->ReleaseTypedOldToOldSlots();
+      }
+    }
+  }
@@ -162,6 +186,14 @@
}
}
static TypedSlotSet* GetTypedSlotSet(MemoryChunk* chunk) {
if (direction == OLD_TO_OLD) {
return chunk->typed_old_to_old_slots();
} else {
return nullptr;
}
}
static void ReleaseSlotSet(MemoryChunk* chunk) {
if (direction == OLD_TO_OLD) {
chunk->ReleaseOldToOldSlots();
tools/gyp/v8.gyp
@@ -894,6 +894,7 @@
'../../src/heap/objects-visiting-inl.h',
'../../src/heap/objects-visiting.cc',
'../../src/heap/objects-visiting.h',
'../../src/heap/page-parallel-job.h',
'../../src/heap/remembered-set.cc',
'../../src/heap/remembered-set.h',
'../../src/heap/scavenge-job.h',