Commit 5359d868 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Add support for multiple concurrent marking tasks.

BUG=chromium:694255

Change-Id: Ib0403a2d406428d2cd7896521abb6e95c3841c1c
Reviewed-on: https://chromium-review.googlesource.com/563364
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46505}
parent b2133cd6
...@@ -255,10 +255,11 @@ class ConcurrentMarkingVisitor final ...@@ -255,10 +255,11 @@ class ConcurrentMarkingVisitor final
// Background task that drives one concurrent-marking worker.
// Each task owns a dedicated task_id (used to address its worklist views)
// and a per-task mutex that lets the main thread pause the worker
// (see ConcurrentMarking::PauseScope).
class ConcurrentMarking::Task : public CancelableTask {
 public:
  Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
       base::Semaphore* on_finish, base::Mutex* lock, int task_id)
      : CancelableTask(isolate),
        concurrent_marking_(concurrent_marking),
        on_finish_(on_finish),
        lock_(lock),
        task_id_(task_id) {}

  virtual ~Task() {}

 private:
  // v8::internal::CancelableTask overrides.
  void RunInternal() override {
    // Run the marking loop for this worker, then signal the semaphore so
    // EnsureCompleted() on the main thread can account for the finished task.
    concurrent_marking_->Run(task_id_, lock_);
    on_finish_->Signal();
  }

  ConcurrentMarking* concurrent_marking_;  // Not owned.
  base::Semaphore* on_finish_;             // Not owned; signaled once on exit.
  base::Mutex* lock_;                      // Not owned; per-task pause lock.
  int task_id_;                            // 1-based worker id.
  DISALLOW_COPY_AND_ASSIGN(Task);
};
...@@ -279,25 +281,24 @@ class ConcurrentMarking::Task : public CancelableTask { ...@@ -279,25 +281,24 @@ class ConcurrentMarking::Task : public CancelableTask {
// Constructs the concurrent-marking coordinator.
// |shared| and |bailout| are the marking worklists shared with the main
// thread; neither is owned. No tasks are running until Start() is called
// (pending_task_count_ starts at 0).
ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
                                     MarkingWorklist* bailout)
    : heap_(heap),
      shared_(shared),
      bailout_(bailout),
      pending_task_semaphore_(0),
      pending_task_count_(0) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
  CHECK(!FLAG_concurrent_marking);
#endif
}
void ConcurrentMarking::Run(int task_id) { void ConcurrentMarking::Run(int task_id, base::Mutex* lock) {
ConcurrentMarkingVisitor visitor(shared_, bailout_, task_id); ConcurrentMarkingVisitor visitor(shared_, bailout_, task_id);
double time_ms = heap_->MonotonicallyIncreasingTimeInMs(); double time_ms = heap_->MonotonicallyIncreasingTimeInMs();
size_t bytes_marked = 0; size_t bytes_marked = 0;
base::Mutex* relocation_mutex = heap_->relocation_mutex();
{ {
TimedScope scope(&time_ms); TimedScope scope(&time_ms);
while (true) { while (true) {
base::LockGuard<base::Mutex> guard(relocation_mutex); base::LockGuard<base::Mutex> guard(lock);
HeapObject* object; HeapObject* object;
if (!shared_->Pop(task_id, &object)) break; if (!shared_->Pop(task_id, &object)) break;
Address new_space_top = heap_->new_space()->original_top(); Address new_space_top = heap_->new_space()->original_top();
...@@ -313,7 +314,7 @@ void ConcurrentMarking::Run(int task_id) { ...@@ -313,7 +314,7 @@ void ConcurrentMarking::Run(int task_id) {
{ {
// Take the lock to synchronize with worklist update after // Take the lock to synchronize with worklist update after
// young generation GC. // young generation GC.
base::LockGuard<base::Mutex> guard(relocation_mutex); base::LockGuard<base::Mutex> guard(lock);
bailout_->FlushToGlobal(task_id); bailout_->FlushToGlobal(task_id);
} }
} }
...@@ -324,25 +325,38 @@ void ConcurrentMarking::Run(int task_id) { ...@@ -324,25 +325,38 @@ void ConcurrentMarking::Run(int task_id) {
} }
} }
void ConcurrentMarking::StartTask() { void ConcurrentMarking::Start() {
const int kConcurrentMarkingTaskId = 1;
if (!FLAG_concurrent_marking) return; if (!FLAG_concurrent_marking) return;
is_task_pending_ = true; pending_task_count_ = kTasks;
V8::GetCurrentPlatform()->CallOnBackgroundThread( for (int i = 0; i < kTasks; i++) {
new Task(heap_->isolate(), this, &pending_task_semaphore_, int task_id = i + 1;
kConcurrentMarkingTaskId), V8::GetCurrentPlatform()->CallOnBackgroundThread(
v8::Platform::kShortRunningTask); new Task(heap_->isolate(), this, &pending_task_semaphore_,
&task_lock_[i].lock, task_id),
v8::Platform::kShortRunningTask);
}
} }
void ConcurrentMarking::WaitForTaskToComplete() { void ConcurrentMarking::EnsureCompleted() {
if (!FLAG_concurrent_marking) return; if (!FLAG_concurrent_marking) return;
pending_task_semaphore_.Wait(); while (pending_task_count_ > 0) {
is_task_pending_ = false; pending_task_semaphore_.Wait();
pending_task_count_--;
}
} }
void ConcurrentMarking::EnsureTaskCompleted() { ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
if (IsTaskPending()) { : concurrent_marking_(concurrent_marking) {
WaitForTaskToComplete(); if (!FLAG_concurrent_marking) return;
for (int i = 0; i < kTasks; i++) {
concurrent_marking_->task_lock_[i].lock.Lock();
}
}
ConcurrentMarking::PauseScope::~PauseScope() {
if (!FLAG_concurrent_marking) return;
for (int i = kTasks - 1; i >= 0; i--) {
concurrent_marking_->task_lock_[i].lock.Unlock();
} }
} }
......
...@@ -20,24 +20,40 @@ class Worklist; ...@@ -20,24 +20,40 @@ class Worklist;
class ConcurrentMarking { class ConcurrentMarking {
public: public:
// When the scope is entered, the concurrent marking tasks
// are paused and are not looking at the heap objects.
class PauseScope {
public:
explicit PauseScope(ConcurrentMarking* concurrent_marking);
~PauseScope();
private:
ConcurrentMarking* concurrent_marking_;
};
static const int kTasks = 4;
using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>; using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
ConcurrentMarking(Heap* heap, MarkingWorklist* shared_, ConcurrentMarking(Heap* heap, MarkingWorklist* shared_,
MarkingWorklist* bailout_); MarkingWorklist* bailout_);
void StartTask(); void Start();
void WaitForTaskToComplete(); bool IsRunning() { return pending_task_count_ > 0; }
bool IsTaskPending() { return is_task_pending_; } void EnsureCompleted();
void EnsureTaskCompleted();
private: private:
struct TaskLock {
base::Mutex lock;
char cache_line_padding[64];
};
class Task; class Task;
void Run(int task_id); void Run(int task_id, base::Mutex* lock);
Heap* heap_; Heap* heap_;
base::Semaphore pending_task_semaphore_;
MarkingWorklist* shared_; MarkingWorklist* shared_;
MarkingWorklist* bailout_; MarkingWorklist* bailout_;
bool is_task_pending_; TaskLock task_lock_[kTasks];
base::Semaphore pending_task_semaphore_;
int pending_task_count_;
}; };
} // namespace internal } // namespace internal
......
...@@ -1176,7 +1176,7 @@ void Heap::MoveElements(FixedArray* array, int dst_index, int src_index, ...@@ -1176,7 +1176,7 @@ void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
DCHECK(array->map() != fixed_cow_array_map()); DCHECK(array->map() != fixed_cow_array_map());
Object** dst = array->data_start() + dst_index; Object** dst = array->data_start() + dst_index;
Object** src = array->data_start() + src_index; Object** src = array->data_start() + src_index;
if (FLAG_concurrent_marking && concurrent_marking()->IsTaskPending()) { if (FLAG_concurrent_marking && concurrent_marking()->IsRunning()) {
if (dst < src) { if (dst < src) {
for (int i = 0; i < len; i++) { for (int i = 0; i < len; i++) {
base::AsAtomicWord::Relaxed_Store( base::AsAtomicWord::Relaxed_Store(
...@@ -1647,6 +1647,7 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer { ...@@ -1647,6 +1647,7 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
void Heap::EvacuateYoungGeneration() { void Heap::EvacuateYoungGeneration() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_EVACUATE); TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_EVACUATE);
base::LockGuard<base::Mutex> guard(relocation_mutex()); base::LockGuard<base::Mutex> guard(relocation_mutex());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
if (!FLAG_concurrent_marking) { if (!FLAG_concurrent_marking) {
DCHECK(fast_promotion_mode_); DCHECK(fast_promotion_mode_);
DCHECK(CanExpandOldGeneration(new_space()->Size())); DCHECK(CanExpandOldGeneration(new_space()->Size()));
...@@ -1696,6 +1697,7 @@ static bool IsLogging(Isolate* isolate) { ...@@ -1696,6 +1697,7 @@ static bool IsLogging(Isolate* isolate) {
void Heap::Scavenge() { void Heap::Scavenge() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE); TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
base::LockGuard<base::Mutex> guard(relocation_mutex()); base::LockGuard<base::Mutex> guard(relocation_mutex());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
// There are soft limits in the allocation code, designed to trigger a mark // There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. There is no sense in trying to // sweep collection by failing allocations. There is no sense in trying to
// trigger one during scavenge: scavenges allocation should always succeed. // trigger one during scavenge: scavenges allocation should always succeed.
......
...@@ -592,8 +592,7 @@ void IncrementalMarking::StartMarking() { ...@@ -592,8 +592,7 @@ void IncrementalMarking::StartMarking() {
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG); heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
if (FLAG_concurrent_marking) { if (FLAG_concurrent_marking) {
ConcurrentMarking* concurrent_marking = heap_->concurrent_marking(); heap_->concurrent_marking()->Start();
concurrent_marking->StartTask();
} }
// Ready to start incremental marking. // Ready to start incremental marking.
......
...@@ -931,7 +931,7 @@ void MarkCompactCollector::Prepare() { ...@@ -931,7 +931,7 @@ void MarkCompactCollector::Prepare() {
// them here. // them here.
heap()->memory_allocator()->unmapper()->WaitUntilCompleted(); heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
heap()->concurrent_marking()->EnsureTaskCompleted(); heap()->concurrent_marking()->EnsureCompleted();
// Clear marking bits if incremental marking is aborted. // Clear marking bits if incremental marking is aborted.
if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) { if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
......
...@@ -28,8 +28,8 @@ TEST(ConcurrentMarking) { ...@@ -28,8 +28,8 @@ TEST(ConcurrentMarking) {
CHECK(shared.Pop(0, &object)); CHECK(shared.Pop(0, &object));
ConcurrentMarking* concurrent_marking = ConcurrentMarking* concurrent_marking =
new ConcurrentMarking(heap, &shared, &bailout); new ConcurrentMarking(heap, &shared, &bailout);
concurrent_marking->StartTask(); concurrent_marking->Start();
concurrent_marking->WaitForTaskToComplete(); concurrent_marking->EnsureCompleted();
delete concurrent_marking; delete concurrent_marking;
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment