Commit ff27dca5 authored by Nico Hartmann, committed by Commit Bot

Revert "[Heap]: Marking use Jobs."

This reverts commit 4a2b2b2e.

Reason for revert: Speculative revert due to https://ci.chromium.org/p/chromium/builders/try/linux-rel/495075?

Original change's description:
> [Heap]: Marking use Jobs.
> 
> StopRequest is removed in favor of:
> COMPLETE_TASKS_FOR_TESTING -> JoinForTesting()
> PREEMPT_TASKS -> Pause()
> COMPLETE_ONGOING_TASKS now has the same behavior as PREEMPT_TASKS
> - we should avoid waiting on the main thread as much as possible.
> 
> Change-Id: Icceeb4f0c0fda2ed234b2f26fe308b11410fcfb7
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2376166
> Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#70037}

TBR=ulan@chromium.org,etiennep@chromium.org

Change-Id: I63f24bffa0f56c6ffa1d1977fc4fb8a76b6f3ba2
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2423722
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Commit-Queue: Nico Hartmann <nicohartmann@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70049}
parent 27a8684a
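
For readers less familiar with the two scheduling models this revert switches between: the reverted change handed the platform a single v8::JobTask (worker count chosen by the platform via GetMaxConcurrency(), preemption signalled through JobDelegate::ShouldYield()), whereas the restored code posts one CancelableTask per worker and tracks completion itself. Below is a minimal, self-contained sketch of the Jobs-style shape using simplified stand-in types rather than V8's real headers; MarkingJob, InlineDelegate, the cap of 8 and the toy work counter are illustrative only.

// Minimal sketch of the Jobs-style scheduling model (simplified stand-ins,
// NOT V8's real platform headers). One task object services all workers; the
// platform picks the worker count from GetMaxConcurrency() and requests
// preemption via ShouldYield() instead of an explicit flag.
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct JobDelegate {
  virtual ~JobDelegate() = default;
  virtual bool ShouldYield() = 0;   // platform-driven preemption signal
  virtual uint8_t GetTaskId() = 0;  // stable id for per-worker state
};

struct JobTask {
  virtual ~JobTask() = default;
  virtual void Run(JobDelegate* delegate) = 0;
  // Upper bound on useful workers, given how many are already running.
  virtual size_t GetMaxConcurrency(size_t worker_count) const = 0;
};

// Toy marking job: drains a shared counter of work "segments".
class MarkingJob : public JobTask {
 public:
  explicit MarkingJob(std::atomic<int>* segments) : segments_(segments) {}

  void Run(JobDelegate* delegate) override {
    while (segments_->fetch_sub(1) > 0) {
      if (delegate->ShouldYield()) return;  // replaces a preemption_request flag
    }
  }

  size_t GetMaxConcurrency(size_t worker_count) const override {
    // Mirrors the reverted GetMaxConcurrency: running workers plus queued work,
    // capped at a small maximum (illustrative cap of 8 here).
    int remaining = std::max(0, segments_->load());
    return std::min<size_t>(8, worker_count + static_cast<size_t>(remaining));
  }

 private:
  std::atomic<int>* segments_;
};

// Trivial single-threaded "platform" so the sketch compiles and runs as-is.
struct InlineDelegate : JobDelegate {
  bool ShouldYield() override { return false; }
  uint8_t GetTaskId() override { return 0; }
};

int main() {
  std::atomic<int> segments{1000};
  MarkingJob job(&segments);
  InlineDelegate delegate;
  job.Run(&delegate);  // a real platform would fan this out to worker threads
  std::printf("remaining segments: %d\n", std::max(0, segments.load()));
}

The diff below removes exactly this shape from ConcurrentMarking and restores the explicit per-task bookkeeping.
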
@@ -347,30 +347,27 @@ FixedArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
   return FixedArray::unchecked_cast(object);
 }
 
-class ConcurrentMarking::JobTask : public v8::JobTask {
+class ConcurrentMarking::Task : public CancelableTask {
  public:
-  JobTask(ConcurrentMarking* concurrent_marking, unsigned mark_compact_epoch,
-          bool is_forced_gc)
-      : concurrent_marking_(concurrent_marking),
-        mark_compact_epoch_(mark_compact_epoch),
-        is_forced_gc_(is_forced_gc) {}
+  Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
+       TaskState* task_state, int task_id)
+      : CancelableTask(isolate),
+        concurrent_marking_(concurrent_marking),
+        task_state_(task_state),
+        task_id_(task_id) {}
 
-  ~JobTask() override = default;
+  ~Task() override = default;
 
-  // v8::JobTask overrides.
-  void Run(JobDelegate* delegate) override {
-    concurrent_marking_->Run(delegate, mark_compact_epoch_, is_forced_gc_);
-  }
-  size_t GetMaxConcurrency(size_t worker_count) const override {
-    return concurrent_marking_->GetMaxConcurrency(worker_count);
+ private:
+  // v8::internal::CancelableTask overrides.
+  void RunInternal() override {
+    concurrent_marking_->Run(task_id_, task_state_);
   }
 
- private:
   ConcurrentMarking* concurrent_marking_;
-  const unsigned mark_compact_epoch_;
-  const bool is_forced_gc_;
-  DISALLOW_COPY_AND_ASSIGN(JobTask);
+  TaskState* task_state_;
+  int task_id_;
+  DISALLOW_COPY_AND_ASSIGN(Task);
 };
 
 ConcurrentMarking::ConcurrentMarking(Heap* heap,
@@ -385,19 +382,16 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap,
 #endif
 }
 
-void ConcurrentMarking::Run(JobDelegate* delegate, unsigned mark_compact_epoch,
-                            bool is_forced_gc) {
+void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
   TRACE_BACKGROUND_GC(heap_->tracer(),
                       GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
   size_t kBytesUntilInterruptCheck = 64 * KB;
   int kObjectsUntilInterrupCheck = 1000;
-  uint8_t task_id = delegate->GetTaskId() + 1;
-  TaskState* task_state = &task_state_[task_id];
   MarkingWorklists::Local local_marking_worklists(marking_worklists_);
   ConcurrentMarkingVisitor visitor(
       task_id, &local_marking_worklists, weak_objects_, heap_,
-      mark_compact_epoch, Heap::GetBytecodeFlushMode(),
-      heap_->local_embedder_heap_tracer()->InUse(), is_forced_gc,
+      task_state->mark_compact_epoch, Heap::GetBytecodeFlushMode(),
+      heap_->local_embedder_heap_tracer()->InUse(), task_state->is_forced_gc,
       &task_state->memory_chunk_data);
   NativeContextInferrer& native_context_inferrer =
       task_state->native_context_inferrer;
@@ -463,7 +457,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate, unsigned mark_compact_epoch,
         marked_bytes += current_marked_bytes;
         base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
                                                   marked_bytes);
-        if (delegate->ShouldYield()) {
+        if (task_state->preemption_request) {
           TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                        "ConcurrentMarking::Run Preempted");
           break;
@@ -498,6 +492,13 @@ void ConcurrentMarking::Run(JobDelegate* delegate, unsigned mark_compact_epoch,
     if (ephemeron_marked) {
       set_ephemeron_marked(true);
     }
+    {
+      base::MutexGuard guard(&pending_lock_);
+      is_pending_[task_id] = false;
+      --pending_task_count_;
+      pending_condition_.NotifyAll();
+    }
   }
   if (FLAG_trace_concurrent_marking) {
     heap_->isolate()->PrintWithTimestamp(
@@ -506,62 +507,109 @@ void ConcurrentMarking::Run(JobDelegate* delegate, unsigned mark_compact_epoch,
   }
 }
 
-size_t ConcurrentMarking::GetMaxConcurrency(size_t worker_count) {
-  // TODO(ulan): Iterate context_worklists() if other worklists are empty.
-  return std::min<size_t>(
-      kMaxTasks, worker_count + marking_worklists_->shared()->Size() +
-                     weak_objects_->current_ephemerons.GlobalPoolSize() +
-                     weak_objects_->discovered_ephemerons.GlobalPoolSize());
-}
-
-void ConcurrentMarking::ScheduleJob() {
+void ConcurrentMarking::ScheduleTasks() {
   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
   DCHECK(!heap_->IsTearingDown());
-  DCHECK(!job_handle_ || !job_handle_->IsRunning());
-
-  job_handle_ = V8::GetCurrentPlatform()->PostJob(
-      TaskPriority::kUserVisible,
-      std::make_unique<JobTask>(this, heap_->mark_compact_collector()->epoch(),
-                                heap_->is_current_gc_forced()));
-  DCHECK(job_handle_->IsRunning());
+  base::MutexGuard guard(&pending_lock_);
+  if (total_task_count_ == 0) {
+    static const int num_cores =
+        V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
+#if defined(V8_OS_MACOSX)
+    // Mac OSX 10.11 and prior seems to have trouble when doing concurrent
+    // marking on competing hyper-threads (regresses Octane/Splay). As such,
+    // only use num_cores/2, leaving one of those for the main thread.
+    // TODO(ulan): Use all cores on Mac 10.12+.
+    total_task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
+#else   // defined(V8_OS_MACOSX)
+    // On other platforms use all logical cores, leaving one for the main
+    // thread.
+    total_task_count_ = Max(1, Min(kMaxTasks, num_cores - 2));
+#endif  // defined(V8_OS_MACOSX)
+    if (FLAG_gc_experiment_reduce_concurrent_marking_tasks) {
+      // Use at most half of the cores in the experiment.
+      total_task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
+    }
+    DCHECK_LE(total_task_count_, kMaxTasks);
+  }
+  // Task id 0 is for the main thread.
+  for (int i = 1; i <= total_task_count_; i++) {
+    if (!is_pending_[i]) {
+      if (FLAG_trace_concurrent_marking) {
+        heap_->isolate()->PrintWithTimestamp(
+            "Scheduling concurrent marking task %d\n", i);
+      }
+      task_state_[i].preemption_request = false;
+      task_state_[i].mark_compact_epoch =
+          heap_->mark_compact_collector()->epoch();
+      task_state_[i].is_forced_gc = heap_->is_current_gc_forced();
+      is_pending_[i] = true;
+      ++pending_task_count_;
+      auto task =
+          std::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
+      cancelable_id_[i] = task->id();
+      V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
+    }
+  }
+  DCHECK_EQ(total_task_count_, pending_task_count_);
 }
 
-void ConcurrentMarking::RescheduleJobIfNeeded() {
+void ConcurrentMarking::RescheduleTasksIfNeeded() {
   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
   if (heap_->IsTearingDown()) return;
-
-  if (marking_worklists_->shared()->IsEmpty() &&
-      weak_objects_->current_ephemerons.IsGlobalPoolEmpty() &&
-      weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
-    return;
+  {
+    base::MutexGuard guard(&pending_lock_);
+    // The total task count is initialized in ScheduleTasks from
+    // NumberOfWorkerThreads of the platform.
+    if (total_task_count_ > 0 && pending_task_count_ == total_task_count_) {
+      return;
+    }
+  }
+  if (!marking_worklists_->shared()->IsEmpty() ||
+      !weak_objects_->current_ephemerons.IsGlobalPoolEmpty() ||
+      !weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
+    ScheduleTasks();
   }
-  if (!job_handle_ || !job_handle_->IsRunning())
-    ScheduleJob();
-  else
-    job_handle_->NotifyConcurrencyIncrease();
 }
 
-bool ConcurrentMarking::Pause() {
+bool ConcurrentMarking::Stop(StopRequest stop_request) {
   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
-  if (!job_handle_ || !job_handle_->IsRunning()) return false;
+  base::MutexGuard guard(&pending_lock_);
 
-  job_handle_->Cancel();
+  if (pending_task_count_ == 0) return false;
+
+  if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
+    CancelableTaskManager* task_manager =
+        heap_->isolate()->cancelable_task_manager();
+    for (int i = 1; i <= total_task_count_; i++) {
+      if (is_pending_[i]) {
+        if (task_manager->TryAbort(cancelable_id_[i]) ==
+            TryAbortResult::kTaskAborted) {
+          is_pending_[i] = false;
+          --pending_task_count_;
+        } else if (stop_request == StopRequest::PREEMPT_TASKS) {
+          task_state_[i].preemption_request = true;
+        }
+      }
+    }
+  }
+  while (pending_task_count_ > 0) {
+    pending_condition_.Wait(&pending_lock_);
+  }
+  for (int i = 1; i <= total_task_count_; i++) {
+    DCHECK(!is_pending_[i]);
+  }
   return true;
 }
 
-void ConcurrentMarking::JoinForTesting() {
-  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
-  DCHECK(job_handle_ && job_handle_->IsRunning());
-  job_handle_->Join();
-}
-
 bool ConcurrentMarking::IsStopped() {
   if (!FLAG_concurrent_marking) return true;
-  return !job_handle_ || !job_handle_->IsRunning();
+  base::MutexGuard guard(&pending_lock_);
+  return pending_task_count_ == 0;
 }
 
 void ConcurrentMarking::FlushNativeContexts(NativeContextStats* main_stats) {
-  for (int i = 1; i <= kMaxTasks; i++) {
+  for (int i = 1; i <= total_task_count_; i++) {
     main_stats->Merge(task_state_[i].native_context_stats);
     task_state_[i].native_context_stats.Clear();
   }
@@ -569,8 +617,8 @@ void ConcurrentMarking::FlushNativeContexts(NativeContextStats* main_stats) {
 
 void ConcurrentMarking::FlushMemoryChunkData(
     MajorNonAtomicMarkingState* marking_state) {
-  DCHECK(!job_handle_ || !job_handle_->IsRunning());
-  for (int i = 1; i <= kMaxTasks; i++) {
+  DCHECK_EQ(pending_task_count_, 0);
+  for (int i = 1; i <= total_task_count_; i++) {
     MemoryChunkDataMap& memory_chunk_data = task_state_[i].memory_chunk_data;
     for (auto& pair : memory_chunk_data) {
       // ClearLiveness sets the live bytes to zero.
@@ -592,7 +640,7 @@ void ConcurrentMarking::FlushMemoryChunkData(
 }
 
 void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
-  for (int i = 1; i <= kMaxTasks; i++) {
+  for (int i = 1; i <= total_task_count_; i++) {
     auto it = task_state_[i].memory_chunk_data.find(chunk);
     if (it != task_state_[i].memory_chunk_data.end()) {
       it->second.live_bytes = 0;
@@ -603,7 +651,7 @@ void ConcurrentMarking::ClearMemoryChunkData(MemoryChunk* chunk) {
 
 size_t ConcurrentMarking::TotalMarkedBytes() {
   size_t result = 0;
-  for (int i = 1; i <= kMaxTasks; i++) {
+  for (int i = 1; i <= total_task_count_; i++) {
     result +=
         base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
   }
@@ -613,12 +661,14 @@ size_t ConcurrentMarking::TotalMarkedBytes() {
 
 ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
     : concurrent_marking_(concurrent_marking),
-      resume_on_exit_(FLAG_concurrent_marking && concurrent_marking_->Pause()) {
+      resume_on_exit_(FLAG_concurrent_marking &&
+                      concurrent_marking_->Stop(
+                          ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
   DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
 }
 
 ConcurrentMarking::PauseScope::~PauseScope() {
-  if (resume_on_exit_) concurrent_marking_->ScheduleJob();
+  if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
 }
 
 }  // namespace internal
......
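
The restored code above replaces the JobHandle with hand-rolled completion tracking: each worker clears its is_pending_ slot and signals pending_condition_ under pending_lock_, while Stop() first tries to abort not-yet-started tasks and then blocks until pending_task_count_ reaches zero. A compact standalone sketch of that wait pattern follows, using std::mutex and std::condition_variable as stand-ins for base::Mutex and base::ConditionVariable; the class and method names here are illustrative, not V8's.

// Standalone sketch of the pending-task bookkeeping used by the restored
// ConcurrentMarking::Run/Stop (standard-library primitives as stand-ins).
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

class PendingTasks {
 public:
  // Called by the scheduler before posting a worker task.
  void MarkScheduled() {
    std::lock_guard<std::mutex> guard(lock_);
    ++pending_;
  }

  // Called by each worker when it finishes (or is preempted).
  void MarkFinished() {
    std::lock_guard<std::mutex> guard(lock_);
    --pending_;
    cv_.notify_all();
  }

  // Called by the main thread in Stop(): block until all workers reported.
  void WaitForAll() {
    std::unique_lock<std::mutex> guard(lock_);
    cv_.wait(guard, [this] { return pending_ == 0; });
  }

 private:
  std::mutex lock_;
  std::condition_variable cv_;
  int pending_ = 0;
};

int main() {
  PendingTasks pending;
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i) {
    pending.MarkScheduled();
    workers.emplace_back([&pending] { pending.MarkFinished(); });
  }
  pending.WaitForAll();  // analogous to Stop() draining pending_task_count_
  for (auto& t : workers) t.join();
}

The real code additionally aborts unstarted tasks through the CancelableTaskManager and sets preemption_request so running workers bail out at their next interrupt check.
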
@@ -54,6 +54,17 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
     const bool resume_on_exit_;
   };
 
+  enum class StopRequest {
+    // Preempt ongoing tasks ASAP (and cancel unstarted tasks).
+    PREEMPT_TASKS,
+    // Wait for ongoing tasks to complete (and cancels unstarted tasks).
+    COMPLETE_ONGOING_TASKS,
+    // Wait for all scheduled tasks to complete (only use this in tests that
+    // control the full stack -- otherwise tasks cancelled by the platform can
+    // make this call hang).
+    COMPLETE_TASKS_FOR_TESTING,
+  };
+
   // TODO(gab): The only thing that prevents this being above 7 is
   // Worklist::kMaxNumTasks being maxed at 8 (concurrent marking doesn't use
   // task 0, reserved for the main thread).
@@ -65,14 +76,13 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
   // Schedules asynchronous tasks to perform concurrent marking. Objects in the
   // heap should not be moved while these are active (can be stopped safely via
   // Stop() or PauseScope).
-  void ScheduleJob();
-  void RescheduleJobIfNeeded();
+  void ScheduleTasks();
 
   // Stops concurrent marking per |stop_request|'s semantics. Returns true
   // if concurrent marking was in progress, false otherwise.
-  bool Pause();
-  void JoinForTesting();
+  bool Stop(StopRequest stop_request);
+
+  void RescheduleTasksIfNeeded();
 
   // Flushes native context sizes to the given table of the main thread.
   void FlushNativeContexts(NativeContextStats* main_stats);
   // Flushes memory chunk data using the given marking state.
@@ -93,24 +103,31 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
 
  private:
   struct TaskState {
+    // The main thread sets this flag to true when it wants the concurrent
+    // marker to give up the worker thread.
+    std::atomic<bool> preemption_request;
     size_t marked_bytes = 0;
+    unsigned mark_compact_epoch;
+    bool is_forced_gc;
     MemoryChunkDataMap memory_chunk_data;
     NativeContextInferrer native_context_inferrer;
     NativeContextStats native_context_stats;
     char cache_line_padding[64];
   };
-  class JobTask;
-  void Run(JobDelegate* delegate, unsigned mark_compact_epoch,
-           bool is_forced_gc);
-  size_t GetMaxConcurrency(size_t worker_count);
-
-  std::unique_ptr<JobHandle> job_handle_;
+  class Task;
+  void Run(int task_id, TaskState* task_state);
   Heap* const heap_;
   MarkingWorklists* const marking_worklists_;
   WeakObjects* const weak_objects_;
   TaskState task_state_[kMaxTasks + 1];
   std::atomic<size_t> total_marked_bytes_{0};
   std::atomic<bool> ephemeron_marked_{false};
+  base::Mutex pending_lock_;
+  base::ConditionVariable pending_condition_;
+  int pending_task_count_ = 0;
+  bool is_pending_[kMaxTasks + 1] = {};
+  CancelableTaskManager::Id cancelable_id_[kMaxTasks + 1] = {};
+  int total_task_count_ = 0;
 };
 
 }  // namespace internal
......
@@ -5570,10 +5570,6 @@ void Heap::TearDown() {
     stress_scavenge_observer_ = nullptr;
   }
 
-  if (FLAG_concurrent_marking || FLAG_parallel_marking) {
-    concurrent_marking_->Pause();
-  }
-
   if (mark_compact_collector_) {
     mark_compact_collector_->TearDown();
     mark_compact_collector_.reset();
@@ -5590,7 +5586,6 @@ void Heap::TearDown() {
   scavenger_collector_.reset();
   array_buffer_sweeper_.reset();
   incremental_marking_.reset();
   concurrent_marking_.reset();
   gc_idle_time_handler_.reset();
......
@@ -246,7 +246,7 @@ void IncrementalMarking::StartMarking() {
   MarkRoots();
 
   if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
-    heap_->concurrent_marking()->ScheduleJob();
+    heap_->concurrent_marking()->ScheduleTasks();
   }
 
   // Ready to start incremental marking.
@@ -1104,7 +1104,7 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
     }
     if (FLAG_concurrent_marking) {
       local_marking_worklists()->ShareWork();
-      heap_->concurrent_marking()->RescheduleJobIfNeeded();
+      heap_->concurrent_marking()->RescheduleTasksIfNeeded();
     }
   }
 
   if (state_ == MARKING) {
......
@@ -904,11 +904,12 @@ void MarkCompactCollector::Prepare() {
   }
 }
 
-void MarkCompactCollector::FinishConcurrentMarking() {
+void MarkCompactCollector::FinishConcurrentMarking(
+    ConcurrentMarking::StopRequest stop_request) {
   // FinishConcurrentMarking is called for both, concurrent and parallel,
   // marking. It is safe to call this function when tasks are already finished.
   if (FLAG_parallel_marking || FLAG_concurrent_marking) {
-    heap()->concurrent_marking()->Pause();
+    heap()->concurrent_marking()->Stop(stop_request);
     heap()->concurrent_marking()->FlushMemoryChunkData(
         non_atomic_marking_state());
     heap()->concurrent_marking()->FlushNativeContexts(&native_context_stats_);
@@ -1663,11 +1664,12 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
           GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
 
       if (FLAG_parallel_marking) {
-        heap_->concurrent_marking()->RescheduleJobIfNeeded();
+        heap_->concurrent_marking()->RescheduleTasksIfNeeded();
      }
 
       work_to_do = ProcessEphemerons();
-      FinishConcurrentMarking();
+      FinishConcurrentMarking(
+          ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
     }
 
     CHECK(weak_objects_.current_ephemerons.IsEmpty());
@@ -1982,11 +1984,12 @@ void MarkCompactCollector::MarkLiveObjects() {
     {
       TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
       if (FLAG_parallel_marking) {
-        heap_->concurrent_marking()->RescheduleJobIfNeeded();
+        heap_->concurrent_marking()->RescheduleTasksIfNeeded();
       }
       DrainMarkingWorklist();
-      FinishConcurrentMarking();
+      FinishConcurrentMarking(
+          ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
       DrainMarkingWorklist();
     }
......
@@ -479,7 +479,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   // Stop concurrent marking (either by preempting it right away or waiting for
   // it to complete as requested by |stop_request|).
-  void FinishConcurrentMarking();
+  void FinishConcurrentMarking(ConcurrentMarking::StopRequest stop_request);
 
   bool StartCompaction();
......
@@ -44,8 +44,9 @@ TEST(ConcurrentMarking) {
       new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
   PublishSegment(marking_worklists.shared(),
                  ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->ScheduleJob();
-  concurrent_marking->JoinForTesting();
+  concurrent_marking->ScheduleTasks();
+  concurrent_marking->Stop(
+      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
   delete concurrent_marking;
 }
 
@@ -66,12 +67,14 @@ TEST(ConcurrentMarkingReschedule) {
       new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
   PublishSegment(marking_worklists.shared(),
                  ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->ScheduleJob();
-  concurrent_marking->Pause();
+  concurrent_marking->ScheduleTasks();
+  concurrent_marking->Stop(
+      ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
   PublishSegment(marking_worklists.shared(),
                  ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->RescheduleJobIfNeeded();
-  concurrent_marking->JoinForTesting();
+  concurrent_marking->RescheduleTasksIfNeeded();
+  concurrent_marking->Stop(
+      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
   delete concurrent_marking;
 }
 
@@ -93,13 +96,14 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
   for (int i = 0; i < 5000; i++)
     PublishSegment(marking_worklists.shared(),
                    ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->ScheduleJob();
-  concurrent_marking->Pause();
+  concurrent_marking->ScheduleTasks();
+  concurrent_marking->Stop(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
   for (int i = 0; i < 5000; i++)
     PublishSegment(marking_worklists.shared(),
                    ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->RescheduleJobIfNeeded();
-  concurrent_marking->JoinForTesting();
+  concurrent_marking->RescheduleTasksIfNeeded();
+  concurrent_marking->Stop(
+      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
   delete concurrent_marking;
 }
 
@@ -113,7 +117,8 @@ TEST(ConcurrentMarkingMarkedBytes) {
   CcTest::CollectAllGarbage();
   if (!heap->incremental_marking()->IsStopped()) return;
   heap::SimulateIncrementalMarking(heap, false);
-  heap->concurrent_marking()->JoinForTesting();
+  heap->concurrent_marking()->Stop(
+      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
   CHECK_GE(heap->concurrent_marking()->TotalMarkedBytes(), root->Size());
 }
......