Commit 9b1f9e5d authored by Etienne Pierre-Doray, committed by Commit Bot

[heap] Split ConcurrentMarking::Stop and update priority

This CL refactors ConcurrentMarking::Stop into explicit Join and Pause methods.
MarkCompact now updates the job priority to kUserBlocking before joining.

Change-Id: I71cb469e35cc4df7fdb0dbd8c0cf9c1642e8f5fd
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2491109
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70738}
parent 545e9dc5
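In short, the single Stop(StopRequest) entry point is split into two calls with narrower semantics, and the scheduling entry points gain a TaskPriority parameter. A rough before/after sketch of the call sites, paraphrasing the diff below (all names are taken from the patch itself):

    // Before: one Stop() whose behavior depends on a request enum.
    concurrent_marking->Stop(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
    concurrent_marking->Stop(ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);

    // After: the intent is explicit at the call site.
    concurrent_marking->Pause();  // preempt the job ASAP (JobHandle::Cancel)
    concurrent_marking->Join();   // wait for the job to finish (JobHandle::Join)

    // Scheduling now takes a priority (default kUserVisible). MarkCompact
    // passes kUserBlocking right before it joins so the remaining marking
    // work runs at foreground priority.
    concurrent_marking->ScheduleJob();
    concurrent_marking->RescheduleJobIfNeeded(TaskPriority::kUserBlocking);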
src/heap/concurrent-marking.cc

@@ -522,19 +522,19 @@ size_t ConcurrentMarking::GetMaxConcurrency(size_t worker_count) {
           weak_objects_->current_ephemerons.GlobalPoolSize()}));
 }
 
-void ConcurrentMarking::ScheduleTasks() {
+void ConcurrentMarking::ScheduleJob(TaskPriority priority) {
   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
   DCHECK(!heap_->IsTearingDown());
   DCHECK(!job_handle_ || !job_handle_->IsValid());
 
   job_handle_ = V8::GetCurrentPlatform()->PostJob(
-      TaskPriority::kUserVisible,
+      priority,
       std::make_unique<JobTask>(this, heap_->mark_compact_collector()->epoch(),
                                 heap_->is_current_gc_forced()));
   DCHECK(job_handle_->IsValid());
 }
 
-void ConcurrentMarking::RescheduleTasksIfNeeded() {
+void ConcurrentMarking::RescheduleJobIfNeeded(TaskPriority priority) {
   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
   if (heap_->IsTearingDown()) return;
@@ -543,21 +543,26 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
       weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
     return;
   }
-  if (!job_handle_ || !job_handle_->IsValid())
-    ScheduleTasks();
-  else
+  if (!job_handle_ || !job_handle_->IsValid()) {
+    ScheduleJob(priority);
+  } else {
+    if (priority != TaskPriority::kUserVisible)
+      job_handle_->UpdatePriority(priority);
     job_handle_->NotifyConcurrencyIncrease();
+  }
 }
 
-bool ConcurrentMarking::Stop(StopRequest stop_request) {
+void ConcurrentMarking::Join() {
+  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
+  if (!job_handle_ || !job_handle_->IsValid()) return;
+  job_handle_->Join();
+}
+
+bool ConcurrentMarking::Pause() {
   DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
   if (!job_handle_ || !job_handle_->IsValid()) return false;
 
-  if (stop_request == StopRequest::PREEMPT_TASKS) {
-    job_handle_->Cancel();
-  } else {
-    job_handle_->Join();
-  }
+  job_handle_->Cancel();
   return true;
 }
@@ -622,14 +627,12 @@ size_t ConcurrentMarking::TotalMarkedBytes() {
 
 ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
     : concurrent_marking_(concurrent_marking),
-      resume_on_exit_(FLAG_concurrent_marking &&
-                      concurrent_marking_->Stop(
-                          ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
+      resume_on_exit_(FLAG_concurrent_marking && concurrent_marking_->Pause()) {
   DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
 }
 
 ConcurrentMarking::PauseScope::~PauseScope() {
-  if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
+  if (resume_on_exit_) concurrent_marking_->RescheduleJobIfNeeded();
 }
 
 }  // namespace internal
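The new Pause() and Join() map directly onto the v8::JobHandle API: Pause() calls Cancel(), which makes workers observe JobDelegate::ShouldYield() and bail out quickly, while Join() waits for (and may contribute to) the remaining work. A minimal, self-contained sketch of that contract follows; this is not V8's actual marking JobTask, and the chunk bookkeeping is invented purely for illustration:

    #include <atomic>
    #include "v8-platform.h"  // include path varies by embedder/build

    // Sketch of a job task that cooperates with Cancel()/Join() the way the
    // marking JobTask does: workers poll ShouldYield() between units of work.
    class SketchMarkingJob : public v8::JobTask {
     public:
      explicit SketchMarkingJob(size_t chunks) : pending_chunks_(chunks) {}

      void Run(v8::JobDelegate* delegate) override {
        while (!delegate->ShouldYield()) {  // becomes true after Cancel()
          size_t prev = pending_chunks_.load(std::memory_order_relaxed);
          if (prev == 0) return;  // no work left; Join() can return
          if (pending_chunks_.compare_exchange_weak(prev, prev - 1,
                                                    std::memory_order_relaxed)) {
            // ... process one chunk of marking work here ...
          }
        }
      }

      size_t GetMaxConcurrency(size_t worker_count) const override {
        // Upper bound on useful workers; the platform re-queries this after
        // NotifyConcurrencyIncrease().
        return pending_chunks_.load(std::memory_order_relaxed);
      }

     private:
      std::atomic<size_t> pending_chunks_;
    };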
src/heap/concurrent-marking.h

@@ -54,17 +54,6 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
     const bool resume_on_exit_;
   };
 
-  enum class StopRequest {
-    // Preempt ongoing tasks ASAP (and cancel unstarted tasks).
-    PREEMPT_TASKS,
-    // Wait for ongoing tasks to complete (and cancels unstarted tasks).
-    COMPLETE_ONGOING_TASKS,
-    // Wait for all scheduled tasks to complete (only use this in tests that
-    // control the full stack -- otherwise tasks cancelled by the platform can
-    // make this call hang).
-    COMPLETE_TASKS_FOR_TESTING,
-  };
-
   // TODO(gab): The only thing that prevents this being above 7 is
   // Worklist::kMaxNumTasks being maxed at 8 (concurrent marking doesn't use
   // task 0, reserved for the main thread).
@@ -73,16 +62,22 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
   ConcurrentMarking(Heap* heap, MarkingWorklists* marking_worklists,
                     WeakObjects* weak_objects);
 
-  // Schedules asynchronous tasks to perform concurrent marking. Objects in the
-  // heap should not be moved while these are active (can be stopped safely via
-  // Stop() or PauseScope).
-  void ScheduleTasks();
+  // Schedules asynchronous job to perform concurrent marking at |priority|.
+  // Objects in the heap should not be moved while these are active (can be
+  // stopped safely via Pause() or PauseScope).
+  void ScheduleJob(TaskPriority priority = TaskPriority::kUserVisible);
 
-  // Stops concurrent marking per |stop_request|'s semantics. Returns true
-  // if concurrent marking was in progress, false otherwise.
-  bool Stop(StopRequest stop_request);
-
-  void RescheduleTasksIfNeeded();
+  // Waits for scheduled job to complete.
+  void Join();
+  // Preempts ongoing job ASAP. Returns true if concurrent marking was in
+  // progress, false otherwise.
+  bool Pause();
+
+  // Schedules asynchronous job to perform concurrent marking at |priority| if
+  // it is not already running, otherwise adjusts the number of workers running
+  // the job and the priority if different from the default kUserVisible.
+  void RescheduleJobIfNeeded(
+      TaskPriority priority = TaskPriority::kUserVisible);
 
   // Flushes native context sizes to the given table of the main thread.
   void FlushNativeContexts(NativeContextStats* main_stats);
   // Flushes memory chunk data using the given marking state.
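One subtlety of the new RescheduleJobIfNeeded(), visible in the .cc hunk above: the priority is only ever raised. A later default-priority call does not downgrade a job that was bumped to kUserBlocking, because UpdatePriority() is skipped when the requested priority equals the default. Condensed from the diff, with commentary added:

    if (!job_handle_ || !job_handle_->IsValid()) {
      ScheduleJob(priority);                     // start a fresh job at |priority|
    } else {
      if (priority != TaskPriority::kUserVisible)
        job_handle_->UpdatePriority(priority);   // e.g. bump to kUserBlocking
      job_handle_->NotifyConcurrencyIncrease();  // wake up additional workers
    }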
src/heap/heap.cc

@@ -5347,7 +5347,7 @@ void Heap::TearDown() {
   DCHECK_EQ(gc_state(), TEAR_DOWN);
 
   if (FLAG_concurrent_marking || FLAG_parallel_marking)
-    concurrent_marking_->Stop(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
+    concurrent_marking_->Pause();
 
   // It's too late for Heap::Verify() here, as parts of the Isolate are
   // already gone by the time this is called.
src/heap/incremental-marking.cc

@@ -246,7 +246,7 @@ void IncrementalMarking::StartMarking() {
   MarkRoots();
 
   if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
-    heap_->concurrent_marking()->ScheduleTasks();
+    heap_->concurrent_marking()->ScheduleJob();
   }
 
   // Ready to start incremental marking.
@@ -1002,7 +1002,7 @@ StepResult IncrementalMarking::Step(double max_step_size_in_ms,
     }
     if (FLAG_concurrent_marking) {
       local_marking_worklists()->ShareWork();
-      heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+      heap_->concurrent_marking()->RescheduleJobIfNeeded();
     }
   }
   if (state_ == MARKING) {
src/heap/mark-compact.cc

@@ -880,12 +880,11 @@ void MarkCompactCollector::Prepare() {
               heap()->new_space()->original_top_acquire());
 }
 
-void MarkCompactCollector::FinishConcurrentMarking(
-    ConcurrentMarking::StopRequest stop_request) {
+void MarkCompactCollector::FinishConcurrentMarking() {
   // FinishConcurrentMarking is called for both, concurrent and parallel,
   // marking. It is safe to call this function when tasks are already finished.
   if (FLAG_parallel_marking || FLAG_concurrent_marking) {
-    heap()->concurrent_marking()->Stop(stop_request);
+    heap()->concurrent_marking()->Join();
     heap()->concurrent_marking()->FlushMemoryChunkData(
         non_atomic_marking_state());
     heap()->concurrent_marking()->FlushNativeContexts(&native_context_stats_);
@@ -1639,12 +1638,12 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
 
       if (FLAG_parallel_marking) {
-        heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+        heap_->concurrent_marking()->RescheduleJobIfNeeded(
+            TaskPriority::kUserBlocking);
       }
 
       work_to_do = ProcessEphemerons();
-      FinishConcurrentMarking(
-          ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
+      FinishConcurrentMarking();
     }
 
     CHECK(weak_objects_.current_ephemerons.IsEmpty());
@@ -1959,12 +1958,12 @@ void MarkCompactCollector::MarkLiveObjects() {
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
 
     if (FLAG_parallel_marking) {
-      heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+      heap_->concurrent_marking()->RescheduleJobIfNeeded(
+          TaskPriority::kUserBlocking);
     }
 
     DrainMarkingWorklist();
-    FinishConcurrentMarking(
-        ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
+    FinishConcurrentMarking();
     DrainMarkingWorklist();
   }
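The two mark-compact hunks above follow the same raise-then-wait shape: bump the concurrent job to kUserBlocking, let the main thread help (DrainMarkingWorklist() or ProcessEphemerons()), then join. In isolation (names taken from the diff; surrounding GC phase code elided):

    if (FLAG_parallel_marking) {
      heap_->concurrent_marking()->RescheduleJobIfNeeded(
          TaskPriority::kUserBlocking);  // raise priority and wake workers
    }
    DrainMarkingWorklist();              // main thread processes marking work
    FinishConcurrentMarking();           // ConcurrentMarking::Join() + flushes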
src/heap/mark-compact.h

@@ -478,7 +478,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
 
   // Stop concurrent marking (either by preempting it right away or waiting for
   // it to complete as requested by |stop_request|).
-  void FinishConcurrentMarking(ConcurrentMarking::StopRequest stop_request);
+  void FinishConcurrentMarking();
 
   bool StartCompaction();
test/cctest/heap/test-concurrent-marking.cc

@@ -44,9 +44,8 @@ TEST(ConcurrentMarking) {
       new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
   PublishSegment(marking_worklists.shared(),
                  ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->ScheduleTasks();
-  concurrent_marking->Stop(
-      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
+  concurrent_marking->ScheduleJob();
+  concurrent_marking->Join();
   delete concurrent_marking;
 }
@@ -67,14 +66,12 @@ TEST(ConcurrentMarkingReschedule) {
       new ConcurrentMarking(heap, &marking_worklists, &weak_objects);
   PublishSegment(marking_worklists.shared(),
                  ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->ScheduleTasks();
-  concurrent_marking->Stop(
-      ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
+  concurrent_marking->ScheduleJob();
+  concurrent_marking->Join();
   PublishSegment(marking_worklists.shared(),
                  ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->RescheduleTasksIfNeeded();
-  concurrent_marking->Stop(
-      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
+  concurrent_marking->RescheduleJobIfNeeded();
+  concurrent_marking->Join();
   delete concurrent_marking;
 }
@@ -96,14 +93,13 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
   for (int i = 0; i < 5000; i++)
     PublishSegment(marking_worklists.shared(),
                    ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->ScheduleTasks();
-  concurrent_marking->Stop(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
+  concurrent_marking->ScheduleJob();
+  concurrent_marking->Pause();
   for (int i = 0; i < 5000; i++)
     PublishSegment(marking_worklists.shared(),
                    ReadOnlyRoots(heap).undefined_value());
-  concurrent_marking->RescheduleTasksIfNeeded();
-  concurrent_marking->Stop(
-      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
+  concurrent_marking->RescheduleJobIfNeeded();
+  concurrent_marking->Join();
   delete concurrent_marking;
 }
@@ -117,8 +113,7 @@ TEST(ConcurrentMarkingMarkedBytes) {
   CcTest::CollectAllGarbage();
   if (!heap->incremental_marking()->IsStopped()) return;
   heap::SimulateIncrementalMarking(heap, false);
-  heap->concurrent_marking()->Stop(
-      ConcurrentMarking::StopRequest::COMPLETE_TASKS_FOR_TESTING);
+  heap->concurrent_marking()->Join();
   CHECK_GE(heap->concurrent_marking()->TotalMarkedBytes(), root->Size());
 }