Commit 8e98695c authored by Leszek Swirski, committed by V8 LUCI CQ

[compiler-dispatcher] Add locking around SFI->Job map

Due to streaming, SFI enqueueing can happen concurrently with
main-thread finalising, so we need to add locks around accesses to the
SFI->Job map.

Bug: v8:12370
Change-Id: I60281a954ef10f7fcde559b9529077a6b9a82c31
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3277874
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Reviewed-by: Victor Gomes <victorgomes@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77869}
parent b9a48232
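
For context on the pattern used in the diff below: rather than re-acquiring the mutex inside every helper, the change threads a const base::MutexGuard& through the private helpers (GetJobFor, WaitForJobIfRunningOnBackground). The otherwise-unused reference parameter acts as a compile-time witness that the caller already holds mutex_, which both documents the locking contract and prevents double-locking. Here is a minimal standalone sketch of this lock-witness idiom, using std::mutex and hypothetical names (Dispatcher, Job, jobs_) in place of V8's base::Mutex/base::MutexGuard:

#include <mutex>
#include <unordered_map>

class Dispatcher {
 public:
  // Public entry point: takes the lock once, then calls the helper.
  bool IsEnqueued(int function_id) const {
    std::lock_guard<std::mutex> lock(mutex_);
    return GetJobFor(function_id, lock) != nullptr;
  }

 private:
  struct Job {};

  // The unnamed guard parameter is never read; it exists purely as
  // compile-time proof that the caller holds mutex_. Calling this helper
  // without a guard in scope fails to type-check.
  const Job* GetJobFor(int function_id,
                       const std::lock_guard<std::mutex>&) const {
    auto it = jobs_.find(function_id);
    return it == jobs_.end() ? nullptr : &it->second;
  }

  // mutable so const methods such as IsEnqueued() can still lock it; the
  // header change below makes mutex_ mutable for the same reason.
  mutable std::mutex mutex_;
  std::unordered_map<int, Job> jobs_;
};

In the tests below, the guard is supplied as a temporary, base::MutexGuard(&dispatcher.mutex_), which lives until the end of the full expression, so the map lookup stays under the lock.
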
@@ -113,15 +113,16 @@ void LazyCompileDispatcher::Enqueue(
 
 bool LazyCompileDispatcher::IsEnqueued(
     Handle<SharedFunctionInfo> function) const {
+  base::MutexGuard lock(&mutex_);
   return shared_to_unoptimized_job_.Find(function) != nullptr;
 }
 
-void LazyCompileDispatcher::WaitForJobIfRunningOnBackground(Job* job) {
+void LazyCompileDispatcher::WaitForJobIfRunningOnBackground(
+    Job* job, const base::MutexGuard& lock) {
   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                "V8.LazyCompilerDispatcherWaitForBackgroundJob");
   RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileWaitForDispatcher);
 
-  base::MutexGuard lock(&mutex_);
   if (!job->is_running_on_background()) {
     num_jobs_for_background_ -= pending_background_jobs_.erase(job);
     if (job->state == Job::State::kPending) {
@@ -150,9 +151,14 @@ bool LazyCompileDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
     PrintF(" now\n");
   }
 
-  Job* job = GetJobFor(function);
-  WaitForJobIfRunningOnBackground(job);
-  shared_to_unoptimized_job_.Delete(function, &job);
+  Job* job;
+
+  {
+    base::MutexGuard lock(&mutex_);
+    job = GetJobFor(function, lock);
+    WaitForJobIfRunningOnBackground(job, lock);
+    shared_to_unoptimized_job_.Delete(function, &job);
+  }
 
   if (job->state == Job::State::kPendingToRunOnForeground) {
     job->task->Run();
@@ -174,9 +180,9 @@ void LazyCompileDispatcher::AbortJob(Handle<SharedFunctionInfo> shared_info) {
     shared_info->ShortPrint();
     PrintF("\n");
   }
-  Job* job = GetJobFor(shared_info);
   base::LockGuard<base::Mutex> lock(&mutex_);
+  Job* job = GetJobFor(shared_info, lock);
 
   num_jobs_for_background_ -= pending_background_jobs_.erase(job);
   if (job->is_running_on_background()) {
     // Job is currently running on the background thread, wait until it's done
@@ -201,6 +207,7 @@ void LazyCompileDispatcher::AbortAll() {
   idle_task_manager_->CancelAndWait();
 
   {
+    base::MutexGuard lock(&mutex_);
     SharedToJobMap::IteratableScope iteratable_scope(
         &shared_to_unoptimized_job_);
     for (Job** job_entry : iteratable_scope) {
@@ -214,7 +221,7 @@ void LazyCompileDispatcher::AbortAll() {
 }
 
 LazyCompileDispatcher::Job* LazyCompileDispatcher::GetJobFor(
-    Handle<SharedFunctionInfo> shared) const {
+    Handle<SharedFunctionInfo> shared, const base::MutexGuard&) const {
   return *shared_to_unoptimized_job_.Find(shared);
 }
...
@@ -137,8 +137,9 @@ class V8_EXPORT_PRIVATE LazyCompileDispatcher {
 
   using SharedToJobMap = IdentityMap<Job*, FreeStoreAllocationPolicy>;
 
-  void WaitForJobIfRunningOnBackground(Job* job);
-  Job* GetJobFor(Handle<SharedFunctionInfo> shared) const;
+  void WaitForJobIfRunningOnBackground(Job* job, const base::MutexGuard&);
+  Job* GetJobFor(Handle<SharedFunctionInfo> shared,
+                 const base::MutexGuard&) const;
   void ScheduleIdleTaskFromAnyThread(const base::MutexGuard&);
   void DoBackgroundWork(JobDelegate* delegate);
   void DoIdleWork(double deadline_in_seconds);
@@ -163,21 +164,22 @@ class V8_EXPORT_PRIVATE LazyCompileDispatcher {
   std::unique_ptr<CancelableTaskManager> idle_task_manager_;
 
+  // The following members can be accessed from any thread. Methods need to hold
+  // the mutex |mutex_| while accessing them.
+  mutable base::Mutex mutex_;
+
   // Mapping from SharedFunctionInfo to the corresponding unoptimized
   // compilation job.
   SharedToJobMap shared_to_unoptimized_job_;
 
-  // The following members can be accessed from any thread. Methods need to hold
-  // the mutex |mutex_| while accessing them.
-  base::Mutex mutex_;
-
   // True if an idle task is scheduled to be run.
   bool idle_task_scheduled_;
 
   // The set of jobs that can be run on a background thread.
   std::unordered_set<Job*> pending_background_jobs_;
 
-  // The total number of jobs, pending and running.
+  // The total number of jobs ready to execute on background, both those pending
+  // and those currently running.
   std::atomic<size_t> num_jobs_for_background_;
 
   // If not nullptr, then the main thread waits for the task processing
...
@@ -403,8 +403,9 @@ TEST_F(LazyCompileDispatcherTest, IdleTaskNoIdleTime) {
 
   // Job should be ready to finalize.
   ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 1);
-  ASSERT_EQ(dispatcher.GetJobFor(shared)->state,
-            LazyCompileDispatcher::Job::State::kReadyToFinalize);
+  ASSERT_EQ(
+      dispatcher.GetJobFor(shared, base::MutexGuard(&dispatcher.mutex_))->state,
+      LazyCompileDispatcher::Job::State::kReadyToFinalize);
   ASSERT_TRUE(platform.IdleTaskPending());
 
   // Grant no idle time and have time advance beyond it in one step.
@@ -416,8 +417,9 @@ TEST_F(LazyCompileDispatcherTest, IdleTaskNoIdleTime) {
 
   // Job should be ready to finalize.
   ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 1);
-  ASSERT_EQ(dispatcher.GetJobFor(shared)->state,
-            LazyCompileDispatcher::Job::State::kReadyToFinalize);
+  ASSERT_EQ(
+      dispatcher.GetJobFor(shared, base::MutexGuard(&dispatcher.mutex_))->state,
+      LazyCompileDispatcher::Job::State::kReadyToFinalize);
 
   // Now grant a lot of idle time and freeze time.
   platform.RunIdleTask(1000.0, 0.0);
@@ -448,10 +450,14 @@ TEST_F(LazyCompileDispatcherTest, IdleTaskSmallIdleTime) {
 
   // Both jobs should be ready to finalize.
   ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 2);
-  ASSERT_EQ(dispatcher.GetJobFor(shared_1)->state,
-            LazyCompileDispatcher::Job::State::kReadyToFinalize);
-  ASSERT_EQ(dispatcher.GetJobFor(shared_2)->state,
-            LazyCompileDispatcher::Job::State::kReadyToFinalize);
+  ASSERT_EQ(
+      dispatcher.GetJobFor(shared_1, base::MutexGuard(&dispatcher.mutex_))
+          ->state,
+      LazyCompileDispatcher::Job::State::kReadyToFinalize);
+  ASSERT_EQ(
+      dispatcher.GetJobFor(shared_2, base::MutexGuard(&dispatcher.mutex_))
+          ->state,
+      LazyCompileDispatcher::Job::State::kReadyToFinalize);
   ASSERT_TRUE(platform.IdleTaskPending());
 
   // Grant a small amount of idle time and have time advance beyond it in one
@@ -461,11 +467,15 @@ TEST_F(LazyCompileDispatcherTest, IdleTaskSmallIdleTime) {
   // Only one of the jobs should be finalized.
   ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 1);
   if (dispatcher.IsEnqueued(shared_1)) {
-    ASSERT_EQ(dispatcher.GetJobFor(shared_1)->state,
-              LazyCompileDispatcher::Job::State::kReadyToFinalize);
+    ASSERT_EQ(
+        dispatcher.GetJobFor(shared_1, base::MutexGuard(&dispatcher.mutex_))
+            ->state,
+        LazyCompileDispatcher::Job::State::kReadyToFinalize);
   } else {
-    ASSERT_EQ(dispatcher.GetJobFor(shared_2)->state,
-              LazyCompileDispatcher::Job::State::kReadyToFinalize);
+    ASSERT_EQ(
+        dispatcher.GetJobFor(shared_2, base::MutexGuard(&dispatcher.mutex_))
+            ->state,
+        LazyCompileDispatcher::Job::State::kReadyToFinalize);
   }
   ASSERT_NE(dispatcher.IsEnqueued(shared_1), dispatcher.IsEnqueued(shared_2));
   ASSERT_NE(shared_1->is_compiled(), shared_2->is_compiled());
@@ -524,8 +534,9 @@ TEST_F(LazyCompileDispatcherTest, FinishNowWithWorkerTask) {
   ASSERT_TRUE(dispatcher.IsEnqueued(shared));
   ASSERT_FALSE(shared->is_compiled());
   ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 1);
-  ASSERT_NE(dispatcher.GetJobFor(shared)->state,
-            LazyCompileDispatcher::Job::State::kReadyToFinalize);
+  ASSERT_NE(
+      dispatcher.GetJobFor(shared, base::MutexGuard(&dispatcher.mutex_))->state,
+      LazyCompileDispatcher::Job::State::kReadyToFinalize);
   ASSERT_TRUE(platform.JobTaskPending());
 
   // This does not block, but races with the FinishNow() call below.
@@ -611,8 +622,9 @@ TEST_F(LazyCompileDispatcherTest, AbortJobNotStarted) {
 
   ASSERT_FALSE(shared->is_compiled());
   ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 1);
-  ASSERT_NE(dispatcher.GetJobFor(shared)->state,
-            LazyCompileDispatcher::Job::State::kReadyToFinalize);
+  ASSERT_NE(
+      dispatcher.GetJobFor(shared, base::MutexGuard(&dispatcher.mutex_))->state,
+      LazyCompileDispatcher::Job::State::kReadyToFinalize);
   ASSERT_TRUE(platform.JobTaskPending());
 
   dispatcher.AbortJob(shared);
@@ -635,8 +647,9 @@ TEST_F(LazyCompileDispatcherTest, AbortJobAlreadyStarted) {
 
   ASSERT_FALSE(shared->is_compiled());
   ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 1);
-  ASSERT_NE(dispatcher.GetJobFor(shared)->state,
-            LazyCompileDispatcher::Job::State::kReadyToFinalize);
+  ASSERT_NE(
+      dispatcher.GetJobFor(shared, base::MutexGuard(&dispatcher.mutex_))->state,
+      LazyCompileDispatcher::Job::State::kReadyToFinalize);
   ASSERT_TRUE(platform.JobTaskPending());
 
   // Have dispatcher block on the background thread when running the job.
@@ -663,8 +676,9 @@ TEST_F(LazyCompileDispatcherTest, AbortJobAlreadyStarted) {
 
   // Job should have finished running and then been aborted.
   ASSERT_FALSE(shared->is_compiled());
   ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 1);
-  ASSERT_EQ(dispatcher.GetJobFor(shared)->state,
-            LazyCompileDispatcher::Job::State::kAborted);
+  ASSERT_EQ(
+      dispatcher.GetJobFor(shared, base::MutexGuard(&dispatcher.mutex_))->state,
+      LazyCompileDispatcher::Job::State::kAborted);
   ASSERT_FALSE(platform.JobTaskPending());
   ASSERT_TRUE(platform.IdleTaskPending());
@@ -748,10 +762,14 @@ TEST_F(LazyCompileDispatcherTest, CompileMultipleOnBackgroundThread) {
   EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared_2);
 
   ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 2);
-  ASSERT_NE(dispatcher.GetJobFor(shared_1)->state,
-            LazyCompileDispatcher::Job::State::kReadyToFinalize);
-  ASSERT_NE(dispatcher.GetJobFor(shared_2)->state,
-            LazyCompileDispatcher::Job::State::kReadyToFinalize);
+  ASSERT_NE(
+      dispatcher.GetJobFor(shared_1, base::MutexGuard(&dispatcher.mutex_))
+          ->state,
+      LazyCompileDispatcher::Job::State::kReadyToFinalize);
+  ASSERT_NE(
+      dispatcher.GetJobFor(shared_2, base::MutexGuard(&dispatcher.mutex_))
+          ->state,
+      LazyCompileDispatcher::Job::State::kReadyToFinalize);
 
   ASSERT_TRUE(dispatcher.IsEnqueued(shared_1));
   ASSERT_TRUE(dispatcher.IsEnqueued(shared_2));
@@ -765,10 +783,14 @@ TEST_F(LazyCompileDispatcherTest, CompileMultipleOnBackgroundThread) {
   ASSERT_TRUE(platform.IdleTaskPending());
   ASSERT_FALSE(platform.JobTaskPending());
   ASSERT_EQ(dispatcher.shared_to_unoptimized_job_.size(), 2);
-  ASSERT_EQ(dispatcher.GetJobFor(shared_1)->state,
-            LazyCompileDispatcher::Job::State::kReadyToFinalize);
-  ASSERT_EQ(dispatcher.GetJobFor(shared_2)->state,
-            LazyCompileDispatcher::Job::State::kReadyToFinalize);
+  ASSERT_EQ(
+      dispatcher.GetJobFor(shared_1, base::MutexGuard(&dispatcher.mutex_))
+          ->state,
+      LazyCompileDispatcher::Job::State::kReadyToFinalize);
+  ASSERT_EQ(
+      dispatcher.GetJobFor(shared_2, base::MutexGuard(&dispatcher.mutex_))
+          ->state,
+      LazyCompileDispatcher::Job::State::kReadyToFinalize);
 
   // Now grant a lot of idle time and freeze time.
   platform.RunIdleTask(1000.0, 0.0);
...