Commit 823a9182 authored by Ross McIlroy, committed by Commit Bot

[CompilerDispatcher] Simplify Abort logic and remove MemoryPressure notifications.

The memory pressure notification logic wasn't correct and given the current users of
the compiler dispatcher aren't posting speculative tasks, it isn't particularly useful.
After removing this, the abort logic can also be simplified significantly by removing the
non-blocking abort logic.

BUG=v8:8041

Cq-Include-Trybots: luci.chromium.try:linux_chromium_rel_ng
Change-Id: I584533b58fb717fdca46cc620822914d6bdb28b8
Reviewed-on: https://chromium-review.googlesource.com/c/1278495
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56609}
parent fa8af5a7
...@@ -8663,8 +8663,6 @@ void Isolate::MemoryPressureNotification(MemoryPressureLevel level) { ...@@ -8663,8 +8663,6 @@ void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
: i::ThreadId::Current().Equals(isolate->thread_id()); : i::ThreadId::Current().Equals(isolate->thread_id());
isolate->heap()->MemoryPressureNotification(level, on_isolate_thread); isolate->heap()->MemoryPressureNotification(level, on_isolate_thread);
isolate->allocator()->MemoryPressureNotification(level); isolate->allocator()->MemoryPressureNotification(level);
isolate->compiler_dispatcher()->MemoryPressureNotification(level,
on_isolate_thread);
} }
void Isolate::EnableMemorySavingsMode() { void Isolate::EnableMemorySavingsMode() {
......
...@@ -43,10 +43,6 @@ class V8_EXPORT_PRIVATE CancelableTaskManager { ...@@ -43,10 +43,6 @@ class V8_EXPORT_PRIVATE CancelableTaskManager {
enum TryAbortResult { kTaskRemoved, kTaskRunning, kTaskAborted }; enum TryAbortResult { kTaskRemoved, kTaskRunning, kTaskAborted };
TryAbortResult TryAbort(Id id); TryAbortResult TryAbort(Id id);
// Cancels all remaining registered tasks and waits for tasks that are
// already running. This disallows subsequent Register calls.
void CancelAndWait();
// Tries to cancel all remaining registered tasks. The return value indicates // Tries to cancel all remaining registered tasks. The return value indicates
// whether // whether
// //
...@@ -58,6 +54,13 @@ class V8_EXPORT_PRIVATE CancelableTaskManager { ...@@ -58,6 +54,13 @@ class V8_EXPORT_PRIVATE CancelableTaskManager {
// 3) All registered tasks were cancelled (kTaskAborted). // 3) All registered tasks were cancelled (kTaskAborted).
TryAbortResult TryAbortAll(); TryAbortResult TryAbortAll();
// Cancels all remaining registered tasks and waits for tasks that are
// already running. This disallows subsequent Register calls.
void CancelAndWait();
// Returns true if the task manager has been cancelled.
bool canceled() const { return canceled_; }
private: private:
// Only called by {Cancelable} destructor. The task is done with executing, // Only called by {Cancelable} destructor. The task is done with executing,
// but needs to be removed. // but needs to be removed.
......
...@@ -39,8 +39,6 @@ CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform, ...@@ -39,8 +39,6 @@ CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
task_manager_(new CancelableTaskManager()), task_manager_(new CancelableTaskManager()),
next_job_id_(0), next_job_id_(0),
shared_to_unoptimized_job_id_(isolate->heap()), shared_to_unoptimized_job_id_(isolate->heap()),
memory_pressure_level_(MemoryPressureLevel::kNone),
abort_(false),
idle_task_scheduled_(false), idle_task_scheduled_(false),
num_worker_tasks_(0), num_worker_tasks_(0),
main_thread_blocking_on_job_(nullptr), main_thread_blocking_on_job_(nullptr),
...@@ -52,26 +50,8 @@ CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform, ...@@ -52,26 +50,8 @@ CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
} }
CompilerDispatcher::~CompilerDispatcher() { CompilerDispatcher::~CompilerDispatcher() {
// To avoid crashing in unit tests due to unfished jobs. // AbortAll must be called before CompilerDispatcher is destroyed.
AbortAll(BlockingBehavior::kBlock); CHECK(task_manager_->canceled());
task_manager_->CancelAndWait();
}
bool CompilerDispatcher::CanEnqueue() {
if (!IsEnabled()) return false;
// TODO(rmcilroy): Investigate if MemoryPressureLevel::kNone is ever sent on
// Android, if not, remove this check.
if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
return false;
}
{
base::LockGuard<base::Mutex> lock(&mutex_);
if (abort_) return false;
}
return true;
} }
base::Optional<CompilerDispatcher::JobId> CompilerDispatcher::Enqueue( base::Optional<CompilerDispatcher::JobId> CompilerDispatcher::Enqueue(
...@@ -82,7 +62,7 @@ base::Optional<CompilerDispatcher::JobId> CompilerDispatcher::Enqueue( ...@@ -82,7 +62,7 @@ base::Optional<CompilerDispatcher::JobId> CompilerDispatcher::Enqueue(
RuntimeCallTimerScope runtimeTimer( RuntimeCallTimerScope runtimeTimer(
isolate_, RuntimeCallCounterId::kCompileEnqueueOnDispatcher); isolate_, RuntimeCallCounterId::kCompileEnqueueOnDispatcher);
if (!CanEnqueue()) return base::nullopt; if (!IsEnabled()) return base::nullopt;
std::unique_ptr<Job> job = base::make_unique<Job>(new BackgroundCompileTask( std::unique_ptr<Job> job = base::make_unique<Job>(new BackgroundCompileTask(
allocator_, outer_parse_info, function_name, function_literal, allocator_, outer_parse_info, function_name, function_literal,
...@@ -196,98 +176,24 @@ bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) { ...@@ -196,98 +176,24 @@ bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
return success; return success;
} }
void CompilerDispatcher::AbortAll(BlockingBehavior blocking) { void CompilerDispatcher::AbortAll() {
bool background_tasks_running = task_manager_->TryAbortAll();
task_manager_->TryAbortAll() == CancelableTaskManager::kTaskRunning;
if (!background_tasks_running || blocking == BlockingBehavior::kBlock) {
for (auto& it : jobs_) {
WaitForJobIfRunningOnBackground(it.second.get());
if (trace_compiler_dispatcher_) {
PrintF("CompilerDispatcher: aborted job %zu\n", it.first);
}
}
jobs_.clear();
shared_to_unoptimized_job_id_.Clear();
{
base::LockGuard<base::Mutex> lock(&mutex_);
DCHECK(pending_background_jobs_.empty());
DCHECK(running_background_jobs_.empty());
abort_ = false;
}
return;
}
{
base::LockGuard<base::Mutex> lock(&mutex_);
abort_ = true;
pending_background_jobs_.clear();
idle_task_scheduled_ = false; // Idle task cancelled by TryAbortAll.
}
AbortInactiveJobs();
// All running background jobs might already have scheduled idle tasks instead
// of abort tasks. Schedule a single abort task here to make sure they get
// processed as soon as possible (and not first when we have idle time).
ScheduleAbortTask();
}
void CompilerDispatcher::AbortInactiveJobs() { for (auto& it : jobs_) {
{ WaitForJobIfRunningOnBackground(it.second.get());
base::LockGuard<base::Mutex> lock(&mutex_);
// Since we schedule two abort tasks per async abort, we might end up
// here with nothing left to do.
if (!abort_) return;
}
for (auto it = jobs_.cbegin(); it != jobs_.cend();) {
auto job = it;
++it;
{
base::LockGuard<base::Mutex> lock(&mutex_);
if (running_background_jobs_.find(job->second.get()) !=
running_background_jobs_.end()) {
continue;
}
}
if (trace_compiler_dispatcher_) { if (trace_compiler_dispatcher_) {
PrintF("CompilerDispatcher: aborted job %zu\n", job->first); PrintF("CompilerDispatcher: aborted job %zu\n", it.first);
} }
it = RemoveJob(job);
} }
if (jobs_.empty()) { jobs_.clear();
shared_to_unoptimized_job_id_.Clear();
{
base::LockGuard<base::Mutex> lock(&mutex_); base::LockGuard<base::Mutex> lock(&mutex_);
if (num_worker_tasks_ == 0) abort_ = false; DCHECK(pending_background_jobs_.empty());
DCHECK(running_background_jobs_.empty());
} }
}
void CompilerDispatcher::MemoryPressureNotification( task_manager_->CancelAndWait();
v8::MemoryPressureLevel level, bool is_isolate_locked) {
MemoryPressureLevel previous = memory_pressure_level_.Value();
memory_pressure_level_.SetValue(level);
// If we're already under pressure, we haven't accepted new tasks meanwhile
// and can just return. If we're no longer under pressure, we're also done.
if (previous != MemoryPressureLevel::kNone ||
level == MemoryPressureLevel::kNone) {
return;
}
if (trace_compiler_dispatcher_) {
PrintF("CompilerDispatcher: received memory pressure notification\n");
}
if (is_isolate_locked) {
AbortAll(BlockingBehavior::kDontBlock);
} else {
{
base::LockGuard<base::Mutex> lock(&mutex_);
if (abort_) return;
// By going into abort mode here, and clearing the
// pending_background_jobs_, we at keep existing background jobs from
// picking up more work before the MemoryPressureTask gets executed.
abort_ = true;
pending_background_jobs_.clear();
}
taskrunner_->PostTask(MakeCancelableLambdaTask(task_manager_.get(), [this] {
AbortAll(BlockingBehavior::kDontBlock);
}));
}
} }
CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor( CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor(
...@@ -303,7 +209,7 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor( ...@@ -303,7 +209,7 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor(
void CompilerDispatcher::ScheduleIdleTaskFromAnyThread( void CompilerDispatcher::ScheduleIdleTaskFromAnyThread(
const base::LockGuard<base::Mutex>&) { const base::LockGuard<base::Mutex>&) {
if (!taskrunner_->IdleTasksEnabled()) return; if (!taskrunner_->IdleTasksEnabled()) return;
if (idle_task_scheduled_ || abort_) return; if (idle_task_scheduled_) return;
idle_task_scheduled_ = true; idle_task_scheduled_ = true;
taskrunner_->PostIdleTask(MakeCancelableIdleLambdaTask( taskrunner_->PostIdleTask(MakeCancelableIdleLambdaTask(
...@@ -311,11 +217,6 @@ void CompilerDispatcher::ScheduleIdleTaskFromAnyThread( ...@@ -311,11 +217,6 @@ void CompilerDispatcher::ScheduleIdleTaskFromAnyThread(
[this](double deadline_in_seconds) { DoIdleWork(deadline_in_seconds); })); [this](double deadline_in_seconds) { DoIdleWork(deadline_in_seconds); }));
} }
void CompilerDispatcher::ScheduleAbortTask() {
taskrunner_->PostTask(MakeCancelableLambdaTask(
task_manager_.get(), [this] { AbortInactiveJobs(); }));
}
void CompilerDispatcher::ScheduleMoreWorkerTasksIfNeeded() { void CompilerDispatcher::ScheduleMoreWorkerTasksIfNeeded() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompilerDispatcherScheduleMoreWorkerTasksIfNeeded"); "V8.CompilerDispatcherScheduleMoreWorkerTasksIfNeeded");
...@@ -379,13 +280,6 @@ void CompilerDispatcher::DoBackgroundWork() { ...@@ -379,13 +280,6 @@ void CompilerDispatcher::DoBackgroundWork() {
{ {
base::LockGuard<base::Mutex> lock(&mutex_); base::LockGuard<base::Mutex> lock(&mutex_);
--num_worker_tasks_; --num_worker_tasks_;
if (running_background_jobs_.empty() && abort_) {
// This is the last background job that finished. The abort task
// scheduled by AbortAll might already have ran, so schedule another
// one to be on the safe side.
ScheduleAbortTask();
}
} }
// Don't touch |this| anymore after this point, as it might have been // Don't touch |this| anymore after this point, as it might have been
// deleted. // deleted.
...@@ -394,16 +288,9 @@ void CompilerDispatcher::DoBackgroundWork() { ...@@ -394,16 +288,9 @@ void CompilerDispatcher::DoBackgroundWork() {
void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) { void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompilerDispatcherDoIdleWork"); "V8.CompilerDispatcherDoIdleWork");
bool aborted = false;
{ {
base::LockGuard<base::Mutex> lock(&mutex_); base::LockGuard<base::Mutex> lock(&mutex_);
idle_task_scheduled_ = false; idle_task_scheduled_ = false;
aborted = abort_;
}
if (aborted) {
AbortInactiveJobs();
return;
} }
if (trace_compiler_dispatcher_) { if (trace_compiler_dispatcher_) {
...@@ -468,13 +355,7 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveJob( ...@@ -468,13 +355,7 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveJob(
} }
// Delete job. // Delete job.
it = jobs_.erase(it); return jobs_.erase(it);
if (jobs_.empty()) {
base::LockGuard<base::Mutex> lock(&mutex_);
if (num_worker_tasks_ == 0) abort_ = false;
}
return it;
} }
} // namespace internal } // namespace internal
......
...@@ -101,15 +101,8 @@ class V8_EXPORT_PRIVATE CompilerDispatcher { ...@@ -101,15 +101,8 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
// possible). Returns true if the compile job was successful. // possible). Returns true if the compile job was successful.
bool FinishNow(Handle<SharedFunctionInfo> function); bool FinishNow(Handle<SharedFunctionInfo> function);
// Aborts a given job. Blocks if requested. // Aborts all jobs, blocking until all jobs are aborted.
void Abort(Handle<SharedFunctionInfo> function, BlockingBehavior blocking); void AbortAll();
// Aborts all jobs. Blocks if requested.
void AbortAll(BlockingBehavior blocking);
// Memory pressure notifications from the embedder.
void MemoryPressureNotification(v8::MemoryPressureLevel level,
bool is_isolate_locked);
private: private:
FRIEND_TEST(CompilerDispatcherTest, IdleTaskNoIdleTime); FRIEND_TEST(CompilerDispatcherTest, IdleTaskNoIdleTime);
...@@ -117,13 +110,8 @@ class V8_EXPORT_PRIVATE CompilerDispatcher { ...@@ -117,13 +110,8 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
FRIEND_TEST(CompilerDispatcherTest, FinishNowWithWorkerTask); FRIEND_TEST(CompilerDispatcherTest, FinishNowWithWorkerTask);
FRIEND_TEST(CompilerDispatcherTest, AsyncAbortAllPendingWorkerTask); FRIEND_TEST(CompilerDispatcherTest, AsyncAbortAllPendingWorkerTask);
FRIEND_TEST(CompilerDispatcherTest, AsyncAbortAllRunningWorkerTask); FRIEND_TEST(CompilerDispatcherTest, AsyncAbortAllRunningWorkerTask);
FRIEND_TEST(CompilerDispatcherTest, FinishNowDuringAbortAll);
FRIEND_TEST(CompilerDispatcherTest, CompileMultipleOnBackgroundThread); FRIEND_TEST(CompilerDispatcherTest, CompileMultipleOnBackgroundThread);
class AbortTask;
class WorkerTask;
class IdleTask;
struct Job { struct Job {
explicit Job(BackgroundCompileTask* task_arg); explicit Job(BackgroundCompileTask* task_arg);
~Job(); ~Job();
...@@ -145,13 +133,10 @@ class V8_EXPORT_PRIVATE CompilerDispatcher { ...@@ -145,13 +133,10 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
typedef std::map<JobId, std::unique_ptr<Job>> JobMap; typedef std::map<JobId, std::unique_ptr<Job>> JobMap;
typedef IdentityMap<JobId, FreeStoreAllocationPolicy> SharedToJobIdMap; typedef IdentityMap<JobId, FreeStoreAllocationPolicy> SharedToJobIdMap;
bool CanEnqueue();
void WaitForJobIfRunningOnBackground(Job* job); void WaitForJobIfRunningOnBackground(Job* job);
void AbortInactiveJobs();
JobMap::const_iterator GetJobFor(Handle<SharedFunctionInfo> shared) const; JobMap::const_iterator GetJobFor(Handle<SharedFunctionInfo> shared) const;
void ScheduleMoreWorkerTasksIfNeeded(); void ScheduleMoreWorkerTasksIfNeeded();
void ScheduleIdleTaskFromAnyThread(const base::LockGuard<base::Mutex>&); void ScheduleIdleTaskFromAnyThread(const base::LockGuard<base::Mutex>&);
void ScheduleAbortTask();
void DoBackgroundWork(); void DoBackgroundWork();
void DoIdleWork(double deadline_in_seconds); void DoIdleWork(double deadline_in_seconds);
// Returns iterator to the inserted job. // Returns iterator to the inserted job.
...@@ -182,15 +167,11 @@ class V8_EXPORT_PRIVATE CompilerDispatcher { ...@@ -182,15 +167,11 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
// compilation's JobId; // compilation's JobId;
SharedToJobIdMap shared_to_unoptimized_job_id_; SharedToJobIdMap shared_to_unoptimized_job_id_;
base::AtomicValue<v8::MemoryPressureLevel> memory_pressure_level_;
// The following members can be accessed from any thread. Methods need to hold // The following members can be accessed from any thread. Methods need to hold
// the mutex |mutex_| while accessing them. // the mutex |mutex_| while accessing them.
base::Mutex mutex_; base::Mutex mutex_;
// True if the dispatcher is in the process of aborting running tasks. // True if an idle task is scheduled to be run.
bool abort_;
bool idle_task_scheduled_; bool idle_task_scheduled_;
// Number of scheduled or running WorkerTask objects. // Number of scheduled or running WorkerTask objects.
......
...@@ -2807,7 +2807,7 @@ void Isolate::Deinit() { ...@@ -2807,7 +2807,7 @@ void Isolate::Deinit() {
delete heap_profiler_; delete heap_profiler_;
heap_profiler_ = nullptr; heap_profiler_ = nullptr;
compiler_dispatcher_->AbortAll(BlockingBehavior::kBlock); compiler_dispatcher_->AbortAll();
delete compiler_dispatcher_; delete compiler_dispatcher_;
compiler_dispatcher_ = nullptr; compiler_dispatcher_ = nullptr;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment