Commit 823a9182 authored by Ross McIlroy, committed by Commit Bot

[CompilerDispatcher] Simplify Abort logic and remove MemoryPressure notifications.

The memory pressure notification logic wasn't correct and, since the current users of
the compiler dispatcher don't post speculative tasks, it isn't particularly useful.
With it removed, the abort logic can also be simplified significantly by dropping the
non-blocking abort path.

BUG=v8:8041

Cq-Include-Trybots: luci.chromium.try:linux_chromium_rel_ng
Change-Id: I584533b58fb717fdca46cc620822914d6bdb28b8
Reviewed-on: https://chromium-review.googlesource.com/c/1278495
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56609}
parent fa8af5a7
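To illustrate the shutdown contract this change arrives at, here is a minimal, self-contained C++ sketch. FakeTaskManager and FakeDispatcher are invented stand-ins, not V8 classes; the point is only that AbortAll() is now always blocking, cancels the task manager itself, and must have run before the dispatcher is destroyed.

#include <cassert>

// Hedged sketch of the post-change shutdown contract (invented names,
// not the real V8 classes; see the diff below for the actual code).
class FakeTaskManager {
 public:
  void TryAbortAll() { /* drop tasks that have not started running yet */ }
  void CancelAndWait() { canceled_ = true; /* join tasks already running */ }
  bool canceled() const { return canceled_; }

 private:
  bool canceled_ = false;
};

class FakeDispatcher {
 public:
  ~FakeDispatcher() {
    // Mirrors the new CHECK in ~CompilerDispatcher(): AbortAll() must have
    // been called before destruction; there is no async abort path anymore.
    assert(task_manager_.canceled());
  }

  void AbortAll() {
    task_manager_.TryAbortAll();
    // ...wait for background jobs still running, drop all queued jobs...
    task_manager_.CancelAndWait();
  }

 private:
  FakeTaskManager task_manager_;
};

int main() {
  FakeDispatcher dispatcher;
  dispatcher.AbortAll();  // roughly what Isolate::Deinit() now does
  return 0;               // the destructor's assert then passes
}

This mirrors the updated call site in Isolate::Deinit(), which now calls AbortAll() with no BlockingBehavior argument before deleting the dispatcher.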
@@ -8663,8 +8663,6 @@ void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
: i::ThreadId::Current().Equals(isolate->thread_id());
isolate->heap()->MemoryPressureNotification(level, on_isolate_thread);
isolate->allocator()->MemoryPressureNotification(level);
isolate->compiler_dispatcher()->MemoryPressureNotification(level,
on_isolate_thread);
}
void Isolate::EnableMemorySavingsMode() {
......
@@ -43,10 +43,6 @@ class V8_EXPORT_PRIVATE CancelableTaskManager {
enum TryAbortResult { kTaskRemoved, kTaskRunning, kTaskAborted };
TryAbortResult TryAbort(Id id);
// Cancels all remaining registered tasks and waits for tasks that are
// already running. This disallows subsequent Register calls.
void CancelAndWait();
// Tries to cancel all remaining registered tasks. The return value indicates
// whether
//
@@ -58,6 +54,13 @@ class V8_EXPORT_PRIVATE CancelableTaskManager {
// 3) All registered tasks were cancelled (kTaskAborted).
TryAbortResult TryAbortAll();
// Cancels all remaining registered tasks and waits for tasks that are
// already running. This disallows subsequent Register calls.
void CancelAndWait();
// Returns true if the task manager has been cancelled.
bool canceled() const { return canceled_; }
private:
// Only called by {Cancelable} destructor. The task is done with executing,
// but needs to be removed.
......
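As a side note on the TryAbortResult values documented above: the sketch below uses a simplified stand-in enum and a hypothetical helper (not the real cancelable-task.h) to show how a caller such as the old AbortAll distinguished the one case that still requires waiting; only kTaskRunning means a task is currently executing on a background thread.

#include <cstdio>

// Stand-in for the TryAbortResult semantics described in the comments above
// (simplified; not the real v8::internal::CancelableTaskManager).
enum TryAbortResult { kTaskRemoved, kTaskRunning, kTaskAborted };

// Hypothetical helper: does the caller still need to wait for background
// work after attempting to abort everything?
bool MustWaitForBackgroundWork(TryAbortResult result) {
  switch (result) {
    case kTaskRemoved:  // the task was removed before it ever ran
    case kTaskAborted:  // all registered tasks were cancelled
      return false;
    case kTaskRunning:  // at least one task is currently executing
      return true;
  }
  return true;  // defensive default
}

int main() {
  std::printf("running -> wait: %d\n", MustWaitForBackgroundWork(kTaskRunning));
  std::printf("aborted -> wait: %d\n", MustWaitForBackgroundWork(kTaskAborted));
  return 0;
}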
@@ -39,8 +39,6 @@ CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
task_manager_(new CancelableTaskManager()),
next_job_id_(0),
shared_to_unoptimized_job_id_(isolate->heap()),
memory_pressure_level_(MemoryPressureLevel::kNone),
abort_(false),
idle_task_scheduled_(false),
num_worker_tasks_(0),
main_thread_blocking_on_job_(nullptr),
@@ -52,26 +50,8 @@ CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
}
CompilerDispatcher::~CompilerDispatcher() {
// To avoid crashing in unit tests due to unfinished jobs.
AbortAll(BlockingBehavior::kBlock);
task_manager_->CancelAndWait();
}
bool CompilerDispatcher::CanEnqueue() {
if (!IsEnabled()) return false;
// TODO(rmcilroy): Investigate if MemoryPressureLevel::kNone is ever sent on
// Android, if not, remove this check.
if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
return false;
}
{
base::LockGuard<base::Mutex> lock(&mutex_);
if (abort_) return false;
}
return true;
// AbortAll must be called before CompilerDispatcher is destroyed.
CHECK(task_manager_->canceled());
}
base::Optional<CompilerDispatcher::JobId> CompilerDispatcher::Enqueue(
@@ -82,7 +62,7 @@ base::Optional<CompilerDispatcher::JobId> CompilerDispatcher::Enqueue(
RuntimeCallTimerScope runtimeTimer(
isolate_, RuntimeCallCounterId::kCompileEnqueueOnDispatcher);
if (!CanEnqueue()) return base::nullopt;
if (!IsEnabled()) return base::nullopt;
std::unique_ptr<Job> job = base::make_unique<Job>(new BackgroundCompileTask(
allocator_, outer_parse_info, function_name, function_literal,
@@ -196,10 +176,9 @@ bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
return success;
}
void CompilerDispatcher::AbortAll(BlockingBehavior blocking) {
bool background_tasks_running =
task_manager_->TryAbortAll() == CancelableTaskManager::kTaskRunning;
if (!background_tasks_running || blocking == BlockingBehavior::kBlock) {
void CompilerDispatcher::AbortAll() {
task_manager_->TryAbortAll();
for (auto& it : jobs_) {
WaitForJobIfRunningOnBackground(it.second.get());
if (trace_compiler_dispatcher_) {
@@ -212,82 +191,9 @@ void CompilerDispatcher::AbortAll(BlockingBehavior blocking) {
base::LockGuard<base::Mutex> lock(&mutex_);
DCHECK(pending_background_jobs_.empty());
DCHECK(running_background_jobs_.empty());
abort_ = false;
}
return;
}
{
base::LockGuard<base::Mutex> lock(&mutex_);
abort_ = true;
pending_background_jobs_.clear();
idle_task_scheduled_ = false; // Idle task cancelled by TryAbortAll.
}
AbortInactiveJobs();
// All running background jobs might already have scheduled idle tasks instead
// of abort tasks. Schedule a single abort task here to make sure they get
// processed as soon as possible (and not first when we have idle time).
ScheduleAbortTask();
}
void CompilerDispatcher::AbortInactiveJobs() {
{
base::LockGuard<base::Mutex> lock(&mutex_);
// Since we schedule two abort tasks per async abort, we might end up
// here with nothing left to do.
if (!abort_) return;
}
for (auto it = jobs_.cbegin(); it != jobs_.cend();) {
auto job = it;
++it;
{
base::LockGuard<base::Mutex> lock(&mutex_);
if (running_background_jobs_.find(job->second.get()) !=
running_background_jobs_.end()) {
continue;
}
}
if (trace_compiler_dispatcher_) {
PrintF("CompilerDispatcher: aborted job %zu\n", job->first);
}
it = RemoveJob(job);
}
if (jobs_.empty()) {
base::LockGuard<base::Mutex> lock(&mutex_);
if (num_worker_tasks_ == 0) abort_ = false;
}
}
void CompilerDispatcher::MemoryPressureNotification(
v8::MemoryPressureLevel level, bool is_isolate_locked) {
MemoryPressureLevel previous = memory_pressure_level_.Value();
memory_pressure_level_.SetValue(level);
// If we're already under pressure, we haven't accepted new tasks meanwhile
// and can just return. If we're no longer under pressure, we're also done.
if (previous != MemoryPressureLevel::kNone ||
level == MemoryPressureLevel::kNone) {
return;
}
if (trace_compiler_dispatcher_) {
PrintF("CompilerDispatcher: received memory pressure notification\n");
}
if (is_isolate_locked) {
AbortAll(BlockingBehavior::kDontBlock);
} else {
{
base::LockGuard<base::Mutex> lock(&mutex_);
if (abort_) return;
// By going into abort mode here, and clearing the
// pending_background_jobs_, we at least keep existing background jobs from
// picking up more work before the MemoryPressureTask gets executed.
abort_ = true;
pending_background_jobs_.clear();
}
taskrunner_->PostTask(MakeCancelableLambdaTask(task_manager_.get(), [this] {
AbortAll(BlockingBehavior::kDontBlock);
}));
}
task_manager_->CancelAndWait();
}
CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor(
@@ -303,7 +209,7 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor(
void CompilerDispatcher::ScheduleIdleTaskFromAnyThread(
const base::LockGuard<base::Mutex>&) {
if (!taskrunner_->IdleTasksEnabled()) return;
if (idle_task_scheduled_ || abort_) return;
if (idle_task_scheduled_) return;
idle_task_scheduled_ = true;
taskrunner_->PostIdleTask(MakeCancelableIdleLambdaTask(
@@ -311,11 +217,6 @@ void CompilerDispatcher::ScheduleIdleTaskFromAnyThread(
[this](double deadline_in_seconds) { DoIdleWork(deadline_in_seconds); }));
}
void CompilerDispatcher::ScheduleAbortTask() {
taskrunner_->PostTask(MakeCancelableLambdaTask(
task_manager_.get(), [this] { AbortInactiveJobs(); }));
}
void CompilerDispatcher::ScheduleMoreWorkerTasksIfNeeded() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompilerDispatcherScheduleMoreWorkerTasksIfNeeded");
@@ -379,13 +280,6 @@ void CompilerDispatcher::DoBackgroundWork() {
{
base::LockGuard<base::Mutex> lock(&mutex_);
--num_worker_tasks_;
if (running_background_jobs_.empty() && abort_) {
// This is the last background job that finished. The abort task
// scheduled by AbortAll might already have run, so schedule another
// one to be on the safe side.
ScheduleAbortTask();
}
}
// Don't touch |this| anymore after this point, as it might have been
// deleted.
@@ -394,16 +288,9 @@ void CompilerDispatcher::DoBackgroundWork() {
void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompilerDispatcherDoIdleWork");
bool aborted = false;
{
base::LockGuard<base::Mutex> lock(&mutex_);
idle_task_scheduled_ = false;
aborted = abort_;
}
if (aborted) {
AbortInactiveJobs();
return;
}
if (trace_compiler_dispatcher_) {
@@ -468,13 +355,7 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveJob(
}
// Delete job.
it = jobs_.erase(it);
if (jobs_.empty()) {
base::LockGuard<base::Mutex> lock(&mutex_);
if (num_worker_tasks_ == 0) abort_ = false;
}
return it;
return jobs_.erase(it);
}
} // namespace internal
......
@@ -101,15 +101,8 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
// possible). Returns true if the compile job was successful.
bool FinishNow(Handle<SharedFunctionInfo> function);
// Aborts a given job. Blocks if requested.
void Abort(Handle<SharedFunctionInfo> function, BlockingBehavior blocking);
// Aborts all jobs. Blocks if requested.
void AbortAll(BlockingBehavior blocking);
// Memory pressure notifications from the embedder.
void MemoryPressureNotification(v8::MemoryPressureLevel level,
bool is_isolate_locked);
// Aborts all jobs, blocking until all jobs are aborted.
void AbortAll();
private:
FRIEND_TEST(CompilerDispatcherTest, IdleTaskNoIdleTime);
@@ -117,13 +110,8 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
FRIEND_TEST(CompilerDispatcherTest, FinishNowWithWorkerTask);
FRIEND_TEST(CompilerDispatcherTest, AsyncAbortAllPendingWorkerTask);
FRIEND_TEST(CompilerDispatcherTest, AsyncAbortAllRunningWorkerTask);
FRIEND_TEST(CompilerDispatcherTest, FinishNowDuringAbortAll);
FRIEND_TEST(CompilerDispatcherTest, CompileMultipleOnBackgroundThread);
class AbortTask;
class WorkerTask;
class IdleTask;
struct Job {
explicit Job(BackgroundCompileTask* task_arg);
~Job();
@@ -145,13 +133,10 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
typedef std::map<JobId, std::unique_ptr<Job>> JobMap;
typedef IdentityMap<JobId, FreeStoreAllocationPolicy> SharedToJobIdMap;
bool CanEnqueue();
void WaitForJobIfRunningOnBackground(Job* job);
void AbortInactiveJobs();
JobMap::const_iterator GetJobFor(Handle<SharedFunctionInfo> shared) const;
void ScheduleMoreWorkerTasksIfNeeded();
void ScheduleIdleTaskFromAnyThread(const base::LockGuard<base::Mutex>&);
void ScheduleAbortTask();
void DoBackgroundWork();
void DoIdleWork(double deadline_in_seconds);
// Returns iterator to the inserted job.
@@ -182,15 +167,11 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
// compilation's JobId;
SharedToJobIdMap shared_to_unoptimized_job_id_;
base::AtomicValue<v8::MemoryPressureLevel> memory_pressure_level_;
// The following members can be accessed from any thread. Methods need to hold
// the mutex |mutex_| while accessing them.
base::Mutex mutex_;
// True if the dispatcher is in the process of aborting running tasks.
bool abort_;
// True if an idle task is scheduled to be run.
bool idle_task_scheduled_;
// Number of scheduled or running WorkerTask objects.
......
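For the members that, per the comment above, can be accessed from any thread only while holding |mutex_|, here is a minimal sketch of that locking convention; std::mutex and std::lock_guard stand in for base::Mutex and base::LockGuard, and SketchDispatcher is a made-up name, not V8 code.

#include <mutex>

// Hedged sketch of the locking convention: every read or write of a member
// shared across threads happens while holding the mutex.
class SketchDispatcher {
 public:
  void MarkIdleTaskScheduled() {
    std::lock_guard<std::mutex> lock(mutex_);
    idle_task_scheduled_ = true;  // guarded write
  }

  bool IsIdleTaskScheduled() {
    std::lock_guard<std::mutex> lock(mutex_);
    return idle_task_scheduled_;  // guarded read
  }

 private:
  std::mutex mutex_;
  bool idle_task_scheduled_ = false;  // accessed from any thread
};

int main() {
  SketchDispatcher d;
  d.MarkIdleTaskScheduled();
  return d.IsIdleTaskScheduled() ? 0 : 1;
}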
@@ -2807,7 +2807,7 @@ void Isolate::Deinit() {
delete heap_profiler_;
heap_profiler_ = nullptr;
compiler_dispatcher_->AbortAll(BlockingBehavior::kBlock);
compiler_dispatcher_->AbortAll();
delete compiler_dispatcher_;
compiler_dispatcher_ = nullptr;
......