// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler-dispatcher/compiler-dispatcher.h"

#include "include/v8-platform.h"
#include "include/v8.h"
#include "src/base/platform/time.h"
#include "src/cancelable-task.h"
#include "src/compilation-info.h"
#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
#include "src/compiler-dispatcher/unoptimized-compile-job.h"
#include "src/flags.h"
#include "src/objects-inl.h"

namespace v8 {
namespace internal {

namespace {

enum class ExceptionHandling { kSwallow, kThrow };

// Performs the next step of |job| on the main thread and returns whether the
// job failed. With ExceptionHandling::kSwallow, a pending exception caused by
// a failed step is cleared.
bool DoNextStepOnMainThread(Isolate* isolate, CompilerDispatcherJob* job,
                            ExceptionHandling exception_handling) {
  DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherForgroundStep");

  switch (job->status()) {
    case CompilerDispatcherJob::Status::kInitial:
      job->PrepareOnMainThread(isolate);
      break;
    case CompilerDispatcherJob::Status::kPrepared:
      job->Compile(false);
      break;
    case CompilerDispatcherJob::Status::kCompiled:
      job->FinalizeOnMainThread(isolate);
      break;
    case CompilerDispatcherJob::Status::kHasErrorsToReport:
      job->ReportErrorsOnMainThread(isolate);
      break;
    case CompilerDispatcherJob::Status::kFailed:
    case CompilerDispatcherJob::Status::kDone:
      UNREACHABLE();
  }

  DCHECK_EQ(job->IsFailed(), isolate->has_pending_exception());
  if (job->IsFailed() && exception_handling == ExceptionHandling::kSwallow) {
    isolate->clear_pending_exception();
  }
  return job->IsFailed();
}

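// Performs the next step of |job| on a background thread. Currently only the
// compile step is allowed to run off the main thread.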
void DoNextStepOnBackgroundThread(CompilerDispatcherJob* job) {
  DCHECK(job->NextStepCanRunOnAnyThread());
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherBackgroundStep");

  switch (job->status()) {
    case CompilerDispatcherJob::Status::kPrepared:
      job->Compile(true);
      break;
    default:
      UNREACHABLE();
  }
}

// Theoretically we get at most 50ms of idle time; however, it's unlikely that
// we'll get all of it, so try to be conservative.
const double kMaxIdleTimeToExpectInMs = 40;

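// Posted to the main thread on memory pressure; aborts all dispatcher jobs
// without blocking.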
class MemoryPressureTask : public CancelableTask {
 public:
  MemoryPressureTask(Isolate* isolate, CancelableTaskManager* task_manager,
                     CompilerDispatcher* dispatcher);
  ~MemoryPressureTask() override;

  // CancelableTask implementation.
  void RunInternal() override;

 private:
  CompilerDispatcher* dispatcher_;

  DISALLOW_COPY_AND_ASSIGN(MemoryPressureTask);
};

MemoryPressureTask::MemoryPressureTask(Isolate* isolate,
                                       CancelableTaskManager* task_manager,
                                       CompilerDispatcher* dispatcher)
    : CancelableTask(task_manager), dispatcher_(dispatcher) {}

MemoryPressureTask::~MemoryPressureTask() {}

void MemoryPressureTask::RunInternal() {
  dispatcher_->AbortAll(CompilerDispatcher::BlockingBehavior::kDontBlock);
}

}  // namespace

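// Posted to the main thread to abort all jobs that are not currently running
// on a background thread.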
class CompilerDispatcher::AbortTask : public CancelableTask {
 public:
  AbortTask(Isolate* isolate, CancelableTaskManager* task_manager,
            CompilerDispatcher* dispatcher);
  ~AbortTask() override;

  // CancelableTask implementation.
  void RunInternal() override;

 private:
  CompilerDispatcher* dispatcher_;

  DISALLOW_COPY_AND_ASSIGN(AbortTask);
};

CompilerDispatcher::AbortTask::AbortTask(Isolate* isolate,
                                         CancelableTaskManager* task_manager,
                                         CompilerDispatcher* dispatcher)
    : CancelableTask(task_manager), dispatcher_(dispatcher) {}

CompilerDispatcher::AbortTask::~AbortTask() {}

void CompilerDispatcher::AbortTask::RunInternal() {
  dispatcher_->AbortInactiveJobs();
}

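// Runs on a background thread and processes pending background jobs via
// DoBackgroundWork().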
class CompilerDispatcher::BackgroundTask : public CancelableTask {
 public:
  BackgroundTask(Isolate* isolate, CancelableTaskManager* task_manager,
                 CompilerDispatcher* dispatcher);
  ~BackgroundTask() override;

  // CancelableTask implementation.
  void RunInternal() override;

 private:
  CompilerDispatcher* dispatcher_;

  DISALLOW_COPY_AND_ASSIGN(BackgroundTask);
};

CompilerDispatcher::BackgroundTask::BackgroundTask(
    Isolate* isolate, CancelableTaskManager* task_manager,
    CompilerDispatcher* dispatcher)
    : CancelableTask(task_manager), dispatcher_(dispatcher) {}

CompilerDispatcher::BackgroundTask::~BackgroundTask() {}

void CompilerDispatcher::BackgroundTask::RunInternal() {
  dispatcher_->DoBackgroundWork();
}

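// Posted as an idle task on the main thread; performs main-thread job steps
// during idle time via DoIdleWork().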
class CompilerDispatcher::IdleTask : public CancelableIdleTask {
 public:
  IdleTask(Isolate* isolate, CancelableTaskManager* task_manager,
           CompilerDispatcher* dispatcher);
  ~IdleTask() override;

  // CancelableIdleTask implementation.
  void RunInternal(double deadline_in_seconds) override;

 private:
  CompilerDispatcher* dispatcher_;

  DISALLOW_COPY_AND_ASSIGN(IdleTask);
};

CompilerDispatcher::IdleTask::IdleTask(Isolate* isolate,
                                       CancelableTaskManager* task_manager,
                                       CompilerDispatcher* dispatcher)
    : CancelableIdleTask(task_manager), dispatcher_(dispatcher) {}

CompilerDispatcher::IdleTask::~IdleTask() {}

void CompilerDispatcher::IdleTask::RunInternal(double deadline_in_seconds) {
  dispatcher_->DoIdleWork(deadline_in_seconds);
}

CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
                                       size_t max_stack_size)
    : isolate_(isolate),
      platform_(platform),
      max_stack_size_(max_stack_size),
      trace_compiler_dispatcher_(FLAG_trace_compiler_dispatcher),
      tracer_(new CompilerDispatcherTracer(isolate_)),
      task_manager_(new CancelableTaskManager()),
      next_job_id_(0),
      shared_to_unoptimized_job_id_(isolate->heap()),
      memory_pressure_level_(MemoryPressureLevel::kNone),
      abort_(false),
      idle_task_scheduled_(false),
      num_background_tasks_(0),
      main_thread_blocking_on_job_(nullptr),
      block_for_testing_(false),
      semaphore_for_testing_(0) {
  if (trace_compiler_dispatcher_ && !IsEnabled()) {
    PrintF("CompilerDispatcher: dispatcher is disabled\n");
  }
}

CompilerDispatcher::~CompilerDispatcher() {
  // To avoid crashing in unit tests due to unfinished jobs.
  AbortAll(BlockingBehavior::kBlock);
  task_manager_->CancelAndWait();
}

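// Returns whether new jobs can currently be enqueued: the dispatcher must be
// enabled, not under memory pressure, and not aborting.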
bool CompilerDispatcher::CanEnqueue() {
  if (!IsEnabled()) return false;

  if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
    return false;
  }

  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    if (abort_) return false;
  }

  return true;
}

bool CompilerDispatcher::CanEnqueue(Handle<SharedFunctionInfo> function) {
  if (!CanEnqueue()) return false;

  // We only handle functions (no eval / top-level code / native) that are
  // attached to a script.
  if (!function->script()->IsScript() || function->is_toplevel() ||
      function->native()) {
    return false;
  }

  return true;
}

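// Inserts |job| into the job map, considers its next step for background
// processing, and schedules an idle task if needed.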
CompilerDispatcher::JobId CompilerDispatcher::Enqueue(
    std::unique_ptr<CompilerDispatcherJob> job) {
  DCHECK(!job->IsFinished());
  JobMap::const_iterator it = InsertJob(std::move(job));
  ConsiderJobForBackgroundProcessing(it->second.get());
  ScheduleIdleTaskIfNeeded();
  return it->first;
}

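// Like Enqueue(), but additionally performs the first step of |job| on the
// main thread before returning its id.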
CompilerDispatcher::JobId CompilerDispatcher::EnqueueAndStep(
    std::unique_ptr<CompilerDispatcherJob> job) {
  DCHECK(!job->IsFinished());
  JobMap::const_iterator it = InsertJob(std::move(job));
  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: stepping ");
    it->second->ShortPrintOnMainThread();
    PrintF("\n");
  }
  DoNextStepOnMainThread(isolate_, it->second.get(),
                         ExceptionHandling::kSwallow);
  ConsiderJobForBackgroundProcessing(it->second.get());
  // Remember the id before RemoveIfFinished() potentially erases the entry.
  JobId id = it->first;
  RemoveIfFinished(it);
  ScheduleIdleTaskIfNeeded();
  return id;
}

bool CompilerDispatcher::Enqueue(Handle<SharedFunctionInfo> function) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherEnqueue");
  if (!CanEnqueue(function)) return false;
  if (IsEnqueued(function)) return true;

  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: enqueuing ");
    function->ShortPrint();
    PrintF(" for parse and compile\n");
  }

  std::unique_ptr<CompilerDispatcherJob> job(new UnoptimizedCompileJob(
      isolate_, tracer_.get(), function, max_stack_size_));
  Enqueue(std::move(job));
  return true;
}

bool CompilerDispatcher::EnqueueAndStep(Handle<SharedFunctionInfo> function) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherEnqueueAndStep");
  if (!CanEnqueue(function)) return false;
  if (IsEnqueued(function)) return true;

  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: enqueuing ");
    function->ShortPrint();
    PrintF(" for parse and compile\n");
  }

  std::unique_ptr<CompilerDispatcherJob> job(new UnoptimizedCompileJob(
      isolate_, tracer_.get(), function, max_stack_size_));
  EnqueueAndStep(std::move(job));
  return true;
}

bool CompilerDispatcher::IsEnabled() const { return FLAG_compiler_dispatcher; }

bool CompilerDispatcher::IsEnqueued(Handle<SharedFunctionInfo> function) const {
  if (jobs_.empty()) return false;
  return GetJobFor(function) != jobs_.end();
}

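// If |job| is currently running on a background thread, blocks until that
// step is done; otherwise just removes it from the pending background jobs.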
void CompilerDispatcher::WaitForJobIfRunningOnBackground(
    CompilerDispatcherJob* job) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherWaitForBackgroundJob");
  RuntimeCallTimerScope runtimeTimer(
      isolate_, RuntimeCallCounterId::kCompileWaitForDispatcher);

  base::LockGuard<base::Mutex> lock(&mutex_);
  if (running_background_jobs_.find(job) == running_background_jobs_.end()) {
    pending_background_jobs_.erase(job);
    return;
  }
  DCHECK_NULL(main_thread_blocking_on_job_);
  main_thread_blocking_on_job_ = job;
  while (main_thread_blocking_on_job_ != nullptr) {
    main_thread_blocking_signal_.Wait(&mutex_);
  }
  DCHECK(pending_background_jobs_.find(job) == pending_background_jobs_.end());
  DCHECK(running_background_jobs_.find(job) == running_background_jobs_.end());
}

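// Runs |job| to completion on the main thread (first waiting for any
// background step it is in) and returns whether it succeeded.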
bool CompilerDispatcher::FinishNow(CompilerDispatcherJob* job) {
  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: finishing ");
    job->ShortPrintOnMainThread();
    PrintF(" now\n");
  }
  WaitForJobIfRunningOnBackground(job);
  while (!job->IsFinished()) {
    DoNextStepOnMainThread(isolate_, job, ExceptionHandling::kThrow);
  }
  return !job->IsFailed();
}

bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherFinishNow");
  JobMap::const_iterator job = GetJobFor(function);
  CHECK(job != jobs_.end());
  bool result = FinishNow(job->second.get());
  RemoveIfFinished(job);
  return result;
}

void CompilerDispatcher::FinishAllNow() {
  // First, finish all jobs that are not running in the background.
  for (auto it = jobs_.cbegin(); it != jobs_.cend();) {
    CompilerDispatcherJob* job = it->second.get();
    bool is_running_in_background;
    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      is_running_in_background =
          running_background_jobs_.find(job) != running_background_jobs_.end();
      pending_background_jobs_.erase(job);
    }
    if (!is_running_in_background) {
      while (!job->IsFinished()) {
        DoNextStepOnMainThread(isolate_, job, ExceptionHandling::kThrow);
      }
      it = RemoveIfFinished(it);
    } else {
      ++it;
    }
  }
  // Potentially wait for jobs that were running in the background.
  for (auto it = jobs_.cbegin(); it != jobs_.cend();
       it = RemoveIfFinished(it)) {
    FinishNow(it->second.get());
  }
}

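// Aborts all jobs. With BlockingBehavior::kBlock (or if no background tasks
// are running) all jobs are reset and removed synchronously; otherwise the
// dispatcher enters abort mode and defers cleanup to AbortInactiveJobs() and
// the scheduled abort task.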
void CompilerDispatcher::AbortAll(BlockingBehavior blocking) {
  bool background_tasks_running =
      task_manager_->TryAbortAll() == CancelableTaskManager::kTaskRunning;
  if (!background_tasks_running || blocking == BlockingBehavior::kBlock) {
    for (auto& it : jobs_) {
      WaitForJobIfRunningOnBackground(it.second.get());
      if (trace_compiler_dispatcher_) {
        PrintF("CompilerDispatcher: aborted ");
        it.second->ShortPrintOnMainThread();
        PrintF("\n");
      }
      it.second->ResetOnMainThread(isolate_);
    }
    jobs_.clear();
    shared_to_unoptimized_job_id_.Clear();
    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      DCHECK(pending_background_jobs_.empty());
      DCHECK(running_background_jobs_.empty());
      abort_ = false;
    }
    return;
  }

  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    abort_ = true;
    pending_background_jobs_.clear();
  }
  AbortInactiveJobs();

  // All running background jobs might already have scheduled idle tasks
  // instead of abort tasks. Schedule a single abort task here to make sure
  // they get processed as soon as possible (and not only once we have idle
  // time).
  ScheduleAbortTask();
}

void CompilerDispatcher::AbortInactiveJobs() {
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    // Since we schedule two abort tasks per async abort, we might end up
    // here with nothing left to do.
    if (!abort_) return;
  }
  for (auto it = jobs_.cbegin(); it != jobs_.cend();) {
    auto job = it;
    ++it;
    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      if (running_background_jobs_.find(job->second.get()) !=
          running_background_jobs_.end()) {
        continue;
      }
    }
    if (trace_compiler_dispatcher_) {
      PrintF("CompilerDispatcher: aborted ");
      job->second->ShortPrintOnMainThread();
      PrintF("\n");
    }
    it = RemoveJob(job);
  }
  if (jobs_.empty()) {
    base::LockGuard<base::Mutex> lock(&mutex_);
    if (num_background_tasks_ == 0) abort_ = false;
  }
}

void CompilerDispatcher::MemoryPressureNotification(
    v8::MemoryPressureLevel level, bool is_isolate_locked) {
  MemoryPressureLevel previous = memory_pressure_level_.Value();
  memory_pressure_level_.SetValue(level);
  // If we're already under pressure, we haven't accepted new tasks meanwhile
  // and can just return. If we're no longer under pressure, we're also done.
  if (previous != MemoryPressureLevel::kNone ||
      level == MemoryPressureLevel::kNone) {
    return;
  }
  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: received memory pressure notification\n");
  }
  if (is_isolate_locked) {
    AbortAll(BlockingBehavior::kDontBlock);
  } else {
    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      if (abort_) return;
      // By going into abort mode here, and clearing the
      // pending_background_jobs_, we at least keep existing background jobs
      // from picking up more work before the MemoryPressureTask gets executed.
      abort_ = true;
      pending_background_jobs_.clear();
    }
    platform_->CallOnForegroundThread(
        reinterpret_cast<v8::Isolate*>(isolate_),
        new MemoryPressureTask(isolate_, task_manager_.get(), this));
  }
}

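// Returns the job enqueued for |shared|, or jobs_.end() if there is none.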
CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor(
    Handle<SharedFunctionInfo> shared) const {
  JobId* job_id_ptr = shared_to_unoptimized_job_id_.Find(shared);
  JobMap::const_iterator job = jobs_.end();
  if (job_id_ptr) {
    job = jobs_.find(*job_id_ptr);
    DCHECK(job == jobs_.end() ||
           job->second->AsUnoptimizedCompileJob()->IsAssociatedWith(shared));
  }
  return job;
}

void CompilerDispatcher::ScheduleIdleTaskFromAnyThread() {
  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
  if (!platform_->IdleTasksEnabled(v8_isolate)) return;
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    if (idle_task_scheduled_) return;
    idle_task_scheduled_ = true;
  }
  platform_->CallIdleOnForegroundThread(
      v8_isolate, new IdleTask(isolate_, task_manager_.get(), this));
}

void CompilerDispatcher::ScheduleIdleTaskIfNeeded() {
  if (jobs_.empty()) return;
  ScheduleIdleTaskFromAnyThread();
}

void CompilerDispatcher::ScheduleAbortTask() {
  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
  platform_->CallOnForegroundThread(
      v8_isolate, new AbortTask(isolate_, task_manager_.get(), this));
}

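// If the next step of |job| can run on any thread, marks it as pending
// background work and schedules more background tasks if needed.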
void CompilerDispatcher::ConsiderJobForBackgroundProcessing(
    CompilerDispatcherJob* job) {
  if (!job->NextStepCanRunOnAnyThread()) return;
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    pending_background_jobs_.insert(job);
  }
  ScheduleMoreBackgroundTasksIfNeeded();
}

void CompilerDispatcher::ScheduleMoreBackgroundTasksIfNeeded() {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherScheduleMoreBackgroundTasksIfNeeded");
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    if (pending_background_jobs_.empty()) return;
    if (platform_->NumberOfAvailableBackgroundThreads() <=
        num_background_tasks_) {
      return;
    }
    ++num_background_tasks_;
  }
  platform_->CallOnBackgroundThread(
      new BackgroundTask(isolate_, task_manager_.get(), this),
      v8::Platform::kShortRunningTask);
}

void CompilerDispatcher::DoBackgroundWork() {
  for (;;) {
    CompilerDispatcherJob* job = nullptr;
    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      if (!pending_background_jobs_.empty()) {
        auto it = pending_background_jobs_.begin();
        job = *it;
        pending_background_jobs_.erase(it);
        running_background_jobs_.insert(job);
      }
    }
    if (job == nullptr) break;

    if (V8_UNLIKELY(block_for_testing_.Value())) {
      block_for_testing_.SetValue(false);
      semaphore_for_testing_.Wait();
    }

    if (trace_compiler_dispatcher_) {
      PrintF("CompilerDispatcher: doing background work\n");
    }

    DoNextStepOnBackgroundThread(job);
    // Unconditionally schedule an idle task, as all background steps have to
    // be followed by a main thread step.
    ScheduleIdleTaskFromAnyThread();

    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      running_background_jobs_.erase(job);

      if (main_thread_blocking_on_job_ == job) {
        main_thread_blocking_on_job_ = nullptr;
        main_thread_blocking_signal_.NotifyOne();
      }
    }
  }

  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    --num_background_tasks_;

    if (running_background_jobs_.empty() && abort_) {
      // This is the last background job that finished. The abort task
      // scheduled by AbortAll might already have run, so schedule another
      // one to be on the safe side.
      ScheduleAbortTask();
    }
  }
  // Don't touch |this| anymore after this point, as it might have been
  // deleted.
}

void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
  bool aborted = false;
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    idle_task_scheduled_ = false;
    aborted = abort_;
  }

  if (aborted) {
    AbortInactiveJobs();
    return;
  }

  // Number of jobs that are unlikely to make progress during any idle callback
  // due to their estimated duration.
  size_t too_long_jobs = 0;

  // Iterate over all available jobs & remaining time. For each job, decide
  // whether to 1) skip it (if it would take too long), 2) erase it (if it's
  // finished), or 3) make progress on it.
  double idle_time_in_seconds =
      deadline_in_seconds - platform_->MonotonicallyIncreasingTime();

  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: received %0.1lfms of idle time\n",
           idle_time_in_seconds *
               static_cast<double>(base::Time::kMillisecondsPerSecond));
  }
  for (auto job = jobs_.cbegin();
       job != jobs_.cend() && idle_time_in_seconds > 0.0;
       idle_time_in_seconds =
           deadline_in_seconds - platform_->MonotonicallyIncreasingTime()) {
    // Don't work on jobs that are being worked on by background tasks.
    // Similarly, remove jobs we work on from the set of available background
    // jobs.
    std::unique_ptr<base::LockGuard<base::Mutex>> lock(
        new base::LockGuard<base::Mutex>(&mutex_));
    if (running_background_jobs_.find(job->second.get()) !=
        running_background_jobs_.end()) {
      ++job;
      continue;
    }
    auto it = pending_background_jobs_.find(job->second.get());
    double estimate_in_ms = job->second->EstimateRuntimeOfNextStepInMs();
    if (idle_time_in_seconds <
        (estimate_in_ms /
         static_cast<double>(base::Time::kMillisecondsPerSecond))) {
      // If there's not enough time left, try to estimate whether we would
      // have managed to finish the job in a large idle task to assess
      // whether we should ask for another idle callback.
      if (estimate_in_ms > kMaxIdleTimeToExpectInMs) ++too_long_jobs;
      if (it == pending_background_jobs_.end()) {
        lock.reset();
        ConsiderJobForBackgroundProcessing(job->second.get());
      }
      ++job;
    } else if (job->second->IsFinished()) {
      DCHECK(it == pending_background_jobs_.end());
      lock.reset();
      job = RemoveJob(job);
      continue;
    } else {
      // Do one step, and keep processing the job (as we don't advance the
      // iterator).
      if (it != pending_background_jobs_.end()) {
        pending_background_jobs_.erase(it);
      }
      lock.reset();
      DoNextStepOnMainThread(isolate_, job->second.get(),
                             ExceptionHandling::kSwallow);
    }
  }
  if (jobs_.size() > too_long_jobs) ScheduleIdleTaskIfNeeded();
}

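// If |job| has finished (successfully or not), removes it and returns the
// following iterator; otherwise returns |job| unchanged.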
CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveIfFinished(
    JobMap::const_iterator job) {
  if (!job->second->IsFinished()) {
    return job;
  }

  if (trace_compiler_dispatcher_) {
    bool result = !job->second->IsFailed();
    PrintF("CompilerDispatcher: finished working on ");
    job->second->ShortPrintOnMainThread();
    PrintF(": %s\n", result ? "success" : "failure");
    tracer_->DumpStatistics();
  }

  return RemoveJob(job);
}

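// Assigns the next job id to |job|, inserts it into the job map, and, for
// unoptimized compile jobs, registers the id in the SFI-to-job-id map.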
CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::InsertJob(
    std::unique_ptr<CompilerDispatcherJob> job) {
  bool added;
  JobMap::const_iterator it;
  std::tie(it, added) =
      jobs_.insert(std::make_pair(next_job_id_++, std::move(job)));
  DCHECK(added);

  JobId id = it->first;
  CompilerDispatcherJob* inserted_job = it->second.get();

  // Maps unoptimized jobs' SFIs to their job id.
  if (inserted_job->type() ==
      CompilerDispatcherJob::Type::kUnoptimizedCompile) {
    Handle<SharedFunctionInfo> shared =
        inserted_job->AsUnoptimizedCompileJob()->shared();
    if (!shared.is_null()) {
      shared_to_unoptimized_job_id_.Set(shared, id);
    }
  }

  return it;
}

CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveJob(
    CompilerDispatcher::JobMap::const_iterator it) {
  CompilerDispatcherJob* job = it->second.get();
  job->ResetOnMainThread(isolate_);

  // Removes the mapping from an unoptimized job's SFI to its job id.
  if (job->type() == CompilerDispatcherJob::Type::kUnoptimizedCompile) {
    Handle<SharedFunctionInfo> shared =
        job->AsUnoptimizedCompileJob()->shared();
    if (!shared.is_null()) {
      JobId deleted_id = shared_to_unoptimized_job_id_.Delete(shared);
      USE(deleted_id);
      DCHECK_EQ(it->first, deleted_id);
    }
  }

  it = jobs_.erase(it);
  if (jobs_.empty()) {
    base::LockGuard<base::Mutex> lock(&mutex_);
    if (num_background_tasks_ == 0) abort_ = false;
  }
  return it;
}

}  // namespace internal
}  // namespace v8