Commit 15b1ce39 authored by Leszek Swirski, committed by V8 LUCI CQ

[compiler-dispatcher] Port to Jobs API

Port the CompilerDispatcher to use the Jobs API, instead of its own
hand-rolled worker management.
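
For context, a minimal sketch of the Jobs API pattern this moves to. Only
v8::JobTask, v8::JobDelegate, v8::JobHandle and Platform::PostJob are the
real API; the other names below are illustrative, not the dispatcher's
actual code:

  #include <atomic>
  #include <memory>

  #include "include/v8-platform.h"

  // Illustrative JobTask: the scheduler calls Run() on worker threads and
  // consults GetMaxConcurrency() to decide how many workers to keep busy.
  class SketchJobTask : public v8::JobTask {
   public:
    void Run(v8::JobDelegate* delegate) override {
      // Keep processing work until asked to yield or the work runs out.
      while (!delegate->ShouldYield()) {
        if (!ProcessOneItem()) return;
      }
    }
    size_t GetMaxConcurrency(size_t worker_count) const override {
      return pending_items_.load(std::memory_order_relaxed);
    }

   private:
    // Claim one pending item; returns false once the queue is drained.
    bool ProcessOneItem() {
      size_t n = pending_items_.load(std::memory_order_relaxed);
      while (n > 0) {
        if (pending_items_.compare_exchange_weak(n, n - 1,
                                                 std::memory_order_relaxed)) {
          return true;  // ...do the actual work for the claimed item here.
        }
      }
      return false;
    }

    std::atomic<size_t> pending_items_{0};
  };

  // Posting the job returns a JobHandle, which replaces the hand-rolled
  // CancelableTaskManager / worker-count bookkeeping.
  std::unique_ptr<v8::JobHandle> StartSketchJob(v8::Platform* platform) {
    auto handle = platform->PostJob(v8::TaskPriority::kUserVisible,
                                    std::make_unique<SketchJobTask>());
    handle->NotifyConcurrencyIncrease();  // call again whenever work is added
    return handle;  // later: handle->Cancel() or handle->Join()
  }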

This required some re-thinking of how testing is handled, since the
tests want to be able to

  a) Defer calls to PostTask/Job, to actually post the jobs later. This
     was easy enough with PostTask, since we could simply store the task
     in a list and no-op, but PostJob has to return a JobHandle. The
     tests now have a DelayedJobHandleWrapper, which defers all method
     calls on itself, and because of all the unique_ptrs, there's also
     now a SharedJobHandleWrapper.

  b) Wait until tasks/jobs complete. Returning from a Task meant that
     the task had completed, but this isn't necessarily the case with
     JobTasks; e.g. a job might be asked to yield. This patch hacks
     around this by Posting and Joining a non-owning copy of the
     requested JobTask, and then re-posting it once Join returns (see
     the sketch below).
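
A rough sketch of the wait trick in (b), assuming an illustrative
non-owning wrapper (JobTaskView and PostJobAndWait are not the actual
test helpers):

  #include <memory>

  #include "include/v8-platform.h"

  // Non-owning view of a JobTask, so the real task outlives Join() and
  // can be re-posted afterwards.
  class JobTaskView : public v8::JobTask {
   public:
    explicit JobTaskView(v8::JobTask* real_task) : real_task_(real_task) {}
    void Run(v8::JobDelegate* delegate) override { real_task_->Run(delegate); }
    size_t GetMaxConcurrency(size_t worker_count) const override {
      return real_task_->GetMaxConcurrency(worker_count);
    }

   private:
    v8::JobTask* real_task_;  // not owned
  };

  void PostJobAndWait(v8::Platform* platform, v8::JobTask* task) {
    // Join() returns once the job has no work left, standing in for the
    // old "the posted Task returned" completion signal.
    std::unique_ptr<v8::JobHandle> handle = platform->PostJob(
        v8::TaskPriority::kUserVisible, std::make_unique<JobTaskView>(task));
    handle->Join();
    // The joined handle is no longer usable, but |task| is still owned by
    // the caller and can be re-posted if more work shows up later.
  }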

Change-Id: If867b4122af52758ffabcfb78a6701f0f95d896d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2563664
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Victor Gomes <victorgomes@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77618}
parent 7cd43456
......@@ -1894,7 +1894,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
// Check if the compiler dispatcher has shared_info enqueued for compile.
LazyCompileDispatcher* dispatcher = isolate->lazy_compile_dispatcher();
if (dispatcher->IsEnqueued(shared_info)) {
if (dispatcher && dispatcher->IsEnqueued(shared_info)) {
if (!dispatcher->FinishNow(shared_info)) {
return FailWithPendingException(isolate, script, &parse_info, flag);
}
......
......@@ -4,7 +4,11 @@
#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
#include <atomic>
#include "include/v8-platform.h"
#include "src/ast/ast.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/time.h"
#include "src/codegen/compiler.h"
#include "src/flags/flags.h"
......@@ -21,6 +25,24 @@
namespace v8 {
namespace internal {
class LazyCompileDispatcher::JobTask : public v8::JobTask {
public:
explicit JobTask(LazyCompileDispatcher* lazy_compile_dispatcher)
: lazy_compile_dispatcher_(lazy_compile_dispatcher) {}
void Run(JobDelegate* delegate) final {
lazy_compile_dispatcher_->DoBackgroundWork(delegate);
}
size_t GetMaxConcurrency(size_t worker_count) const final {
return lazy_compile_dispatcher_->num_jobs_for_background_.load(
std::memory_order_relaxed);
}
private:
LazyCompileDispatcher* lazy_compile_dispatcher_;
};
LazyCompileDispatcher::Job::Job(BackgroundCompileTask* task_arg)
: task(task_arg), has_run(false), aborted(false) {}
......@@ -39,22 +61,21 @@ LazyCompileDispatcher::LazyCompileDispatcher(Isolate* isolate,
platform_(platform),
max_stack_size_(max_stack_size),
trace_compiler_dispatcher_(FLAG_trace_compiler_dispatcher),
task_manager_(new CancelableTaskManager()),
idle_task_manager_(new CancelableTaskManager()),
next_job_id_(0),
shared_to_unoptimized_job_id_(isolate->heap()),
idle_task_scheduled_(false),
num_worker_tasks_(0),
num_jobs_for_background_(0),
main_thread_blocking_on_job_(nullptr),
block_for_testing_(false),
semaphore_for_testing_(0) {
if (trace_compiler_dispatcher_ && !IsEnabled()) {
PrintF("LazyCompileDispatcher: dispatcher is disabled\n");
}
job_handle_ = platform_->PostJob(TaskPriority::kUserVisible,
std::make_unique<JobTask>(this));
}
LazyCompileDispatcher::~LazyCompileDispatcher() {
// AbortAll must be called before LazyCompileDispatcher is destroyed.
CHECK(task_manager_->canceled());
CHECK(!job_handle_->IsValid());
}
base::Optional<LazyCompileDispatcher::JobId> LazyCompileDispatcher::Enqueue(
......@@ -64,8 +85,6 @@ base::Optional<LazyCompileDispatcher::JobId> LazyCompileDispatcher::Enqueue(
"V8.LazyCompilerDispatcherEnqueue");
RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileEnqueueOnDispatcher);
if (!IsEnabled()) return base::nullopt;
std::unique_ptr<Job> job = std::make_unique<Job>(new BackgroundCompileTask(
isolate_, outer_parse_info, function_name, function_literal,
worker_thread_runtime_call_stats_, background_compile_timer_,
......@@ -83,15 +102,13 @@ base::Optional<LazyCompileDispatcher::JobId> LazyCompileDispatcher::Enqueue(
{
base::MutexGuard lock(&mutex_);
pending_background_jobs_.insert(it->second.get());
num_jobs_for_background_ += 1;
VerifyBackgroundTaskCount(lock);
}
ScheduleMoreWorkerTasksIfNeeded();
job_handle_->NotifyConcurrencyIncrease();
return base::make_optional(id);
}
bool LazyCompileDispatcher::IsEnabled() const {
return FLAG_lazy_compile_dispatcher;
}
bool LazyCompileDispatcher::IsEnqueued(
Handle<SharedFunctionInfo> function) const {
if (jobs_.empty()) return false;
......@@ -139,7 +156,8 @@ void LazyCompileDispatcher::WaitForJobIfRunningOnBackground(Job* job) {
base::MutexGuard lock(&mutex_);
if (running_background_jobs_.find(job) == running_background_jobs_.end()) {
pending_background_jobs_.erase(job);
num_jobs_for_background_ -= pending_background_jobs_.erase(job);
VerifyBackgroundTaskCount(lock);
return;
}
DCHECK_NULL(main_thread_blocking_on_job_);
......@@ -189,7 +207,8 @@ void LazyCompileDispatcher::AbortJob(JobId job_id) {
Job* job = job_it->second.get();
base::LockGuard<base::Mutex> lock(&mutex_);
pending_background_jobs_.erase(job);
num_jobs_for_background_ -= pending_background_jobs_.erase(job);
VerifyBackgroundTaskCount(lock);
if (running_background_jobs_.find(job) == running_background_jobs_.end()) {
RemoveJob(job_it);
} else {
......@@ -200,23 +219,18 @@ void LazyCompileDispatcher::AbortJob(JobId job_id) {
}
void LazyCompileDispatcher::AbortAll() {
task_manager_->TryAbortAll();
idle_task_manager_->TryAbortAll();
job_handle_->Cancel();
for (auto& it : jobs_) {
WaitForJobIfRunningOnBackground(it.second.get());
if (trace_compiler_dispatcher_) {
PrintF("LazyCompileDispatcher: aborted job %zu\n", it.first);
}
}
jobs_.clear();
shared_to_unoptimized_job_id_.Clear();
{
base::MutexGuard lock(&mutex_);
DCHECK(pending_background_jobs_.empty());
DCHECK(running_background_jobs_.empty());
pending_background_jobs_.clear();
}
task_manager_->CancelAndWait();
jobs_.clear();
shared_to_unoptimized_job_id_.Clear();
idle_task_manager_->CancelAndWait();
}
LazyCompileDispatcher::JobMap::const_iterator LazyCompileDispatcher::GetJobFor(
......@@ -235,30 +249,17 @@ void LazyCompileDispatcher::ScheduleIdleTaskFromAnyThread(
if (idle_task_scheduled_) return;
idle_task_scheduled_ = true;
// TODO(leszeks): Using a full task manager for a single cancellable task is
// overkill, we could probably do the cancelling ourselves.
taskrunner_->PostIdleTask(MakeCancelableIdleTask(
task_manager_.get(),
idle_task_manager_.get(),
[this](double deadline_in_seconds) { DoIdleWork(deadline_in_seconds); }));
}
void LazyCompileDispatcher::ScheduleMoreWorkerTasksIfNeeded() {
void LazyCompileDispatcher::DoBackgroundWork(JobDelegate* delegate) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.LazyCompilerDispatcherScheduleMoreWorkerTasksIfNeeded");
{
base::MutexGuard lock(&mutex_);
if (pending_background_jobs_.empty()) return;
if (platform_->NumberOfWorkerThreads() <= num_worker_tasks_) {
return;
}
++num_worker_tasks_;
}
platform_->CallOnWorkerThread(
MakeCancelableTask(task_manager_.get(), [this] { DoBackgroundWork(); }));
}
void LazyCompileDispatcher::DoBackgroundWork() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.LazyCompilerDispatcherDoBackgroundWork");
for (;;) {
"V8.LazyCompileDispatcherDoBackgroundWork");
while (!delegate->ShouldYield()) {
Job* job = nullptr;
{
base::MutexGuard lock(&mutex_);
......@@ -267,6 +268,7 @@ void LazyCompileDispatcher::DoBackgroundWork() {
job = *it;
pending_background_jobs_.erase(it);
running_background_jobs_.insert(job);
VerifyBackgroundTaskCount(lock);
}
}
if (job == nullptr) break;
......@@ -284,7 +286,8 @@ void LazyCompileDispatcher::DoBackgroundWork() {
{
base::MutexGuard lock(&mutex_);
running_background_jobs_.erase(job);
num_jobs_for_background_ -= running_background_jobs_.erase(job);
VerifyBackgroundTaskCount(lock);
job->has_run = true;
if (job->IsReadyToFinalize(lock)) {
......@@ -300,10 +303,6 @@ void LazyCompileDispatcher::DoBackgroundWork() {
}
}
{
base::MutexGuard lock(&mutex_);
--num_worker_tasks_;
}
// Don't touch |this| anymore after this point, as it might have been
// deleted.
}
......@@ -383,5 +382,12 @@ LazyCompileDispatcher::JobMap::const_iterator LazyCompileDispatcher::RemoveJob(
return jobs_.erase(it);
}
#ifdef DEBUG
void LazyCompileDispatcher::VerifyBackgroundTaskCount(const base::MutexGuard&) {
CHECK_EQ(num_jobs_for_background_.load(),
running_background_jobs_.size() + pending_background_jobs_.size());
}
#endif
} // namespace internal
} // namespace v8
......@@ -81,9 +81,6 @@ class V8_EXPORT_PRIVATE LazyCompileDispatcher {
LazyCompileDispatcher& operator=(const LazyCompileDispatcher&) = delete;
~LazyCompileDispatcher();
// Returns true if the compiler dispatcher is enabled.
bool IsEnabled() const;
base::Optional<JobId> Enqueue(const ParseInfo* outer_parse_info,
const AstRawString* function_name,
const FunctionLiteral* function_literal);
......@@ -117,6 +114,9 @@ class V8_EXPORT_PRIVATE LazyCompileDispatcher {
FRIEND_TEST(LazyCompilerDispatcherTest, AsyncAbortAllRunningWorkerTask);
FRIEND_TEST(LazyCompilerDispatcherTest, CompileMultipleOnBackgroundThread);
// JobTask for PostJob API.
class JobTask;
struct Job {
explicit Job(BackgroundCompileTask* task_arg);
~Job();
......@@ -141,15 +141,20 @@ class V8_EXPORT_PRIVATE LazyCompileDispatcher {
void WaitForJobIfRunningOnBackground(Job* job);
JobMap::const_iterator GetJobFor(Handle<SharedFunctionInfo> shared) const;
void ScheduleMoreWorkerTasksIfNeeded();
void ScheduleIdleTaskFromAnyThread(const base::MutexGuard&);
void DoBackgroundWork();
void DoBackgroundWork(JobDelegate* delegate);
void DoIdleWork(double deadline_in_seconds);
// Returns iterator to the inserted job.
JobMap::const_iterator InsertJob(std::unique_ptr<Job> job);
// Returns iterator following the removed job.
JobMap::const_iterator RemoveJob(JobMap::const_iterator job);
#ifdef DEBUG
void VerifyBackgroundTaskCount(const base::MutexGuard&);
#else
void VerifyBackgroundTaskCount(const base::MutexGuard&) {}
#endif
Isolate* isolate_;
WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats_;
TimedHistogram* background_compile_timer_;
......@@ -157,10 +162,12 @@ class V8_EXPORT_PRIVATE LazyCompileDispatcher {
Platform* platform_;
size_t max_stack_size_;
std::unique_ptr<JobHandle> job_handle_;
// Copy of FLAG_trace_compiler_dispatcher to allow for access from any thread.
bool trace_compiler_dispatcher_;
std::unique_ptr<CancelableTaskManager> task_manager_;
std::unique_ptr<CancelableTaskManager> idle_task_manager_;
// Id for next job to be added
JobId next_job_id_;
......@@ -179,15 +186,15 @@ class V8_EXPORT_PRIVATE LazyCompileDispatcher {
// True if an idle task is scheduled to be run.
bool idle_task_scheduled_;
// Number of scheduled or running WorkerTask objects.
int num_worker_tasks_;
// The set of jobs that can be run on a background thread.
std::unordered_set<Job*> pending_background_jobs_;
// The set of jobs currently being run on background threads.
std::unordered_set<Job*> running_background_jobs_;
// The total number of jobs, pending and running.
std::atomic<size_t> num_jobs_for_background_;
// If not nullptr, then the main thread waits for the task processing
// this job, and blocks on the ConditionVariable main_thread_blocking_signal_.
Job* main_thread_blocking_on_job_;
......
......@@ -3201,9 +3201,10 @@ void Isolate::Deinit() {
delete heap_profiler_;
heap_profiler_ = nullptr;
compiler_dispatcher_->AbortAll();
delete compiler_dispatcher_;
compiler_dispatcher_ = nullptr;
if (lazy_compile_dispatcher_) {
lazy_compile_dispatcher_->AbortAll();
lazy_compile_dispatcher_.reset();
}
delete baseline_batch_compiler_;
baseline_batch_compiler_ = nullptr;
......@@ -3667,8 +3668,10 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
interpreter_ = new interpreter::Interpreter(this);
bigint_processor_ = bigint::Processor::New(new BigIntPlatform(this));
compiler_dispatcher_ = new LazyCompileDispatcher(
if (FLAG_lazy_compile_dispatcher) {
lazy_compile_dispatcher_ = std::make_unique<LazyCompileDispatcher>(
this, V8::GetCurrentPlatform(), FLAG_stack_size);
}
baseline_batch_compiler_ = new baseline::BaselineBatchCompiler(this);
// Enable logging before setting up the heap
......
......@@ -1676,7 +1676,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
AccountingAllocator* allocator() { return allocator_; }
LazyCompileDispatcher* lazy_compile_dispatcher() const {
return compiler_dispatcher_;
return lazy_compile_dispatcher_.get();
}
baseline::BaselineBatchCompiler* baseline_batch_compiler() const {
......@@ -2161,7 +2161,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// through all compilations (and thus all JSHeapBroker instances).
Zone* compiler_zone_ = nullptr;
LazyCompileDispatcher* compiler_dispatcher_ = nullptr;
std::unique_ptr<LazyCompileDispatcher> lazy_compile_dispatcher_;
baseline::BaselineBatchCompiler* baseline_batch_compiler_ = nullptr;
using InterruptEntry = std::pair<InterruptCallback, void*>;
......
......@@ -170,7 +170,7 @@ UnoptimizedCompileState::UnoptimizedCompileState(Isolate* isolate)
ast_string_constants_(isolate->ast_string_constants()),
logger_(isolate->logger()),
parallel_tasks_(
isolate->lazy_compile_dispatcher()->IsEnabled()
isolate->lazy_compile_dispatcher()
? new ParallelTasks(isolate->lazy_compile_dispatcher())
: nullptr) {}
......
......@@ -9,6 +9,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/semaphore.h"
#include "src/codegen/compiler.h"
#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
......@@ -109,19 +110,111 @@ class LazyCompilerDispatcherTest : public TestWithNativeContext {
namespace {
class DeferredPostJob {
public:
class DeferredJobHandle final : public JobHandle {
public:
explicit DeferredJobHandle(DeferredPostJob* owner) : owner_(owner) {
owner->deferred_handle_ = this;
}
~DeferredJobHandle() final {
if (owner_) {
owner_->deferred_handle_ = nullptr;
}
}
void NotifyConcurrencyIncrease() final {
DCHECK(!was_cancelled());
if (real_handle()) {
real_handle()->NotifyConcurrencyIncrease();
}
// No need to defer the NotifyConcurrencyIncrease, we'll automatically
// check concurrency when posting the job.
}
void Cancel() final {
set_cancelled();
if (real_handle()) {
real_handle()->Cancel();
}
}
void Join() final { UNREACHABLE(); }
void CancelAndDetach() final { UNREACHABLE(); }
bool IsActive() final { return real_handle() && real_handle()->IsActive(); }
bool IsValid() final { return owner_->HandleIsValid(); }
void ClearOwner() { owner_ = nullptr; }
private:
JobHandle* real_handle() { return owner_->real_handle_.get(); }
bool was_cancelled() { return owner_->was_cancelled_; }
void set_cancelled() {
DCHECK(!was_cancelled());
owner_->was_cancelled_ = true;
}
DeferredPostJob* owner_;
};
~DeferredPostJob() {
if (deferred_handle_) deferred_handle_->ClearOwner();
}
std::unique_ptr<JobHandle> DeferPostJob(TaskPriority priority,
std::unique_ptr<JobTask> job_task) {
DCHECK_NULL(job_task_);
job_task_ = std::move(job_task);
priority_ = priority;
return std::make_unique<DeferredJobHandle>(this);
}
bool IsPending() { return job_task_ != nullptr; }
void Clear() { job_task_.reset(); }
void DoRealPostJob(Platform* platform) {
real_handle_ = platform->PostJob(priority_, std::move(job_task_));
if (was_cancelled_) {
real_handle_->Cancel();
}
}
void BlockUntilComplete() {
// Join the handle pointed to by the deferred handle. This invalidates that
// handle, but LazyCompileDispatcher still wants to be able to cancel the
// job it posted, so clear the deferred handle to go back to relying on
// was_cancelled for validity.
real_handle_->Join();
real_handle_ = nullptr;
}
bool HandleIsValid() {
return !was_cancelled_ && real_handle_ && real_handle_->IsValid();
}
private:
std::unique_ptr<JobTask> job_task_;
TaskPriority priority_;
// Non-owning pointer to the handle returned by PostJob. The handle holds
// a pointer to this instance, and registers/deregisters itself on
// construction/destruction.
DeferredJobHandle* deferred_handle_ = nullptr;
std::unique_ptr<JobHandle> real_handle_ = nullptr;
bool was_cancelled_ = false;
};
class MockPlatform : public v8::Platform {
public:
MockPlatform()
: time_(0.0),
time_step_(0.0),
idle_task_(nullptr),
sem_(0),
tracing_controller_(V8::GetCurrentPlatform()->GetTracingController()) {}
~MockPlatform() override {
base::MutexGuard lock(&mutex_);
EXPECT_TRUE(foreground_tasks_.empty());
EXPECT_TRUE(worker_tasks_.empty());
EXPECT_TRUE(idle_task_ == nullptr);
EXPECT_FALSE(deferred_post_job_.HandleIsValid());
base::MutexGuard lock(&idle_task_mutex_);
EXPECT_EQ(idle_task_, nullptr);
}
MockPlatform(const MockPlatform&) = delete;
MockPlatform& operator=(const MockPlatform&) = delete;
......@@ -134,8 +227,7 @@ class MockPlatform : public v8::Platform {
}
void CallOnWorkerThread(std::unique_ptr<Task> task) override {
base::MutexGuard lock(&mutex_);
worker_tasks_.push_back(std::move(task));
UNREACHABLE();
}
void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
......@@ -146,8 +238,8 @@ class MockPlatform : public v8::Platform {
bool IdleTasksEnabled(v8::Isolate* isolate) override { return true; }
std::unique_ptr<JobHandle> PostJob(
TaskPriority priority, std::unique_ptr<JobTask> job_state) override {
UNREACHABLE();
TaskPriority priority, std::unique_ptr<JobTask> job_task) override {
return deferred_post_job_.DeferPostJob(priority, std::move(job_task));
}
double MonotonicallyIncreasingTime() override {
......@@ -165,127 +257,48 @@ class MockPlatform : public v8::Platform {
void RunIdleTask(double deadline_in_seconds, double time_step) {
time_step_ = time_step;
IdleTask* task;
std::unique_ptr<IdleTask> task;
{
base::MutexGuard lock(&mutex_);
task = idle_task_;
ASSERT_TRUE(idle_task_ != nullptr);
idle_task_ = nullptr;
base::MutexGuard lock(&idle_task_mutex_);
task.swap(idle_task_);
}
task->Run(deadline_in_seconds);
delete task;
}
bool IdleTaskPending() {
base::MutexGuard lock(&mutex_);
return idle_task_;
base::MutexGuard lock(&idle_task_mutex_);
return idle_task_ != nullptr;
}
bool WorkerTasksPending() {
base::MutexGuard lock(&mutex_);
return !worker_tasks_.empty();
}
bool JobTaskPending() { return deferred_post_job_.IsPending(); }
bool ForegroundTasksPending() {
base::MutexGuard lock(&mutex_);
return !foreground_tasks_.empty();
void RunJobTasksAndBlock(Platform* platform) {
deferred_post_job_.DoRealPostJob(platform);
deferred_post_job_.BlockUntilComplete();
}
void RunWorkerTasksAndBlock(Platform* platform) {
std::vector<std::unique_ptr<Task>> tasks;
{
base::MutexGuard lock(&mutex_);
tasks.swap(worker_tasks_);
}
platform->CallOnWorkerThread(
std::make_unique<TaskWrapper>(this, std::move(tasks), true));
sem_.Wait();
void RunJobTasks(Platform* platform) {
deferred_post_job_.DoRealPostJob(platform);
}
void RunWorkerTasks(Platform* platform) {
std::vector<std::unique_ptr<Task>> tasks;
{
base::MutexGuard lock(&mutex_);
tasks.swap(worker_tasks_);
}
platform->CallOnWorkerThread(
std::make_unique<TaskWrapper>(this, std::move(tasks), false));
}
void RunForegroundTasks() {
std::vector<std::unique_ptr<Task>> tasks;
{
base::MutexGuard lock(&mutex_);
tasks.swap(foreground_tasks_);
}
for (auto& task : tasks) {
task->Run();
// Reset |task| before running the next one.
task.reset();
}
}
void ClearWorkerTasks() {
std::vector<std::unique_ptr<Task>> tasks;
{
base::MutexGuard lock(&mutex_);
tasks.swap(worker_tasks_);
}
}
void ClearForegroundTasks() {
std::vector<std::unique_ptr<Task>> tasks;
{
base::MutexGuard lock(&mutex_);
tasks.swap(foreground_tasks_);
}
}
void ClearJobs() { deferred_post_job_.Clear(); }
void ClearIdleTask() {
base::MutexGuard lock(&mutex_);
ASSERT_TRUE(idle_task_ != nullptr);
delete idle_task_;
idle_task_ = nullptr;
}
private:
class TaskWrapper : public Task {
public:
TaskWrapper(MockPlatform* platform,
std::vector<std::unique_ptr<Task>> tasks, bool signal)
: platform_(platform), tasks_(std::move(tasks)), signal_(signal) {}
~TaskWrapper() override = default;
TaskWrapper(const TaskWrapper&) = delete;
TaskWrapper& operator=(const TaskWrapper&) = delete;
void Run() override {
for (auto& task : tasks_) {
task->Run();
// Reset |task| before running the next one.
task.reset();
}
if (signal_) platform_->sem_.Signal();
base::MutexGuard lock(&idle_task_mutex_);
CHECK_NOT_NULL(idle_task_);
idle_task_.reset();
}
private:
MockPlatform* platform_;
std::vector<std::unique_ptr<Task>> tasks_;
bool signal_;
};
class MockForegroundTaskRunner final : public TaskRunner {
public:
explicit MockForegroundTaskRunner(MockPlatform* platform)
: platform_(platform) {}
void PostTask(std::unique_ptr<v8::Task> task) override {
base::MutexGuard lock(&platform_->mutex_);
platform_->foreground_tasks_.push_back(std::move(task));
}
void PostTask(std::unique_ptr<v8::Task> task) override { UNREACHABLE(); }
void PostNonNestableTask(std::unique_ptr<v8::Task> task) override {
// The mock platform does not nest tasks.
PostTask(std::move(task));
UNREACHABLE();
}
void PostDelayedTask(std::unique_ptr<Task> task,
......@@ -295,9 +308,9 @@ class MockPlatform : public v8::Platform {
void PostIdleTask(std::unique_ptr<IdleTask> task) override {
DCHECK(IdleTasksEnabled());
base::MutexGuard lock(&platform_->mutex_);
base::MutexGuard lock(&platform_->idle_task_mutex_);
ASSERT_TRUE(platform_->idle_task_ == nullptr);
platform_->idle_task_ = task.release();
platform_->idle_task_ = std::move(task);
}
bool IdleTasksEnabled() override { return true; }
......@@ -311,14 +324,14 @@ class MockPlatform : public v8::Platform {
double time_;
double time_step_;
// Protects all *_tasks_.
base::Mutex mutex_;
// The posted JobTask.
DeferredPostJob deferred_post_job_;
IdleTask* idle_task_;
std::vector<std::unique_ptr<Task>> worker_tasks_;
std::vector<std::unique_ptr<Task>> foreground_tasks_;
// The posted idle task.
std::unique_ptr<IdleTask> idle_task_;
base::Semaphore sem_;
// Protects idle_task_.
base::Mutex idle_task_mutex_;
v8::TracingController* tracing_controller_;
};
......@@ -356,8 +369,7 @@ TEST_F(LazyCompilerDispatcherTest, IsEnqueued) {
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_FALSE(platform.IdleTaskPending());
ASSERT_TRUE(platform.WorkerTasksPending());
platform.ClearWorkerTasks();
ASSERT_TRUE(platform.JobTaskPending());
}
TEST_F(LazyCompilerDispatcherTest, FinishNow) {
......@@ -378,7 +390,6 @@ TEST_F(LazyCompilerDispatcherTest, FinishNow) {
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(shared->is_compiled());
platform.ClearWorkerTasks();
ASSERT_FALSE(platform.IdleTaskPending());
dispatcher.AbortAll();
}
......@@ -394,10 +405,10 @@ TEST_F(LazyCompilerDispatcherTest, CompileAndFinalize) {
base::Optional<LazyCompileDispatcher::JobId> job_id =
EnqueueUnoptimizedCompileJob(&dispatcher, i_isolate(), shared);
ASSERT_TRUE(platform.WorkerTasksPending());
ASSERT_TRUE(platform.JobTaskPending());
// Run compile steps.
platform.RunWorkerTasksAndBlock(V8::GetCurrentPlatform());
platform.RunJobTasksAndBlock(V8::GetCurrentPlatform());
// Since we haven't yet registered the SFI for the job, it should still be
// enqueued and waiting.
......@@ -413,7 +424,7 @@ TEST_F(LazyCompilerDispatcherTest, CompileAndFinalize) {
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(shared->is_compiled());
ASSERT_FALSE(platform.WorkerTasksPending());
ASSERT_FALSE(platform.JobTaskPending());
ASSERT_FALSE(platform.IdleTaskPending());
dispatcher.AbortAll();
}
......@@ -432,7 +443,7 @@ TEST_F(LazyCompilerDispatcherTest, IdleTaskNoIdleTime) {
dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
// Run compile steps.
platform.RunWorkerTasksAndBlock(V8::GetCurrentPlatform());
platform.RunJobTasksAndBlock(V8::GetCurrentPlatform());
// Job should be ready to finalize.
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
......@@ -456,7 +467,7 @@ TEST_F(LazyCompilerDispatcherTest, IdleTaskNoIdleTime) {
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(shared->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
ASSERT_FALSE(platform.WorkerTasksPending());
ASSERT_FALSE(platform.JobTaskPending());
dispatcher.AbortAll();
}
......@@ -480,7 +491,7 @@ TEST_F(LazyCompilerDispatcherTest, IdleTaskSmallIdleTime) {
dispatcher.RegisterSharedFunctionInfo(*job_id_2, *shared_2);
// Run compile steps.
platform.RunWorkerTasksAndBlock(V8::GetCurrentPlatform());
platform.RunJobTasksAndBlock(V8::GetCurrentPlatform());
// Both jobs should be ready to finalize.
ASSERT_EQ(dispatcher.jobs_.size(), 2u);
......@@ -506,7 +517,7 @@ TEST_F(LazyCompilerDispatcherTest, IdleTaskSmallIdleTime) {
dispatcher.IsEnqueued(shared_2));
ASSERT_TRUE(shared_1->is_compiled() && shared_2->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
ASSERT_FALSE(platform.WorkerTasksPending());
ASSERT_FALSE(platform.JobTaskPending());
dispatcher.AbortAll();
}
......@@ -531,13 +542,12 @@ TEST_F(LazyCompilerDispatcherTest, IdleTaskException) {
dispatcher.RegisterSharedFunctionInfo(*job_id, *shared);
// Run compile steps and finalize.
platform.RunWorkerTasksAndBlock(V8::GetCurrentPlatform());
platform.RunJobTasksAndBlock(V8::GetCurrentPlatform());
platform.RunIdleTask(1000.0, 0.0);
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_FALSE(shared->is_compiled());
ASSERT_FALSE(i_isolate()->has_pending_exception());
platform.ClearWorkerTasks();
dispatcher.AbortAll();
}
......@@ -560,17 +570,17 @@ TEST_F(LazyCompilerDispatcherTest, FinishNowWithWorkerTask) {
ASSERT_FALSE(shared->is_compiled());
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_FALSE(dispatcher.jobs_.begin()->second->has_run);
ASSERT_TRUE(platform.WorkerTasksPending());
ASSERT_TRUE(platform.JobTaskPending());
// This does not block, but races with the FinishNow() call below.
platform.RunWorkerTasks(V8::GetCurrentPlatform());
platform.RunJobTasks(V8::GetCurrentPlatform());
ASSERT_TRUE(dispatcher.FinishNow(shared));
// Finishing removes the SFI from the queue.
ASSERT_FALSE(dispatcher.IsEnqueued(shared));
ASSERT_TRUE(shared->is_compiled());
if (platform.IdleTaskPending()) platform.ClearIdleTask();
ASSERT_FALSE(platform.WorkerTasksPending());
ASSERT_FALSE(platform.JobTaskPending());
dispatcher.AbortAll();
}
......@@ -597,7 +607,7 @@ TEST_F(LazyCompilerDispatcherTest, IdleTaskMultipleJobs) {
ASSERT_TRUE(dispatcher.IsEnqueued(shared_2));
// Run compile steps and finalize.
platform.RunWorkerTasksAndBlock(V8::GetCurrentPlatform());
platform.RunJobTasksAndBlock(V8::GetCurrentPlatform());
platform.RunIdleTask(1000.0, 0.0);
ASSERT_FALSE(dispatcher.IsEnqueued(shared_1));
......@@ -605,7 +615,7 @@ TEST_F(LazyCompilerDispatcherTest, IdleTaskMultipleJobs) {
ASSERT_TRUE(shared_1->is_compiled());
ASSERT_TRUE(shared_2->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
ASSERT_FALSE(platform.WorkerTasksPending());
ASSERT_FALSE(platform.JobTaskPending());
dispatcher.AbortAll();
}
......@@ -637,7 +647,6 @@ TEST_F(LazyCompilerDispatcherTest, FinishNowException) {
i_isolate()->clear_pending_exception();
ASSERT_FALSE(platform.IdleTaskPending());
platform.ClearWorkerTasks();
dispatcher.AbortAll();
}
......@@ -659,7 +668,7 @@ TEST_F(LazyCompilerDispatcherTest, AbortJobNotStarted) {
ASSERT_FALSE(shared->is_compiled());
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_FALSE(dispatcher.jobs_.begin()->second->has_run);
ASSERT_TRUE(platform.WorkerTasksPending());
ASSERT_TRUE(platform.JobTaskPending());
dispatcher.AbortJob(*job_id);
......@@ -667,7 +676,6 @@ TEST_F(LazyCompilerDispatcherTest, AbortJobNotStarted) {
ASSERT_FALSE(dispatcher.IsEnqueued(*job_id));
ASSERT_FALSE(shared->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
platform.ClearWorkerTasks();
dispatcher.AbortAll();
}
......@@ -689,7 +697,7 @@ TEST_F(LazyCompilerDispatcherTest, AbortJobAlreadyStarted) {
ASSERT_FALSE(shared->is_compiled());
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_FALSE(dispatcher.jobs_.begin()->second->has_run);
ASSERT_TRUE(platform.WorkerTasksPending());
ASSERT_TRUE(platform.JobTaskPending());
// Have dispatcher block on the background thread when running the job.
{
......@@ -698,7 +706,7 @@ TEST_F(LazyCompilerDispatcherTest, AbortJobAlreadyStarted) {
}
// Start background thread and wait until it is about to run the job.
platform.RunWorkerTasks(V8::GetCurrentPlatform());
platform.RunJobTasks(V8::GetCurrentPlatform());
while (dispatcher.block_for_testing_.Value()) {
}
......@@ -722,7 +730,7 @@ TEST_F(LazyCompilerDispatcherTest, AbortJobAlreadyStarted) {
ASSERT_EQ(dispatcher.jobs_.size(), 1u);
ASSERT_TRUE(dispatcher.jobs_.begin()->second->has_run);
ASSERT_TRUE(dispatcher.jobs_.begin()->second->aborted);
ASSERT_FALSE(platform.WorkerTasksPending());
ASSERT_FALSE(platform.JobTaskPending());
ASSERT_TRUE(platform.IdleTaskPending());
// Run the pending idle task
......@@ -732,7 +740,7 @@ TEST_F(LazyCompilerDispatcherTest, AbortJobAlreadyStarted) {
ASSERT_FALSE(dispatcher.IsEnqueued(*job_id));
ASSERT_FALSE(shared->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
ASSERT_FALSE(platform.WorkerTasksPending());
ASSERT_FALSE(platform.JobTaskPending());
dispatcher.AbortAll();
}
......@@ -825,12 +833,12 @@ TEST_F(LazyCompilerDispatcherTest, CompileMultipleOnBackgroundThread) {
ASSERT_FALSE(shared_1->is_compiled());
ASSERT_FALSE(shared_2->is_compiled());
ASSERT_FALSE(platform.IdleTaskPending());
ASSERT_TRUE(platform.WorkerTasksPending());
ASSERT_TRUE(platform.JobTaskPending());
platform.RunWorkerTasksAndBlock(V8::GetCurrentPlatform());
platform.RunJobTasksAndBlock(V8::GetCurrentPlatform());
ASSERT_TRUE(platform.IdleTaskPending());
ASSERT_FALSE(platform.WorkerTasksPending());
ASSERT_FALSE(platform.JobTaskPending());
ASSERT_EQ(dispatcher.jobs_.size(), 2u);
ASSERT_TRUE(dispatcher.jobs_.begin()->second->has_run);
ASSERT_TRUE((++dispatcher.jobs_.begin())->second->has_run);
......