Commit 40a1f822 authored by jochen@chromium.org

Introduce --job-based-recompilation flag

The implementation is not yet complete: it doesn't support blocking yet,
and it doesn't collect statistics.

These things will be fixed in follow-up CLs.

BUG=v8:3608
R=yangguo@chromium.org
LOG=n

Review URL: https://codereview.chromium.org/620093003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@24425 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent f98b7ecd
@@ -310,6 +310,11 @@ DEFINE_BOOL(optimize_for_in, true, "optimize functions containing for-in loops")
DEFINE_BOOL(concurrent_recompilation, true,
"optimizing hot functions asynchronously on a separate thread")
DEFINE_BOOL(job_based_recompilation, false,
"post tasks to v8::Platform instead of using a thread for "
"concurrent recompilation")
DEFINE_IMPLICATION(job_based_recompilation, concurrent_recompilation)
DEFINE_NEG_IMPLICATION(job_based_recompilation, block_concurrent_recompilation)
DEFINE_BOOL(trace_concurrent_recompilation, false,
"track concurrent recompilation")
DEFINE_INT(concurrent_recompilation_queue_length, 8,
...
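The flag only changes behavior if the embedder's v8::Platform actually runs the tasks that V8 posts via CallOnBackgroundThread(). The following is a minimal sketch of such a platform, assuming the v8-platform.h interface of this period (v8::Task with a Run() method, and CallOnBackgroundThread()/CallOnForegroundThread() taking ownership of the task); ToyPlatform and the thread-per-task strategy are illustrative only and not part of this CL.

  // Illustrative sketch only; a real embedder would use a worker pool.
  #include <thread>
  #include "include/v8-platform.h"

  class ToyPlatform : public v8::Platform {
   public:
    virtual void CallOnBackgroundThread(v8::Task* task,
                                        ExpectedRuntime expected_runtime) {
      // V8 transfers ownership of |task|; run it off the caller's thread.
      std::thread worker([task]() {
        task->Run();
        delete task;
      });
      worker.detach();
    }

    virtual void CallOnForegroundThread(v8::Isolate* isolate, v8::Task* task) {
      // A real embedder would post |task| to the isolate's main message loop.
      task->Run();
      delete task;
    }
  };

A platform registered with v8::V8::InitializePlatform() before V8::Initialize() is what V8::GetCurrentPlatform() in the change below hands the new CompileTask to.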
@@ -15,6 +15,50 @@
namespace v8 {
namespace internal {
class OptimizingCompilerThread::CompileTask : public v8::Task {
public:
CompileTask(Isolate* isolate, OptimizedCompileJob* job)
: isolate_(isolate), job_(job) {}
virtual ~CompileTask() {}
private:
// v8::Task overrides.
virtual void Run() OVERRIDE {
Isolate::SetIsolateThreadLocals(isolate_, NULL);
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
// The function may have already been optimized by OSR. Simply continue.
OptimizedCompileJob::Status status = job_->OptimizeGraph();
USE(status); // Prevent an unused-variable error in release mode.
DCHECK(status != OptimizedCompileJob::FAILED);
// Use a mutex to make sure that functions marked for install
// are always also queued.
{
base::LockGuard<base::Mutex> lock_guard(
&isolate_->optimizing_compiler_thread()->output_queue_mutex_);
isolate_->optimizing_compiler_thread()->output_queue_.Enqueue(job_);
}
isolate_->stack_guard()->RequestInstallCode();
{
base::LockGuard<base::Mutex> lock_guard(
&isolate_->optimizing_compiler_thread()->input_queue_mutex_);
isolate_->optimizing_compiler_thread()->input_queue_length_--;
}
isolate_->optimizing_compiler_thread()->input_queue_semaphore_.Signal();
}
Isolate* isolate_;
OptimizedCompileJob* job_;
DISALLOW_COPY_AND_ASSIGN(CompileTask);
};
OptimizingCompilerThread::~OptimizingCompilerThread() {
DCHECK_EQ(0, input_queue_length_);
DeleteArray(input_queue_);
@@ -40,6 +84,10 @@ void OptimizingCompilerThread::Run() {
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
if (FLAG_job_based_recompilation) {
return;
}
base::ElapsedTimer total_timer;
if (FLAG_trace_concurrent_recompilation) total_timer.Start();
@@ -86,6 +134,7 @@ void OptimizingCompilerThread::Run() {
OptimizedCompileJob* OptimizingCompilerThread::NextInput() {
base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
DCHECK(!FLAG_job_based_recompilation);
if (input_queue_length_ == 0) return NULL;
OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
DCHECK_NE(NULL, job);
@@ -134,6 +183,7 @@ static void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
DCHECK(!FLAG_job_based_recompilation);
OptimizedCompileJob* job;
while ((job = NextInput())) {
// This should not block, since we have one signal on the input queue
@@ -172,8 +222,10 @@ void OptimizingCompilerThread::Flush() {
DCHECK(!IsOptimizerThread());
base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
if (FLAG_block_concurrent_recompilation) Unblock();
if (!FLAG_job_based_recompilation) {
input_queue_semaphore_.Signal();
stop_semaphore_.Wait();
}
FlushOutputQueue(true);
if (FLAG_concurrent_osr) FlushOsrBuffer(true);
if (FLAG_trace_concurrent_recompilation) {
@@ -186,10 +238,20 @@ void OptimizingCompilerThread::Stop() {
DCHECK(!IsOptimizerThread());
base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
if (FLAG_block_concurrent_recompilation) Unblock();
if (!FLAG_job_based_recompilation) {
input_queue_semaphore_.Signal();
stop_semaphore_.Wait();
}
if (FLAG_job_based_recompilation) {
while (true) {
{
base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
if (!input_queue_length_) break;
}
input_queue_semaphore_.Wait();
}
} else if (FLAG_concurrent_recompilation_delay != 0) {
// At this point the optimizing compiler thread's event loop has stopped.
// There is no need for a mutex when reading input_queue_length_.
while (input_queue_length_ > 0) CompileNext();
@@ -274,7 +336,10 @@ void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
input_queue_[InputQueueIndex(input_queue_length_)] = job;
input_queue_length_++;
}
if (FLAG_job_based_recompilation) {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new CompileTask(isolate_, job), v8::Platform::kShortRunningTask);
} else if (FLAG_block_concurrent_recompilation) {
blocked_jobs_++;
} else {
input_queue_semaphore_.Signal();
@@ -284,6 +349,9 @@ void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
void OptimizingCompilerThread::Unblock() {
DCHECK(!IsOptimizerThread());
if (FLAG_job_based_recompilation) {
return;
}
while (blocked_jobs_ > 0) {
input_queue_semaphore_.Signal();
blocked_jobs_--;
...
@@ -84,6 +84,8 @@ class OptimizingCompilerThread : public base::Thread {
#endif
private:
class CompileTask;
enum StopFlag { CONTINUE, STOP, FLUSH };
void FlushInputQueue(bool restore_function_code);
@@ -121,6 +123,9 @@ class OptimizingCompilerThread : public base::Thread {
// Queue of recompilation tasks ready to be installed (excluding OSR).
UnboundQueue<OptimizedCompileJob*> output_queue_;
// Used for job-based recompilation, which has multiple producers on
// different threads.
base::Mutex output_queue_mutex_;
// Cyclic buffer of recompilation tasks for OSR.
OptimizedCompileJob** osr_buffer_;
...
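For completeness, here is one hedged way an embedder could turn the new flag on at startup without touching the command line, using the public flag-string API; the helper name EnableJobBasedRecompilation is made up for illustration, while the flag name matches the DEFINE_BOOL above (d8 users would simply pass --job-based-recompilation).

  #include "include/v8.h"

  void EnableJobBasedRecompilation() {
    // V8's flag parser treats '-' and '_' in flag names interchangeably.
    static const char kFlags[] = "--job_based_recompilation";
    v8::V8::SetFlagsFromString(kFlags, static_cast<int>(sizeof(kFlags) - 1));
  }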