// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/optimizing-compile-dispatcher.h"

#include "src/base/atomicops.h"
#include "src/full-codegen/full-codegen.h"
#include "src/isolate.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

namespace {

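// Deletes a finished or aborted job. If requested, the function's code is
// reset to the unoptimized code from its SharedFunctionInfo first, so it
// does not keep pointing at the discarded optimized code.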
void DisposeCompilationJob(CompilationJob* job, bool restore_function_code) {
  if (restore_function_code) {
    Handle<JSFunction> function = job->info()->closure();
    function->ReplaceCode(function->shared()->code());
    // TODO(mvstanton): We can't call EnsureLiterals here due to allocation,
    // but we probably shouldn't call ReplaceCode either, as this
    // sometimes runs on the worker thread!
    // JSFunction::EnsureLiterals(function);
  }
  delete job;
}

}  // namespace


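// A background task that takes one job off the dispatcher's input queue and
// runs the heap-access-free graph-optimization phase on it. The ref count
// tracks in-flight tasks so that Flush() and Stop() can wait for them.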
class OptimizingCompileDispatcher::CompileTask : public v8::Task {
 public:
  explicit CompileTask(Isolate* isolate) : isolate_(isolate) {
    OptimizingCompileDispatcher* dispatcher =
        isolate_->optimizing_compile_dispatcher();
    base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
    ++dispatcher->ref_count_;
  }

  virtual ~CompileTask() {}

 private:
  // v8::Task overrides.
  void Run() override {
    DisallowHeapAllocation no_allocation;
    DisallowHandleAllocation no_handles;
    DisallowHandleDereference no_deref;

    OptimizingCompileDispatcher* dispatcher =
        isolate_->optimizing_compile_dispatcher();
    {
      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
      TRACE_EVENT0("v8", "V8.RecompileConcurrent");

      if (dispatcher->recompilation_delay_ != 0) {
        base::OS::Sleep(base::TimeDelta::FromMilliseconds(
            dispatcher->recompilation_delay_));
      }

      dispatcher->CompileNext(dispatcher->NextInput(true));
    }
    {
      base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
      if (--dispatcher->ref_count_ == 0) {
        dispatcher->ref_count_zero_.NotifyOne();
      }
    }
  }

  Isolate* isolate_;

  DISALLOW_COPY_AND_ASSIGN(CompileTask);
};


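// By the time the dispatcher is destroyed, all background tasks must have
// finished and the input queue must have been drained; the checks below
// verify this.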
OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#ifdef DEBUG
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    DCHECK_EQ(0, ref_count_);
  }
#endif
  DCHECK_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
}

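// Takes the next job off the front of the circular input queue. When called
// from a background task (check_if_flushing), a job dequeued while a flush
// is in progress is disposed of immediately instead of being compiled.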
CompilationJob* OptimizingCompileDispatcher::NextInput(bool check_if_flushing) {
  base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) return NULL;
  CompilationJob* job = input_queue_[InputQueueIndex(0)];
  DCHECK_NOT_NULL(job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  if (check_if_flushing) {
    if (static_cast<ModeFlag>(base::Acquire_Load(&mode_)) == FLUSH) {
      AllowHandleDereference allow_handle_dereference;
      DisposeCompilationJob(job, true);
      return NULL;
    }
  }
  return job;
}

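// Runs the graph-optimization phase of the job and pushes it onto the
// output queue for finalization on the main thread.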
void OptimizingCompileDispatcher::CompileNext(CompilationJob* job) {
  if (!job) return;

  // The function may have already been optimized by OSR.  Simply continue.
  CompilationJob::Status status = job->OptimizeGraph();
  USE(status);  // Prevent an unused-variable error.

  // Use a mutex to make sure that functions marked for install
  // are always also queued.
  base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
  output_queue_.push(job);
  isolate_->stack_guard()->RequestInstallCode();
}


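// Disposes of every job waiting in the output queue without finalizing it.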
void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
  for (;;) {
    CompilationJob* job = NULL;
    {
      base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }

    DisposeCompilationJob(job, restore_function_code);
  }
}


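// Discards all pending and in-flight jobs: background tasks observe the
// FLUSH mode and dispose of any job they dequeue, and once they have all
// drained, the output queue is flushed as well.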
void OptimizingCompileDispatcher::Flush() {
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }
  FlushOutputQueue(true);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF("  ** Flushed concurrent recompilation queues.\n");
  }
}


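// Drains the dispatcher for shutdown. With an artificial recompilation
// delay, the remaining queued jobs are compiled and installed; otherwise
// the pending output is simply discarded.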
void OptimizingCompileDispatcher::Stop() {
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }

  if (recompilation_delay_ != 0) {
    // At this point the optimizing compiler thread's event loop has stopped.
    // There is no need for a mutex when reading input_queue_length_.
    while (input_queue_length_ > 0) CompileNext(NextInput());
    InstallOptimizedFunctions();
  } else {
    FlushOutputQueue(false);
  }
}


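// Called on the main thread (via the stack guard's install-code request) to
// finalize every job in the output queue, skipping functions that were
// already optimized in the meantime, e.g. by OSR.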
void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
  HandleScope handle_scope(isolate_);

  for (;;) {
    CompilationJob* job = NULL;
    {
      base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }
    CompilationInfo* info = job->info();
    Handle<JSFunction> function(*info->closure());
    if (function->IsOptimized()) {
      if (FLAG_trace_concurrent_recompilation) {
        PrintF("  ** Aborting compilation for ");
        function->ShortPrint();
        PrintF(" as it has already been optimized.\n");
      }
      DisposeCompilationJob(job, false);
    } else {
      Compiler::FinalizeCompilationJob(job);
    }
  }
}

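// Adds a job to the back of the input queue and spawns a background task
// for it, unless --block-concurrent-recompilation is in effect, in which
// case the task is deferred until Unblock() is called.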
void OptimizingCompileDispatcher::QueueForOptimization(CompilationJob* job) {
  DCHECK(IsQueueAvailable());
  {
    // Add job to the back of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    blocked_jobs_++;
  } else {
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
  }
}


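// Spawns the background tasks that were held back while
// --block-concurrent-recompilation was in effect.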
void OptimizingCompileDispatcher::Unblock() {
  while (blocked_jobs_ > 0) {
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
    blocked_jobs_--;
  }
}


}  // namespace internal
}  // namespace v8