// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/optimizing-compile-dispatcher.h"

#include "src/v8.h"

#include "src/base/atomicops.h"
#include "src/full-codegen.h"
#include "src/hydrogen.h"
#include "src/isolate.h"

namespace v8 {
namespace internal {

17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41
namespace {

void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
                                bool restore_function_code) {
  // The recompile job is allocated in the CompilationInfo's zone.
  CompilationInfo* info = job->info();
  if (restore_function_code) {
    if (info->is_osr()) {
      if (!job->IsWaitingForInstall()) {
        // Remove stack check that guards OSR entry on original code.
        Handle<Code> code = info->unoptimized_code();
        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
        BackEdgeTable::RemoveStackCheck(code, offset);
      }
    } else {
      Handle<JSFunction> function = info->closure();
      function->ReplaceCode(function->shared()->code());
    }
  }
  delete info;
}

}  // namespace


// Background task that compiles exactly one job from the dispatcher's input
// queue on a worker thread. Each live task holds one reference in the
// dispatcher's ref count so that Flush()/Stop() can wait until all
// in-flight tasks have finished.
class OptimizingCompileDispatcher::CompileTask : public v8::Task {
 public:
  explicit CompileTask(Isolate* isolate) : isolate_(isolate) {
    OptimizingCompileDispatcher* dispatcher =
        isolate_->optimizing_compile_dispatcher();
    base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
    ++dispatcher->ref_count_;
  }

  virtual ~CompileTask() {}

 private:
  // v8::Task overrides.
  void Run() override {
    // Background compilation must not allocate on the heap or touch
    // handles; these scopes make violations fail loudly in debug builds.
    DisallowHeapAllocation no_allocation;
    DisallowHandleAllocation no_handles;
    DisallowHandleDereference no_deref;

    OptimizingCompileDispatcher* dispatcher =
        isolate_->optimizing_compile_dispatcher();
    {
      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);

      // Artificial delay (presumably for testing/widening race windows —
      // configured via recompilation_delay_) before picking up work.
      if (dispatcher->recompilation_delay_ != 0) {
        base::OS::Sleep(dispatcher->recompilation_delay_);
      }

      // Pop the next job (honoring FLUSH mode) and compile it.
      dispatcher->CompileNext(dispatcher->NextInput(true));
    }
    {
      // Drop our reference; wake a waiter in Flush()/Stop() when the last
      // task completes.
      base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
      if (--dispatcher->ref_count_ == 0) {
        dispatcher->ref_count_zero_.NotifyOne();
      }
    }
  }

  Isolate* isolate_;

  DISALLOW_COPY_AND_ASSIGN(CompileTask);
};


OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#ifdef DEBUG
  {
    // No background CompileTask may still be running at teardown.
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    DCHECK_EQ(0, ref_count_);
  }
#endif
  // The input queue must have been drained (by Stop()) before destruction.
  DCHECK_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
  if (FLAG_concurrent_osr) {
#ifdef DEBUG
    // Every OSR job must have been installed or disposed by now.
    for (int i = 0; i < osr_buffer_capacity_; i++) {
      CHECK_NULL(osr_buffer_[i]);
    }
#endif
    DeleteArray(osr_buffer_);
  }
}

// Pops the next job off the circular input queue, or returns NULL when the
// queue is empty. When |check_if_flushing| is set (the background-thread
// path), a job popped while the dispatcher is in FLUSH mode is disposed
// instead of returned; OSR jobs are skipped here because FlushOsrBuffer()
// cleans them up separately.
OptimizedCompileJob* OptimizingCompileDispatcher::NextInput(
    bool check_if_flushing) {
  base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) return NULL;
  OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
  DCHECK_NOT_NULL(job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  if (check_if_flushing) {
    if (static_cast<ModeFlag>(base::Acquire_Load(&mode_)) == FLUSH) {
      if (!job->info()->is_osr()) {
        // Disposal restores the function's code, which requires handle
        // dereferences on this (background) thread.
        AllowHandleDereference allow_handle_dereference;
        DisposeOptimizedCompileJob(job, true);
      }
      return NULL;
    }
  }
  return job;
}


// Runs the optimization phase of |job| on the calling thread and queues the
// result for installation on the main thread. A NULL job (e.g. the queue
// was flushed underneath us) is ignored.
void OptimizingCompileDispatcher::CompileNext(OptimizedCompileJob* job) {
  if (!job) return;

  // The function may have already been optimized by OSR.  Simply continue.
  OptimizedCompileJob::Status status = job->OptimizeGraph();
  USE(status);  // Prevent an unused-variable error in release mode.
  DCHECK(status != OptimizedCompileJob::FAILED);

  // Use a mutex to make sure that functions marked for install
  // are always also queued.
  base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
  output_queue_.push(job);
  isolate_->stack_guard()->RequestInstallCode();
}


143 144 145 146 147 148 149 150
void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
  for (;;) {
    OptimizedCompileJob* job = NULL;
    {
      base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
151
    }
152

153 154
    // OSR jobs are dealt with separately.
    if (!job->info()->is_osr()) {
155
      DisposeOptimizedCompileJob(job, restore_function_code);
156 157
    }
  }
158
}
159

160

161
void OptimizingCompileDispatcher::FlushOsrBuffer(bool restore_function_code) {
162 163
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    if (osr_buffer_[i] != NULL) {
164
      DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
165 166
      osr_buffer_[i] = NULL;
    }
167
  }
168 169 170
}


// Discards all pending recompilation work. Switches to FLUSH mode so
// background tasks drop the jobs they pick up, waits (under the ref-count
// mutex) for every in-flight task to finish, restores COMPILE mode, and
// then disposes whatever remains in the output queue and OSR buffer —
// restoring the unoptimized code on the affected functions.
void OptimizingCompileDispatcher::Flush() {
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  // Blocked jobs must be released so their tasks can observe FLUSH and
  // drain the input queue.
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }
  FlushOutputQueue(true);
  if (FLAG_concurrent_osr) FlushOsrBuffer(true);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF("  ** Flushed concurrent recompilation queues.\n");
  }
}


// Shuts the dispatcher down. Like Flush(), waits for all background tasks
// to finish; then either completes the remaining work synchronously (when a
// recompilation delay is configured — presumably so tests see deterministic
// results; TODO confirm) or discards it without restoring function code.
void OptimizingCompileDispatcher::Stop() {
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }

  if (recompilation_delay_ != 0) {
    // At this point the optimizing compiler thread's event loop has stopped.
    // There is no need for a mutex when reading input_queue_length_.
    while (input_queue_length_ > 0) CompileNext(NextInput());
    InstallOptimizedFunctions();
  } else {
    FlushOutputQueue(false);
  }

  if (FLAG_concurrent_osr) FlushOsrBuffer(false);

  // Report OSR buffer hit rate when tracing is enabled.
  if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
      FLAG_concurrent_osr) {
    PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
  }
}


// Main-thread entry point: drains the output queue and handles each
// finished job. OSR jobs are marked ready for install (so
// FindReadyOSRCandidate() can hand them out) and have the OSR stack check
// removed from the unoptimized code; regular jobs get their optimized code
// installed, unless the function was optimized in the meantime, in which
// case the job is discarded.
void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
  HandleScope handle_scope(isolate_);

  for (;;) {
    OptimizedCompileJob* job = NULL;
    {
      base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }
    CompilationInfo* info = job->info();
    Handle<JSFunction> function(*info->closure());
    if (info->is_osr()) {
      if (FLAG_trace_osr) {
        PrintF("[COSR - ");
        function->ShortPrint();
        PrintF(" is ready for install and entry at AST id %d]\n",
               info->osr_ast_id().ToInt());
      }
      // Mark the job as waiting; the runtime picks it up for OSR entry.
      job->WaitForInstall();
      // Remove stack check that guards OSR entry on original code.
      Handle<Code> code = info->unoptimized_code();
      uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
      BackEdgeTable::RemoveStackCheck(code, offset);
    } else {
      if (function->IsOptimized()) {
        if (FLAG_trace_concurrent_recompilation) {
          PrintF("  ** Aborting compilation for ");
          function->ShortPrint();
          PrintF(" as it has already been optimized.\n");
        }
        DisposeOptimizedCompileJob(job, false);
      } else {
        // Finalize the compile; fall back to the shared (unoptimized) code
        // if finalization failed.
        Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
        function->ReplaceCode(code.is_null() ? function->shared()->code()
                                             : *code);
      }
    }
  }
}


// Enqueues |job| for background compilation. OSR jobs also go into the OSR
// buffer and jump the queue (inserted at the front); regular jobs are
// appended at the back. With --block-concurrent-recompilation the
// background task is held back until Unblock(); otherwise it is posted
// immediately.
void OptimizingCompileDispatcher::QueueForOptimization(
    OptimizedCompileJob* job) {
  DCHECK(IsQueueAvailable());
  CompilationInfo* info = job->info();
  if (info->is_osr()) {
    osr_attempts_++;
    AddToOsrBuffer(job);
    // Add job to the front of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    // Move shift_ back by one.
    input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
    input_queue_[InputQueueIndex(0)] = job;
    input_queue_length_++;
  } else {
    // Add job to the back of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    blocked_jobs_++;
  } else {
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
  }
}


287
void OptimizingCompileDispatcher::Unblock() {
288
  while (blocked_jobs_ > 0) {
289 290
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
291 292
    blocked_jobs_--;
  }
293 294
}

295

296
OptimizedCompileJob* OptimizingCompileDispatcher::FindReadyOSRCandidate(
297
    Handle<JSFunction> function, BailoutId osr_ast_id) {
298
  for (int i = 0; i < osr_buffer_capacity_; i++) {
299
    OptimizedCompileJob* current = osr_buffer_[i];
300
    if (current != NULL && current->IsWaitingForInstall() &&
301
        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
302 303
      osr_hits_++;
      osr_buffer_[i] = NULL;
304
      return current;
305 306
    }
  }
307
  return NULL;
308 309 310
}


311 312
bool OptimizingCompileDispatcher::IsQueuedForOSR(Handle<JSFunction> function,
                                                 BailoutId osr_ast_id) {
313
  for (int i = 0; i < osr_buffer_capacity_; i++) {
314
    OptimizedCompileJob* current = osr_buffer_[i];
315
    if (current != NULL &&
316
        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
317
      return !current->IsWaitingForInstall();
318 319 320 321 322 323
    }
  }
  return false;
}


324
bool OptimizingCompileDispatcher::IsQueuedForOSR(JSFunction* function) {
325
  for (int i = 0; i < osr_buffer_capacity_; i++) {
326
    OptimizedCompileJob* current = osr_buffer_[i];
327 328
    if (current != NULL && *current->info()->closure() == function) {
      return !current->IsWaitingForInstall();
329 330 331 332 333 334
    }
  }
  return false;
}


// Inserts |job| into the circular OSR buffer. Scans forward from the cursor
// for a slot that is empty or holds a stale job (finished compiling but
// never picked up for install); a stale occupant is evicted and disposed.
// NOTE(review): the scan loops until such a slot is found — this relies on
// the buffer never being entirely full of still-compiling jobs.
void OptimizingCompileDispatcher::AddToOsrBuffer(OptimizedCompileJob* job) {
  // Find the next slot that is empty or has a stale job.
  OptimizedCompileJob* stale = NULL;
  while (true) {
    stale = osr_buffer_[osr_buffer_cursor_];
    if (stale == NULL || stale->IsWaitingForInstall()) break;
    osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
  }

  // Add to found slot and dispose the evicted job.
  if (stale != NULL) {
    DCHECK(stale->IsWaitingForInstall());
    CompilationInfo* info = stale->info();
    if (FLAG_trace_osr) {
      PrintF("[COSR - Discarded ");
      info->closure()->PrintName();
      PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
    }
    DisposeOptimizedCompileJob(stale, false);
  }
  osr_buffer_[osr_buffer_cursor_] = job;
  osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
}
}
}  // namespace v8::internal