// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/optimizing-compile-dispatcher.h"

#include "src/base/atomicops.h"
#include "src/full-codegen/full-codegen.h"
#include "src/isolate.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

namespace {

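// Frees a finished or abandoned job. If requested, the function's code is
// reset to the unoptimized version from its SharedFunctionInfo first.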
void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
                                bool restore_function_code) {
  // The recompile job is allocated in the CompilationInfo's zone.
  CompilationInfo* info = job->info();
  if (restore_function_code) {
    Handle<JSFunction> function = info->closure();
    function->ReplaceCode(function->shared()->code());
  }
  delete info;
}

}  // namespace


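// A CompileTask runs one recompile job on a background thread. Every live
// task holds a reference on the dispatcher (ref_count_), which Flush() and
// Stop() use to wait until all in-flight tasks have drained.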
class OptimizingCompileDispatcher::CompileTask : public v8::Task {
 public:
  explicit CompileTask(Isolate* isolate) : isolate_(isolate) {
    OptimizingCompileDispatcher* dispatcher =
        isolate_->optimizing_compile_dispatcher();
    base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
    ++dispatcher->ref_count_;
  }

  virtual ~CompileTask() {}

 private:
  // v8::Task overrides.
  void Run() override {
    // Compilation must not touch the JS heap: no allocation, no new handles,
    // and no handle dereferences on the background thread.
    DisallowHeapAllocation no_allocation;
    DisallowHandleAllocation no_handles;
    DisallowHandleDereference no_deref;

    OptimizingCompileDispatcher* dispatcher =
        isolate_->optimizing_compile_dispatcher();
    {
      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
      TRACE_EVENT0("v8", "V8.RecompileConcurrent");

      if (dispatcher->recompilation_delay_ != 0) {
        base::OS::Sleep(base::TimeDelta::FromMilliseconds(
            dispatcher->recompilation_delay_));
      }

      dispatcher->CompileNext(dispatcher->NextInput(true));
    }
    {
      // Drop this task's reference on the dispatcher and wake up a waiting
      // Flush() or Stop() if it was the last one.
      base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
      if (--dispatcher->ref_count_ == 0) {
        dispatcher->ref_count_zero_.NotifyOne();
      }
    }
  }

  Isolate* isolate_;

  DISALLOW_COPY_AND_ASSIGN(CompileTask);
};


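// By the time the dispatcher is destroyed, no background task may still be
// running and the input queue must already have been drained.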
OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#ifdef DEBUG
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    DCHECK_EQ(0, ref_count_);
  }
#endif
  DCHECK_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
}


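// Takes the next job off the front of the circular input queue. While the
// dispatcher is flushing, non-OSR jobs are disposed immediately and NULL is
// returned instead.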
OptimizedCompileJob* OptimizingCompileDispatcher::NextInput(
    bool check_if_flushing) {
  base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) return NULL;
  OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
  DCHECK_NOT_NULL(job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  if (check_if_flushing) {
    if (static_cast<ModeFlag>(base::Acquire_Load(&mode_)) == FLUSH) {
      if (!job->info()->is_osr()) {
        AllowHandleDereference allow_handle_dereference;
        DisposeOptimizedCompileJob(job, true);
      }
      return NULL;
    }
  }
  return job;
}


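// Compiles a single job on the background thread and queues the result for
// the main thread, which installs it via InstallOptimizedFunctions().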
void OptimizingCompileDispatcher::CompileNext(OptimizedCompileJob* job) {
  if (!job) return;

  // The function may have already been optimized by OSR.  Simply continue.
  OptimizedCompileJob::Status status = job->OptimizeGraph();
  USE(status);  // Prevent an unused-variable error in release mode.
  DCHECK(status != OptimizedCompileJob::FAILED);

  // Use a mutex to make sure that functions marked for install
  // are always also queued.
  base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
  output_queue_.push(job);
  isolate_->stack_guard()->RequestInstallCode();
}


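// Drains the output queue without installing anything; non-OSR jobs are
// simply disposed (optionally restoring the unoptimized code).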
void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
  for (;;) {
    OptimizedCompileJob* job = NULL;
    {
      base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }

    // OSR jobs are dealt with separately.
    if (!job->info()->is_osr()) {
      DisposeOptimizedCompileJob(job, restore_function_code);
    }
  }
}


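// Flushes both queues: switch to FLUSH mode so background tasks dispose
// their jobs, wait for all in-flight tasks to finish, then throw away any
// results that already reached the output queue.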
void OptimizingCompileDispatcher::Flush() {
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }
  FlushOutputQueue(true);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF("  ** Flushed concurrent recompilation queues.\n");
  }
}


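// Like Flush(), but used at tear-down. With a nonzero recompilation delay
// the remaining jobs are compiled and installed synchronously; otherwise
// the output queue is simply discarded.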
void OptimizingCompileDispatcher::Stop() {
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }

  if (recompilation_delay_ != 0) {
    // At this point the optimizing compiler thread's event loop has stopped.
    // There is no need for a mutex when reading input_queue_length_.
    while (input_queue_length_ > 0) CompileNext(NextInput());
    InstallOptimizedFunctions();
  } else {
    FlushOutputQueue(false);
  }
}


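// Runs on the main thread: pops finished jobs off the output queue and
// installs their optimized code, unless the function was already optimized
// (e.g. by OSR) in the meantime.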
void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
  HandleScope handle_scope(isolate_);

  for (;;) {
    OptimizedCompileJob* job = NULL;
    {
      base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }
    CompilationInfo* info = job->info();
    Handle<JSFunction> function(*info->closure());
    if (function->IsOptimized()) {
      if (FLAG_trace_concurrent_recompilation) {
        PrintF("  ** Aborting compilation for ");
        function->ShortPrint();
        PrintF(" as it has already been optimized.\n");
      }
      DisposeOptimizedCompileJob(job, false);
    } else {
      Compiler::FinalizeOptimizedCompileJob(job);
    }
  }
}


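// Appends a job to the back of the circular input queue and either starts a
// background task for it or, with --block-concurrent-recompilation, defers
// that until Unblock() is called.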
void OptimizingCompileDispatcher::QueueForOptimization(
    OptimizedCompileJob* job) {
  DCHECK(IsQueueAvailable());
  {
    // Add job to the back of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    blocked_jobs_++;
  } else {
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
  }
}


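// Posts one background task per job that was held back by
// --block-concurrent-recompilation.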
void OptimizingCompileDispatcher::Unblock() {
  while (blocked_jobs_ > 0) {
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
    blocked_jobs_--;
  }
}


}  // namespace internal
}  // namespace v8