// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"

#include "src/base/atomicops.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/execution/isolate.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/objects/objects-inl.h"
#include "src/tasks/cancelable-task.h"
#include "src/tracing/trace-event.h"

namespace v8 {
namespace internal {

namespace {

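// Deletes |job|. When |restore_function_code| is set, the function's code
// is first reset to the SharedFunctionInfo's code, and its optimization
// marker is cleared if it is still in the optimization queue.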
void DisposeCompilationJob(OptimizedCompilationJob* job,
                           bool restore_function_code) {
  if (restore_function_code) {
    Handle<JSFunction> function = job->compilation_info()->closure();
    function->set_code(function->shared().GetCode());
    if (function->IsInOptimizationQueue()) {
      function->ClearOptimizationMarker();
    }
    // TODO(mvstanton): We can't call EnsureFeedbackVector here due to
    // allocation, but we probably shouldn't call set_code either, as this
    // sometimes runs on the worker thread!
    // JSFunction::EnsureFeedbackVector(function);
  }
  delete job;
}

}  // namespace

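// Background task that pulls one job off the dispatcher's input queue,
// runs its execute phase on a worker thread, and leaves the finished job
// on the output queue for the main thread to install.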
class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
 public:
  explicit CompileTask(Isolate* isolate,
                       OptimizingCompileDispatcher* dispatcher)
      : CancelableTask(isolate),
        isolate_(isolate),
        worker_thread_runtime_call_stats_(
            isolate->counters()->worker_thread_runtime_call_stats()),
        dispatcher_(dispatcher) {
    // Register this task with the dispatcher so that Flush() and Stop()
    // can wait for all in-flight tasks to finish.
    base::MutexGuard lock_guard(&dispatcher_->ref_count_mutex_);
    ++dispatcher_->ref_count_;
  }

  ~CompileTask() override = default;

 private:
  // v8::Task overrides.
  void RunInternal() override {
    DisallowHeapAllocation no_allocation;
    DisallowHandleAllocation no_handles;
    DisallowHandleDereference no_deref;

    {
      WorkerThreadRuntimeCallStatsScope runtime_call_stats_scope(
          worker_thread_runtime_call_stats_);
      RuntimeCallTimerScope runtimeTimer(
          runtime_call_stats_scope.Get(),
          RuntimeCallCounterId::kOptimizeBackgroundDispatcherJob);

      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                   "V8.OptimizeBackground");

      // Apply an artificial compilation delay if one is configured.
      if (dispatcher_->recompilation_delay_ != 0) {
        base::OS::Sleep(base::TimeDelta::FromMilliseconds(
            dispatcher_->recompilation_delay_));
      }

      dispatcher_->CompileNext(dispatcher_->NextInput(true),
                               runtime_call_stats_scope.Get());
    }
    {
      // Drop this task's reference; the last task to finish wakes up any
      // thread waiting in Flush() or Stop().
      base::MutexGuard lock_guard(&dispatcher_->ref_count_mutex_);
      if (--dispatcher_->ref_count_ == 0) {
        dispatcher_->ref_count_zero_.NotifyOne();
      }
    }
  }

  Isolate* isolate_;
  WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats_;
  OptimizingCompileDispatcher* dispatcher_;

  DISALLOW_COPY_AND_ASSIGN(CompileTask);
};

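// By destruction time all background tasks must have finished
// (ref_count_ == 0) and the input queue must already be empty.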
OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#ifdef DEBUG
  {
    base::MutexGuard lock_guard(&ref_count_mutex_);
    DCHECK_EQ(0, ref_count_);
  }
#endif
  DCHECK_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
}

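// Pops the next job off the front of the circular input queue. If
// |check_if_flushing| is set and the dispatcher is in FLUSH mode, the job
// is disposed (restoring the function's code) and nullptr is returned.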
OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
    bool check_if_flushing) {
  base::MutexGuard access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) return nullptr;
  OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
  DCHECK_NOT_NULL(job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  if (check_if_flushing) {
    if (mode_ == FLUSH) {
      AllowHandleDereference allow_handle_dereference;
      DisposeCompilationJob(job, true);
      return nullptr;
    }
  }
  return job;
}

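// Executes |job| on the calling thread and queues the finished job for
// installation on the main thread.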
void OptimizingCompileDispatcher::CompileNext(OptimizedCompilationJob* job,
                                              RuntimeCallStats* stats) {
  if (!job) return;

  // The function may have already been optimized by OSR.  Simply continue.
  CompilationJob::Status status = job->ExecuteJob(stats);
  USE(status);  // Prevent an unused-variable error.

  {
    // Use a mutex to make sure that functions marked for install
    // are always also queued.
    base::MutexGuard access_output_queue_(&output_queue_mutex_);
    output_queue_.push(job);
  }

  isolate_->stack_guard()->RequestInstallCode();
}

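// Drains the output queue, disposing each finished job instead of
// installing it.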
void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
  for (;;) {
    OptimizedCompilationJob* job = nullptr;
    {
      base::MutexGuard access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }

    DisposeCompilationJob(job, restore_function_code);
  }
}

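// Discards all queued jobs. In kDontBlock mode only the queues are
// cleared; otherwise this also waits for in-flight background tasks,
// which drop their jobs while mode_ is FLUSH.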
void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
  if (blocking_behavior == BlockingBehavior::kDontBlock) {
    if (FLAG_block_concurrent_recompilation) Unblock();
    base::MutexGuard access_input_queue_(&input_queue_mutex_);
    while (input_queue_length_ > 0) {
      OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
      DCHECK_NOT_NULL(job);
      input_queue_shift_ = InputQueueIndex(1);
      input_queue_length_--;
      DisposeCompilationJob(job, true);
    }
    FlushOutputQueue(true);
    if (FLAG_trace_concurrent_recompilation) {
      PrintF("  ** Flushed concurrent recompilation queues (not blocking).\n");
    }
    return;
  }
  mode_ = FLUSH;
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    // Wait for all in-flight background tasks to drop their jobs.
    base::MutexGuard lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    mode_ = COMPILE;
  }
  FlushOutputQueue(true);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF("  ** Flushed concurrent recompilation queues.\n");
  }
}

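// Shuts the dispatcher down: waits for all background tasks to finish,
// then drains the output queue without restoring function code.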
void OptimizingCompileDispatcher::Stop() {
  mode_ = FLUSH;
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::MutexGuard lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    mode_ = COMPILE;
  }

  // At this point the optimizing compiler thread's event loop has stopped.
  // There is no need for a mutex when reading input_queue_length_.
  DCHECK_EQ(input_queue_length_, 0);
  FlushOutputQueue(false);
}

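// Runs on the main thread (triggered via the stack guard's install-code
// interrupt): finalizes each finished job unless the function has already
// been optimized in the meantime, e.g. by OSR.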
void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
  HandleScope handle_scope(isolate_);

  for (;;) {
    OptimizedCompilationJob* job = nullptr;
    {
      base::MutexGuard access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }
    OptimizedCompilationInfo* info = job->compilation_info();
    Handle<JSFunction> function(*info->closure(), isolate_);
    if (function->HasOptimizedCode()) {
      // The function was optimized in the meantime (e.g. by OSR), so the
      // result of this job is no longer needed.
      if (FLAG_trace_concurrent_recompilation) {
        PrintF("  ** Aborting compilation for ");
        function->ShortPrint();
        PrintF(" as it has already been optimized.\n");
      }
      DisposeCompilationJob(job, false);
    } else {
      Compiler::FinalizeOptimizedCompilationJob(job, isolate_);
    }
  }
}

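// Appends |job| to the input queue and schedules a background CompileTask,
// unless FLAG_block_concurrent_recompilation keeps jobs blocked until
// Unblock() is called.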
void OptimizingCompileDispatcher::QueueForOptimization(
    OptimizedCompilationJob* job) {
  DCHECK(IsQueueAvailable());
  {
    // Add job to the back of the input queue.
    base::MutexGuard access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    blocked_jobs_++;
  } else {
    V8::GetCurrentPlatform()->CallOnWorkerThread(
        std::make_unique<CompileTask>(isolate_, this));
  }
}

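// Schedules one background CompileTask for every job that was held back
// by FLAG_block_concurrent_recompilation.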
void OptimizingCompileDispatcher::Unblock() {
  while (blocked_jobs_ > 0) {
    V8::GetCurrentPlatform()->CallOnWorkerThread(
        std::make_unique<CompileTask>(isolate_, this));
    blocked_jobs_--;
  }
}

}  // namespace internal
}  // namespace v8