// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "runtime-profiler.h"

#include "assembler.h"
#include "code-stubs.h"
#include "compilation-cache.h"
#include "deoptimizer.h"
#include "execution.h"
#include "full-codegen.h"
#include "global-handles.h"
#include "isolate-inl.h"
#include "mark-compact.h"
#include "platform.h"
#include "scopeinfo.h"

namespace v8 {
namespace internal {


// Optimization sampler constants.
static const int kSamplerFrameCount = 2;

// Constants for statistical profiler.
static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };

static const int kSamplerTicksBetweenThresholdAdjustment = 32;

static const int kSamplerThresholdInit = 3;
static const int kSamplerThresholdMin = 1;
static const int kSamplerThresholdDelta = 1;

static const int kSamplerThresholdSizeFactorInit = 3;

static const int kSizeLimit = 1500;

// Constants for counter based profiler.

// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;
// If the function optimization was disabled due to high deoptimization count,
// but the function is hot and has been seen on the stack this number of times,
// then we try to reenable optimization for this function.
static const int kProfilerTicksBeforeReenablingOptimization = 250;
// If a function does not have enough type info (according to
// FLAG_type_info_threshold), but has seen a huge number of ticks,
// optimize it as it is.
static const int kTicksWhenNotEnoughTypeInfo = 100;
// We only have one byte to store the number of ticks.
STATIC_ASSERT(kProfilerTicksBeforeOptimization < 256);
STATIC_ASSERT(kProfilerTicksBeforeReenablingOptimization < 256);
STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);


// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
static const int kMaxSizeEarlyOpt =
    5 * FullCodeGenerator::kBackEdgeDistanceUnit;


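// state_ counts the isolates currently executing JavaScript across the whole
// process; -1 means none is and the profiler thread is parked on the
// semaphore below (see WaitForSomeIsolateToEnterJS and HandleWakeUp).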
Atomic32 RuntimeProfiler::state_ = 0;

// TODO(isolates): Clean up the semaphore when it is no longer required.
static LazySemaphore<0>::type semaphore = LAZY_SEMAPHORE_INITIALIZER;

#ifdef DEBUG
bool RuntimeProfiler::has_been_globally_set_up_ = false;
#endif
bool RuntimeProfiler::enabled_ = false;


RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
    : isolate_(isolate),
      sampler_threshold_(kSamplerThresholdInit),
      sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
      sampler_ticks_until_threshold_adjustment_(
          kSamplerTicksBetweenThresholdAdjustment),
      sampler_window_position_(0),
      any_ic_changed_(false),
      code_generated_(false) {
  ClearSampleBuffer();
}


void RuntimeProfiler::GlobalSetUp() {
  ASSERT(!has_been_globally_set_up_);
  enabled_ = V8::UseCrankshaft() && FLAG_opt;
#ifdef DEBUG
  has_been_globally_set_up_ = true;
#endif
}


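// Reads the IC counters from the function's type feedback info and computes
// the percentage of ICs that have type information (100% if there are no
// ICs at all).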
static void GetICCounts(JSFunction* function,
                        int* ic_with_type_info_count,
                        int* ic_total_count,
                        int* percentage) {
  *ic_total_count = 0;
  *ic_with_type_info_count = 0;
  Object* raw_info =
      function->shared()->code()->type_feedback_info();
  if (raw_info->IsTypeFeedbackInfo()) {
    TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
    *ic_with_type_info_count = info->ic_with_type_info_count();
    *ic_total_count = info->ic_total_count();
  }
  *percentage = *ic_total_count > 0
      ? 100 * *ic_with_type_info_count / *ic_total_count
      : 100;
}


void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
  ASSERT(function->IsOptimizable());
  if (FLAG_trace_opt) {
    PrintF("[marking ");
    function->PrintName();
    PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address()));
    PrintF(" for recompilation, reason: %s", reason);
    if (FLAG_type_info_threshold > 0) {
      int typeinfo, total, percentage;
      GetICCounts(function, &typeinfo, &total, &percentage);
      PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total, percentage);
    }
    PrintF("]\n");
  }

  if (FLAG_parallel_recompilation) {
    function->MarkForParallelRecompilation();
  } else {
    // The next call to the function will trigger optimization.
    function->MarkForLazyRecompilation();
  }
}


void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
  // See AlwaysFullCompiler (in compiler.cc) comment on why we need
  // Debug::has_break_points().
  ASSERT(function->IsMarkedForLazyRecompilation() ||
         function->IsMarkedForParallelRecompilation());
  if (!FLAG_use_osr ||
      isolate_->DebuggerHasBreakPoints() ||
      function->IsBuiltin()) {
    return;
  }

  SharedFunctionInfo* shared = function->shared();
  // If the code is not optimizable, don't try OSR.
  if (!shared->code()->optimizable()) return;

  // We are not prepared to do OSR for a function that already has an
  // allocated arguments object.  The optimized code would bypass it for
  // arguments accesses, which is unsound.  Don't try OSR.
  if (shared->uses_arguments()) return;

  // We're using on-stack replacement: patch the unoptimized code so that
  // any back edge in any unoptimized frame will trigger on-stack
  // replacement for that frame.
  if (FLAG_trace_osr) {
    PrintF("[patching stack checks in ");
    function->PrintName();
    PrintF(" for on-stack replacement]\n");
  }

  // Get the stack check stub code object to match against.  We aren't
  // prepared to generate it, but we don't expect to have to.
  bool found_code = false;
  Code* stack_check_code = NULL;
  if (FLAG_count_based_interrupts) {
    InterruptStub interrupt_stub;
    found_code = interrupt_stub.FindCodeInCache(&stack_check_code);
  } else  // NOLINT
  {  // NOLINT
    StackCheckStub check_stub;
    found_code = check_stub.FindCodeInCache(&stack_check_code);
  }
  if (found_code) {
    Code* replacement_code =
        isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
    Code* unoptimized_code = shared->code();
    Deoptimizer::PatchStackCheckCode(unoptimized_code,
                                     stack_check_code,
                                     replacement_code);
  }
}


void RuntimeProfiler::ClearSampleBuffer() {
  memset(sampler_window_, 0, sizeof(sampler_window_));
  memset(sampler_window_weight_, 0, sizeof(sampler_window_weight_));
}


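// Returns the accumulated weight of |function| in the sampler window. With
// FLAG_lookup_sample_by_shared, samples are matched by SharedFunctionInfo
// rather than by function identity.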
int RuntimeProfiler::LookupSample(JSFunction* function) {
  int weight = 0;
  for (int i = 0; i < kSamplerWindowSize; i++) {
    Object* sample = sampler_window_[i];
    if (sample != NULL) {
      bool fits = FLAG_lookup_sample_by_shared
          ? (function->shared() == JSFunction::cast(sample)->shared())
          : (function == JSFunction::cast(sample));
      if (fits) {
        weight += sampler_window_weight_[i];
      }
    }
  }
  return weight;
}


void RuntimeProfiler::AddSample(JSFunction* function, int weight) {
  ASSERT(IsPowerOf2(kSamplerWindowSize));
  sampler_window_[sampler_window_position_] = function;
  sampler_window_weight_[sampler_window_position_] = weight;
  sampler_window_position_ = (sampler_window_position_ + 1) &
      (kSamplerWindowSize - 1);
}


void RuntimeProfiler::OptimizeNow() {
  HandleScope scope(isolate_);

  // Run through the JavaScript frames and collect them. If we already
  // have a sample of the function, we mark it for optimization
  // (eagerly or lazily).
  JSFunction* samples[kSamplerFrameCount];
  int sample_count = 0;
  int frame_count = 0;
  int frame_count_limit = FLAG_watch_ic_patching ? FLAG_frame_count
                                                 : kSamplerFrameCount;
  for (JavaScriptFrameIterator it(isolate_);
       frame_count++ < frame_count_limit && !it.done();
       it.Advance()) {
    JavaScriptFrame* frame = it.frame();
    JSFunction* function = JSFunction::cast(frame->function());

    if (!FLAG_watch_ic_patching) {
      // Adjust threshold each time we have processed
      // a certain number of ticks.
      if (sampler_ticks_until_threshold_adjustment_ > 0) {
        sampler_ticks_until_threshold_adjustment_--;
        if (sampler_ticks_until_threshold_adjustment_ <= 0) {
          // If the threshold is not already at the minimum
          // modify and reset the ticks until next adjustment.
          if (sampler_threshold_ > kSamplerThresholdMin) {
            sampler_threshold_ -= kSamplerThresholdDelta;
            sampler_ticks_until_threshold_adjustment_ =
                kSamplerTicksBetweenThresholdAdjustment;
          }
        }
      }
    }

    SharedFunctionInfo* shared = function->shared();
    Code* shared_code = shared->code();

    if (shared_code->kind() != Code::FUNCTION) continue;

    if (function->IsMarkedForLazyRecompilation() ||
        function->IsMarkedForParallelRecompilation()) {
      int nesting = shared_code->allow_osr_at_loop_nesting_level();
      if (nesting == 0) AttemptOnStackReplacement(function);
      int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
      shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
    }

    // Only record top-level code on top of the execution stack and
    // avoid optimizing excessively large scripts since top-level code
    // will be executed only once.
    const int kMaxToplevelSourceSize = 10 * 1024;
    if (shared->is_toplevel() &&
        (frame_count > 1 || shared->SourceSize() > kMaxToplevelSourceSize)) {
      continue;
    }

    // Do not record non-optimizable functions.
    if (shared->optimization_disabled()) {
      if (shared->deopt_count() >= FLAG_max_opt_count) {
        // If optimization was disabled due to many deoptimizations,
        // then check if the function is hot and try to reenable optimization.
        int ticks = shared_code->profiler_ticks();
        if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
          shared_code->set_profiler_ticks(0);
          shared->TryReenableOptimization();
        } else {
          shared_code->set_profiler_ticks(ticks + 1);
        }
      }
      continue;
    }
    if (!function->IsOptimizable()) continue;

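    // Counter-based decision: use the code object's profiler tick count and
    // the fraction of ICs with type information to decide when to optimize.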
    if (FLAG_watch_ic_patching) {
      int ticks = shared_code->profiler_ticks();

      if (ticks >= kProfilerTicksBeforeOptimization) {
        int typeinfo, total, percentage;
        GetICCounts(function, &typeinfo, &total, &percentage);
        if (percentage >= FLAG_type_info_threshold) {
          // If this particular function hasn't had any ICs patched for enough
          // ticks, optimize it now.
          Optimize(function, "hot and stable");
        } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
          Optimize(function, "not much type info but very hot");
        } else {
          shared_code->set_profiler_ticks(ticks + 1);
          if (FLAG_trace_opt_verbose) {
            PrintF("[not yet optimizing ");
            function->PrintName();
            PrintF(", not enough type info: %d/%d (%d%%)]\n",
                   typeinfo, total, percentage);
          }
        }
      } else if (!any_ic_changed_ &&
                 shared_code->instruction_size() < kMaxSizeEarlyOpt) {
        // If no IC was patched since the last tick and this function is very
        // small, optimistically optimize it now.
        Optimize(function, "small function");
      } else {
        shared_code->set_profiler_ticks(ticks + 1);
      }
    } else {  // !FLAG_watch_ic_patching
      samples[sample_count++] = function;

      int function_size = function->shared()->SourceSize();
      int threshold_size_factor = (function_size > kSizeLimit)
          ? sampler_threshold_size_factor_
          : 1;

      int threshold = sampler_threshold_ * threshold_size_factor;

      if (LookupSample(function) >= threshold) {
        Optimize(function, "sampler window lookup");
      }
    }
  }
  if (FLAG_watch_ic_patching) {
    any_ic_changed_ = false;
  } else {  // !FLAG_watch_ic_patching
    // Add the collected functions as samples. It's important not to do
    // this as part of collecting them because this will interfere with
    // the sample lookup in case of recursive functions.
    for (int i = 0; i < sample_count; i++) {
      AddSample(samples[i], kSamplerFrameWeight[i]);
    }
  }
}


void RuntimeProfiler::NotifyTick() {
  if (FLAG_count_based_interrupts) return;
  isolate_->stack_guard()->RequestRuntimeProfilerTick();
}


void RuntimeProfiler::SetUp() {
  ASSERT(has_been_globally_set_up_);
  if (!FLAG_watch_ic_patching) {
    ClearSampleBuffer();
  }
  // If the ticker hasn't already started, make sure to do so to get
  // the ticks for the runtime profiler.
  if (IsEnabled()) isolate_->logger()->EnsureTickerStarted();
}


void RuntimeProfiler::Reset() {
  if (!FLAG_watch_ic_patching) {
    sampler_threshold_ = kSamplerThresholdInit;
    sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
    sampler_ticks_until_threshold_adjustment_ =
        kSamplerTicksBetweenThresholdAdjustment;
  }
}


void RuntimeProfiler::TearDown() {
  // Nothing to do.
}


int RuntimeProfiler::SamplerWindowSize() {
  return kSamplerWindowSize;
}


// Update the pointers in the sampler window after a GC.
void RuntimeProfiler::UpdateSamplesAfterScavenge() {
  for (int i = 0; i < kSamplerWindowSize; i++) {
    Object* function = sampler_window_[i];
    if (function != NULL && isolate_->heap()->InNewSpace(function)) {
      MapWord map_word = HeapObject::cast(function)->map_word();
      if (map_word.IsForwardingAddress()) {
        sampler_window_[i] = map_word.ToForwardingAddress();
      } else {
        sampler_window_[i] = NULL;
      }
    }
  }
}


void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
  // The profiler thread must still be waiting.
  ASSERT(NoBarrier_Load(&state_) >= 0);
  // In IsolateEnteredJS we have already incremented the counter and
  // undid the decrement done by the profiler thread. Increment again
  // to get the right count of active isolates.
  NoBarrier_AtomicIncrement(&state_, 1);
  semaphore.Pointer()->Signal();
}


bool RuntimeProfiler::IsSomeIsolateInJS() {
  return NoBarrier_Load(&state_) > 0;
}


bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
  Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
  ASSERT(old_state >= -1);
  if (old_state != 0) return false;
  semaphore.Pointer()->Wait();
  return true;
}


void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) {
  // Do a fake increment. If the profiler is waiting on the semaphore,
  // the returned state is 0, which can be left as an initial state in
  // case profiling is restarted later. If the profiler is not
  // waiting, the increment will prevent it from waiting, but has to
  // be undone after the profiler is stopped.
  Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1);
  ASSERT(new_state >= 0);
  if (new_state == 0) {
    // The profiler thread is waiting. Wake it up. It must check for
    // stop conditions before attempting to wait again.
    semaphore.Pointer()->Signal();
  }
  thread->Join();
  // The profiler thread is now stopped. Undo the increment in case it
  // was not waiting.
  if (new_state != 0) {
    NoBarrier_AtomicIncrement(&state_, -1);
  }
}


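// Drops sampler window entries whose function has not been marked live by
// the garbage collector.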
void RuntimeProfiler::RemoveDeadSamples() {
  for (int i = 0; i < kSamplerWindowSize; i++) {
    Object* function = sampler_window_[i];
    if (function != NULL &&
        !Marking::MarkBitFrom(HeapObject::cast(function)).Get()) {
      sampler_window_[i] = NULL;
    }
  }
}


void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) {
  for (int i = 0; i < kSamplerWindowSize; i++) {
    visitor->VisitPointer(&sampler_window_[i]);
  }
}


bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
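  // If no isolate is currently executing JavaScript, block this thread until
  // one does. Returns true if the thread actually waited.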
  if (!RuntimeProfiler::IsSomeIsolateInJS()) {
    return RuntimeProfiler::WaitForSomeIsolateToEnterJS();
  }
  return false;
}


} }  // namespace v8::internal