profile-generator.cc 23 KB
Newer Older
1
// Copyright 2012 the V8 project authors. All rights reserved.
2 3
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4

5
#include "src/profiler/profile-generator.h"
6

7
#include "src/base/adapters.h"
8
#include "src/debug/debug.h"
9
#include "src/deoptimizer.h"
10
#include "src/global-handles.h"
11
#include "src/profiler/cpu-profiler.h"
12
#include "src/profiler/profile-generator-inl.h"
13
#include "src/unicode.h"
14 15 16 17

namespace v8 {
namespace internal {

18

19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43
// Out-of-line definitions for the trivial construction/destruction of the
// pc-offset -> source-line table; the compiler-generated bodies suffice.
JITLineInfoTable::JITLineInfoTable() = default;

JITLineInfoTable::~JITLineInfoTable() = default;


// Records that generated code at |pc_offset| belongs to source line |line|.
// An entry is only inserted when the table would otherwise report a
// different line for this offset, keeping the map sparse.
void JITLineInfoTable::SetPosition(int pc_offset, int line) {
  DCHECK(pc_offset >= 0);
  DCHECK(line > 0);  // The 1-based number of the source line.
  if (GetSourceLineNumber(pc_offset) != line) {
    pc_offset_map_.insert(std::make_pair(pc_offset, line));
  }
}


// Maps |pc_offset| back to a 1-based source line number. Offsets beyond the
// last recorded one are attributed to the last recorded line; an empty table
// yields kNoLineNumberInfo.
int JITLineInfoTable::GetSourceLineNumber(int pc_offset) const {
  auto it = pc_offset_map_.lower_bound(pc_offset);
  if (it != pc_offset_map_.end()) return it->second;
  if (pc_offset_map_.empty()) return v8::CpuProfileNode::kNoLineNumberInfo;
  return pc_offset_map_.rbegin()->second;
}


44
// Shared empty-string sentinels used instead of null pointers so that the
// string fields of CodeEntry can be compared and printed unconditionally.
const char* const CodeEntry::kEmptyNamePrefix = "";
const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";
const char* const CodeEntry::kNoDeoptReason = "";

lpy's avatar
lpy committed
49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83
// Names for the synthetic entries that represent VM states rather than
// real JS functions in the profile tree.
const char* const CodeEntry::kProgramEntryName = "(program)";
const char* const CodeEntry::kIdleEntryName = "(idle)";
const char* const CodeEntry::kGarbageCollectorEntryName = "(garbage collector)";
const char* const CodeEntry::kUnresolvedFunctionName = "(unresolved function)";

// Lazily-created singletons for the synthetic entries above; constructed on
// first use via the Create() traits below.
base::LazyDynamicInstance<CodeEntry, CodeEntry::ProgramEntryCreateTrait>::type
    CodeEntry::kProgramEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

base::LazyDynamicInstance<CodeEntry, CodeEntry::IdleEntryCreateTrait>::type
    CodeEntry::kIdleEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

base::LazyDynamicInstance<CodeEntry, CodeEntry::GCEntryCreateTrait>::type
    CodeEntry::kGCEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

base::LazyDynamicInstance<CodeEntry,
                          CodeEntry::UnresolvedEntryCreateTrait>::type
    CodeEntry::kUnresolvedEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

CodeEntry* CodeEntry::ProgramEntryCreateTrait::Create() {
  return new CodeEntry(Logger::FUNCTION_TAG, CodeEntry::kProgramEntryName);
}

CodeEntry* CodeEntry::IdleEntryCreateTrait::Create() {
  return new CodeEntry(Logger::FUNCTION_TAG, CodeEntry::kIdleEntryName);
}

CodeEntry* CodeEntry::GCEntryCreateTrait::Create() {
  return new CodeEntry(Logger::BUILTIN_TAG,
                       CodeEntry::kGarbageCollectorEntryName);
}

CodeEntry* CodeEntry::UnresolvedEntryCreateTrait::Create() {
  return new CodeEntry(Logger::FUNCTION_TAG,
                       CodeEntry::kUnresolvedFunctionName);
}
84

85
// Releases the line table and the owned entries of every recorded inline
// stack.
CodeEntry::~CodeEntry() {
  delete line_info_;
  // Iterate by reference: the mapped values are std::vector<CodeEntry*>, and
  // the previous by-value loop copied each vector before deleting its
  // elements.
  for (auto& location : inline_locations_) {
    for (auto entry : location.second) {
      delete entry;
    }
  }
}


95
// Computes a hash identifying the logical function this entry represents.
// Entries with a real script id hash by (script id, position); entries
// without one fall back to hashing the interned string pointers and the
// line number. Must stay consistent with IsSameFunctionAs().
uint32_t CodeEntry::GetHash() const {
  uint32_t hash = ComputeIntegerHash(tag(), v8::internal::kZeroHashSeed);
  if (script_id_ != v8::UnboundScript::kNoScriptId) {
    hash ^= ComputeIntegerHash(static_cast<uint32_t>(script_id_),
                               v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(static_cast<uint32_t>(position_),
                               v8::internal::kZeroHashSeed);
  } else {
    // The name strings come from a string interning table, so hashing the
    // pointer values is sufficient to distinguish distinct names.
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(line_number_, v8::internal::kZeroHashSeed);
  }
  return hash;
}


118 119 120 121 122 123 124 125
// Returns true when |entry| denotes the same logical function: identical
// (script id, position) when a script id is known, otherwise identical
// interned name strings and line number. Mirrors the key used by GetHash().
bool CodeEntry::IsSameFunctionAs(CodeEntry* entry) const {
  if (entry == this) return true;
  if (script_id_ != v8::UnboundScript::kNoScriptId) {
    return entry->script_id_ == script_id_ && entry->position_ == position_;
  }
  // Strings are interned, so pointer equality is string equality here.
  return entry->name_prefix_ == name_prefix_ && entry->name_ == name_ &&
         entry->resource_name_ == resource_name_ &&
         entry->line_number_ == line_number_;
}


129
// Marks this entry as a builtin: forces the tag to BUILTIN_TAG and stores
// the builtin id in the packed bit field.
void CodeEntry::SetBuiltinId(Builtins::Name id) {
  bit_field_ = TagField::update(bit_field_, CodeEventListener::BUILTIN_TAG);
  bit_field_ = BuiltinIdField::update(bit_field_, id);
}


135 136 137 138 139 140 141
int CodeEntry::GetSourceLine(int pc_offset) const {
  if (line_info_ && !line_info_->empty()) {
    return line_info_->GetSourceLineNumber(pc_offset);
  }
  return v8::CpuProfileNode::kNoLineNumberInfo;
}

142 143 144 145 146 147 148 149 150 151 152 153
// Records the stack of entries inlined at |pc_offset|. Takes ownership of
// the vector's contents by swapping them into the map, leaving the caller's
// vector empty.
void CodeEntry::AddInlineStack(int pc_offset,
                               std::vector<CodeEntry*>& inline_stack) {
  // It's better to use std::move to place the vector into the map,
  // but it's not supported by the current stdlibc++ on MacOS.
  inline_locations_.insert(std::make_pair(pc_offset, std::vector<CodeEntry*>()))
      .first->second.swap(inline_stack);
}

// Returns the inline stack recorded at |pc_offset|, or nullptr when none
// exists. Uses nullptr (not NULL) for consistency with CodeMap::FindEntry.
const std::vector<CodeEntry*>* CodeEntry::GetInlineStack(int pc_offset) const {
  auto it = inline_locations_.find(pc_offset);
  return it != inline_locations_.end() ? &it->second : nullptr;
}
154

155 156 157 158 159 160 161 162 163 164 165 166 167
// Records the inlined-frame stack associated with deopt point |deopt_id|.
// Takes ownership of the vector's contents by swapping them into the map.
void CodeEntry::AddDeoptInlinedFrames(
    int deopt_id, std::vector<DeoptInlinedFrame>& inlined_frames) {
  // It's better to use std::move to place the vector into the map,
  // but it's not supported by the current stdlibc++ on MacOS.
  deopt_inlined_frames_
      .insert(std::make_pair(deopt_id, std::vector<DeoptInlinedFrame>()))
      .first->second.swap(inlined_frames);
}

// True when an inlined-frame stack was recorded for deopt point |deopt_id|.
bool CodeEntry::HasDeoptInlinedFramesFor(int deopt_id) const {
  return deopt_inlined_frames_.count(deopt_id) > 0;
}

168 169 170
// Copies script id, start position and bailout reason from |shared| into
// this entry. No-op when the function has no attached Script.
void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
  if (!shared->script()->IsScript()) return;
  Script* script = Script::cast(shared->script());
  set_script_id(script->id());
  set_position(shared->start_position());
  set_bailout_reason(GetBailoutReason(shared->disable_optimization_reason()));
}

176
// Builds the deopt info for the pending deopt recorded on this entry: the
// reason plus the stack of (script id, position) frames, innermost first.
// Requires has_deopt_info(); callers typically clear_deopt_info() afterwards.
CpuProfileDeoptInfo CodeEntry::GetDeoptInfo() {
  DCHECK(has_deopt_info());

  CpuProfileDeoptInfo info;
  info.deopt_reason = deopt_reason_;
  DCHECK_NE(kNoDeoptimizationId, deopt_id_);
  if (deopt_inlined_frames_.find(deopt_id_) == deopt_inlined_frames_.end()) {
    // No inlining at the deopt point: a single frame in this function.
    info.stack.push_back(CpuProfileDeoptFrame(
        {script_id_, position_ + deopt_position_.position()}));
  } else {
    // The raw deopt position applies only to the innermost inlined frame;
    // outer frames use their own recorded positions.
    size_t deopt_position = deopt_position_.raw();
    // Copy stack of inlined frames where the deopt happened.
    std::vector<DeoptInlinedFrame>& frames = deopt_inlined_frames_[deopt_id_];
    for (DeoptInlinedFrame& inlined_frame : base::Reversed(frames)) {
      info.stack.push_back(CpuProfileDeoptFrame(
          {inlined_frame.script_id, deopt_position + inlined_frame.position}));
      deopt_position = 0;  // Done with innermost frame.
    }
  }
  return info;
}


199
// Moves the pending deopt info from |entry| onto this node and clears it on
// the entry so it is reported only once.
void ProfileNode::CollectDeoptInfo(CodeEntry* entry) {
  deopt_infos_.push_back(entry->GetDeoptInfo());
  entry->clear_deopt_info();
}


205
// Returns the existing child node for |entry|, or nullptr if there is none.
// Uses nullptr (not NULL) for consistency with the other lookup helpers.
ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
  base::HashMap::Entry* map_entry =
      children_.Lookup(entry, CodeEntryHash(entry));
  return map_entry != nullptr
             ? reinterpret_cast<ProfileNode*>(map_entry->value)
             : nullptr;
}


// Returns the child node for |entry|, creating one (and registering it in
// both the hash map and the ordered children list) on first use.
ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
  base::HashMap::Entry* map_entry =
      children_.LookupOrInsert(entry, CodeEntryHash(entry));
  ProfileNode* node = reinterpret_cast<ProfileNode*>(map_entry->value);
  if (node == NULL) {
    // New node added.
    node = new ProfileNode(tree_, entry);
    map_entry->value = node;
    children_list_.Add(node);
  }
  return node;
}


227 228 229 230
// Bumps the tick counter for |src_line| on this node. The line number is
// stored as the hash-map key (cast to a pointer) and the count as the value.
void ProfileNode::IncrementLineTicks(int src_line) {
  if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) return;
  // Increment a hit counter of a certain source line.
  // Add a new source line if not found.
  base::HashMap::Entry* e =
      line_ticks_.LookupOrInsert(reinterpret_cast<void*>(src_line), src_line);
  DCHECK(e);
  // The counter lives in the void* value slot; new entries start at 0.
  e->value = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(e->value) + 1);
}


// Copies this node's per-line tick counts into the caller-supplied |entries|
// array of capacity |length|. Returns false when the buffer is null, empty,
// or too small; returns true (writing nothing) when there are no line ticks.
bool ProfileNode::GetLineTicks(v8::CpuProfileNode::LineTick* entries,
                               unsigned int length) const {
  if (entries == NULL || length == 0) return false;

  unsigned line_count = line_ticks_.occupancy();

  if (line_count == 0) return true;
  if (length < line_count) return false;

  v8::CpuProfileNode::LineTick* entry = entries;

  // Keys hold the line number and values the hit count, both stored as
  // void* — see IncrementLineTicks().
  for (base::HashMap::Entry *p = line_ticks_.Start(); p != NULL;
       p = line_ticks_.Next(p), entry++) {
    entry->line =
        static_cast<unsigned int>(reinterpret_cast<uintptr_t>(p->key));
    entry->hit_count =
        static_cast<unsigned int>(reinterpret_cast<uintptr_t>(p->value));
  }

  return true;
}


261
// Debug-prints this node (ticks, name, script id, node id), any collected
// deopt info and bailout reason, then recurses into children with increased
// indentation.
void ProfileNode::Print(int indent) {
  base::OS::Print("%5u %*s %s%s %d #%d", self_ticks_, indent, "",
                  entry_->name_prefix(), entry_->name(), entry_->script_id(),
                  id());
  if (entry_->resource_name()[0] != '\0')
    base::OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
  base::OS::Print("\n");
  for (size_t i = 0; i < deopt_infos_.size(); ++i) {
    CpuProfileDeoptInfo& info = deopt_infos_[i];
    // stack[0] is the function itself; subsequent entries are inline points.
    base::OS::Print("%*s;;; deopted at script_id: %d position: %" PRIuS
                    " with reason '%s'.\n",
                    indent + 10, "", info.stack[0].script_id,
                    info.stack[0].position, info.deopt_reason);
    for (size_t index = 1; index < info.stack.size(); ++index) {
      base::OS::Print("%*s;;;     Inline point: script_id %d position: %" PRIuS
                      ".\n",
                      indent + 10, "", info.stack[index].script_id,
                      info.stack[index].position);
    }
  }
  // Only print a bailout reason when one was actually recorded (pointer
  // comparison works because the strings are shared constants).
  const char* bailout_reason = entry_->bailout_reason();
  if (bailout_reason != GetBailoutReason(BailoutReason::kNoReason) &&
      bailout_reason != CodeEntry::kEmptyBailoutReason) {
    base::OS::Print("%*s bailed out due to '%s'\n", indent + 10, "",
                    bailout_reason);
  }
  for (base::HashMap::Entry* p = children_.Start(); p != NULL;
       p = children_.Next(p)) {
    reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
  }
}


// Traversal callback that deletes every node post-order; used by
// ~ProfileTree via TraverseDepthFirst so children die before their parent.
class DeleteNodesCallback {
 public:
  void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }

  void AfterAllChildrenTraversed(ProfileNode* node) {
    delete node;
  }

  void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};

305
// Creates a tree with a synthetic "(root)" entry; node and function ids
// start at 1 (0 is reserved as "no id").
ProfileTree::ProfileTree(Isolate* isolate)
    : root_entry_(CodeEventListener::FUNCTION_TAG, "(root)"),
      next_node_id_(1),
      root_(new ProfileNode(this, &root_entry_)),
      isolate_(isolate),
      next_function_id_(1),
      function_ids_(ProfileNode::CodeEntriesMatch) {}
312

313 314
// Deletes all nodes (including root_) via a post-order traversal.
ProfileTree::~ProfileTree() {
  DeleteNodesCallback cb;
  TraverseDepthFirst(&cb);
}


319 320
// Returns a stable small-integer id for the logical function of |node|,
// assigning the next free id on first sight. Uses CodeEntry::GetHash /
// CodeEntriesMatch so different CodeEntry instances of the same function
// share an id.
unsigned ProfileTree::GetFunctionId(const ProfileNode* node) {
  CodeEntry* code_entry = node->entry();
  base::HashMap::Entry* entry =
      function_ids_.LookupOrInsert(code_entry, code_entry->GetHash());
  if (!entry->value) {
    entry->value = reinterpret_cast<void*>(next_function_id_++);
  }
  return static_cast<unsigned>(reinterpret_cast<uintptr_t>(entry->value));
}

329
// Walks |path| from its end (outermost caller) down to its start (innermost
// frame), creating tree nodes as needed, and returns the node for the top
// frame. Null entries in the path are skipped. Optionally bumps the tick
// counters for the top node and |src_line|.
ProfileNode* ProfileTree::AddPathFromEnd(const std::vector<CodeEntry*>& path,
                                         int src_line, bool update_stats) {
  ProfileNode* node = root_;
  CodeEntry* last_entry = NULL;
  for (auto it = path.rbegin(); it != path.rend(); ++it) {
    if (*it == NULL) continue;
    last_entry = *it;
    node = node->FindOrAddChild(*it);
  }
  // Attach any pending deopt info of the innermost entry to its node.
  if (last_entry && last_entry->has_deopt_info()) {
    node->CollectDeoptInfo(last_entry);
  }
  if (update_stats) {
    node->IncrementSelfTicks();
    if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
      node->IncrementLineTicks(src_line);
    }
  }
  return node;
}


351 352 353 354 355 356 357 358
// Simple (source node, destination node) pair.
// NOTE(review): not referenced anywhere in this file's visible code —
// presumably a leftover from an earlier tree-cloning traversal; verify
// before removing.
struct NodesPair {
  NodesPair(ProfileNode* src, ProfileNode* dst)
      : src(src), dst(dst) { }
  ProfileNode* src;
  ProfileNode* dst;
};


359 360 361 362
// Cursor over one node's children; stack frame type for the iterative
// traversal in ProfileTree::TraverseDepthFirst.
class Position {
 public:
  explicit Position(ProfileNode* node)
      : node(node), child_idx_(0) { }
  // Child currently being visited; only valid if has_current_child().
  INLINE(ProfileNode* current_child()) {
    return node->children()->at(child_idx_);
  }
  INLINE(bool has_current_child()) {
    return child_idx_ < node->children()->length();
  }
  INLINE(void next_child()) { ++child_idx_; }

  ProfileNode* node;
 private:
  int child_idx_;
};


377
// Non-recursive implementation of a depth-first post-order tree traversal.
// The callback receives BeforeTraversingChild / AfterChildTraversed around
// each child and AfterAllChildrenTraversed once a node is complete (so the
// callback may safely delete the node there — see DeleteNodesCallback).
template <typename Callback>
void ProfileTree::TraverseDepthFirst(Callback* callback) {
  List<Position> stack(10);
  stack.Add(Position(root_));
  while (stack.length() > 0) {
    Position& current = stack.last();
    if (current.has_current_child()) {
      callback->BeforeTraversingChild(current.node, current.current_child());
      stack.Add(Position(current.current_child()));
    } else {
      callback->AfterAllChildrenTraversed(current.node);
      if (stack.length() > 1) {
        Position& parent = stack[stack.length() - 2];
        callback->AfterChildTraversed(parent.node, current.node);
        parent.next_child();
      }
      // Remove child from the stack.
      stack.RemoveLast();
    }
  }
}

400 401
// Starts a profile: records the start timestamp immediately and whether
// individual samples (with timestamps) should be retained.
CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
                       bool record_samples)
    : title_(title),
      record_samples_(record_samples),
      start_time_(base::TimeTicks::HighResolutionNow()),
      top_down_(profiler->isolate()),
      profiler_(profiler) {}
407

408
// Merges one sampled stack into the top-down tree and, when sample
// recording is on and the timestamp is valid, remembers the (timestamp,
// top-frame node) pair for later per-sample queries.
void CpuProfile::AddPath(base::TimeTicks timestamp,
                         const std::vector<CodeEntry*>& path, int src_line,
                         bool update_stats) {
  ProfileNode* top_frame_node =
      top_down_.AddPathFromEnd(path, src_line, update_stats);
  if (record_samples_ && !timestamp.IsNull()) {
    timestamps_.Add(timestamp);
    samples_.Add(top_frame_node);
  }
}

419
// Finalizes the profile. Despite the historical name, in this version it
// only records the end timestamp.
void CpuProfile::CalculateTotalTicksAndSamplingRate() {
  end_time_ = base::TimeTicks::HighResolutionNow();
}

// Debug dump of the whole top-down call tree.
void CpuProfile::Print() {
  base::OS::Print("[Top down]:\n");
  top_down_.Print();
}

428 429
// Registers a code range [addr, addr + size) for |entry|, first evicting
// any previously registered ranges that overlap it.
void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
  DeleteAllCoveredCode(addr, addr + size);
  code_map_.insert({addr, CodeEntryInfo(entry, size)});
}

// Removes every map entry whose code range overlaps [start, end).
void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
  // Step back to the entry that may start before |start| but still overlap
  // it; step forward again if that entry ends at or before |start|.
  auto left = code_map_.upper_bound(start);
  if (left != code_map_.begin()) {
    --left;
    if (left->first + left->second.size <= start) ++left;
  }
  auto right = left;
  while (right != code_map_.end() && right->first < end) ++right;
  code_map_.erase(left, right);
}

yurys's avatar
yurys committed
444
// Finds the entry whose code range contains |addr|, or nullptr when the
// address falls outside every registered range.
CodeEntry* CodeMap::FindEntry(Address addr) {
  auto it = code_map_.upper_bound(addr);
  // upper_bound gives the first range starting after |addr|; the candidate
  // is its predecessor, if any.
  if (it == code_map_.begin()) return nullptr;
  --it;
  const CodeEntryInfo& candidate = it->second;
  if (addr >= it->first + candidate.size) return nullptr;
  return candidate.entry;
}

452 453
// Relocates the code range starting at |from| to |to|, preserving its entry
// and size. No-op when |from| is unknown or the addresses are equal.
void CodeMap::MoveCode(Address from, Address to) {
  if (from == to) return;
  auto it = code_map_.find(from);
  if (it == code_map_.end()) return;
  CodeEntryInfo info = it->second;
  code_map_.erase(it);
  // Re-insert via AddCode so ranges overlapped by the new location are
  // evicted.
  AddCode(to, info.entry, info.size);
}

461
void CodeMap::Print() {
462 463 464 465
  for (auto it = code_map_.begin(); it != code_map_.end(); ++it) {
    base::OS::Print("%p %5d %s\n", static_cast<void*>(it->first),
                    it->second.size, it->second.entry->name());
  }
466 467
}

468
// The semaphore (initialized to 1) acts as a mutex guarding
// current_profiles_ against concurrent access from the sampler thread.
CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
    : resource_names_(isolate->heap()),
      profiler_(nullptr),
      current_profiles_semaphore_(1) {}
472

473 474 475 476
// List<T>::Iterate callback: frees one profile.
static void DeleteCpuProfile(CpuProfile** profile_ptr) {
  delete *profile_ptr;
}

477

478
// Frees all owned profiles, both finished and still-running ones.
CpuProfilesCollection::~CpuProfilesCollection() {
  finished_profiles_.Iterate(DeleteCpuProfile);
  current_profiles_.Iterate(DeleteCpuProfile);
}
482

483

484
// Starts a new profile with |title|. Returns false only when the limit of
// simultaneous profiles is reached; a duplicate title returns true without
// creating a second profile. Guarded by the semaphore because the sampler
// thread reads current_profiles_ concurrently.
bool CpuProfilesCollection::StartProfiling(const char* title,
                                           bool record_samples) {
  current_profiles_semaphore_.Wait();
  if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
    current_profiles_semaphore_.Signal();
    return false;
  }
  for (int i = 0; i < current_profiles_.length(); ++i) {
    if (strcmp(current_profiles_[i]->title(), title) == 0) {
      // Ignore attempts to start profile with the same title...
      current_profiles_semaphore_.Signal();
      // ... though return true to force it collect a sample.
      return true;
    }
  }
  current_profiles_.Add(new CpuProfile(profiler_, title, record_samples));
  current_profiles_semaphore_.Signal();
  return true;
}


505
// Stops and finalizes the most recent profile matching |title| (an empty
// title matches any profile) and moves it to finished_profiles_. Returns
// NULL when no running profile matches.
CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
  const int title_len = StrLength(title);
  CpuProfile* profile = NULL;
  current_profiles_semaphore_.Wait();
  // Search newest-first so nested profiles stop in LIFO order.
  for (int i = current_profiles_.length() - 1; i >= 0; --i) {
    if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
      profile = current_profiles_.Remove(i);
      break;
    }
  }
  current_profiles_semaphore_.Signal();

  if (profile == NULL) return NULL;
  profile->CalculateTotalTicksAndSamplingRate();
  finished_profiles_.Add(profile);
  return profile;
}


524 525 526 527 528 529 530 531 532
// True when exactly one profile is running and |title| matches it (an empty
// title matches any). Called from VM thread, and only it can mutate the
// list, so no locking is needed here.
bool CpuProfilesCollection::IsLastProfile(const char* title) {
  if (current_profiles_.length() != 1) return false;
  if (StrLength(title) == 0) return true;
  return strcmp(current_profiles_[0]->title(), title) == 0;
}


533 534
// Detaches |profile| from the finished list (ownership passes to the
// caller). The profile must be present; hitting the end is a bug.
void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
  // Called from VM thread for a completed profile.
  for (int i = 0; i < finished_profiles_.length(); i++) {
    if (profile == finished_profiles_[i]) {
      finished_profiles_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}

544
// Fans one sampled stack out to every currently running profile. Holds the
// semaphore for the whole loop (see comment below for why that is fine).
void CpuProfilesCollection::AddPathToCurrentProfiles(
    base::TimeTicks timestamp, const std::vector<CodeEntry*>& path,
    int src_line, bool update_stats) {
  // As starting / stopping profiles is rare relatively to this
  // method, we don't bother minimizing the duration of lock holding,
  // e.g. copying contents of the list to a local vector.
  current_profiles_semaphore_.Wait();
  for (int i = 0; i < current_profiles_.length(); ++i) {
    current_profiles_[i]->AddPath(timestamp, path, src_line, update_stats);
  }
  current_profiles_semaphore_.Signal();
}

557
// Non-owning reference to the profiles collection that receives the stacks
// symbolized by RecordTickSample.
ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
    : profiles_(profiles) {}
559 560

// Symbolizes one raw tick sample: resolves pc / tos / stack addresses to
// CodeEntry*s (expanding recorded inline stacks), picks the source line to
// attribute the tick to, and feeds the resulting path into all running
// profiles. Unresolvable frames are pushed as NULL so the path shape is
// preserved.
void ProfileGenerator::RecordTickSample(const TickSample& sample) {
  std::vector<CodeEntry*> entries;
  // Conservatively reserve space for stack frames + pc + function + vm-state.
  // There could in fact be more of them because of inlined entries.
  entries.reserve(sample.frames_count + 3);

  // The ProfileNode knows nothing about all versions of generated code for
  // the same JS function. The line number information associated with
  // the latest version of generated code is used to find a source line number
  // for a JS function. Then, the detected source line is passed to
  // ProfileNode to increase the tick count for this source line.
  int src_line = v8::CpuProfileNode::kNoLineNumberInfo;
  bool src_line_not_found = true;

  if (sample.pc != nullptr) {
    if (sample.has_external_callback && sample.state == EXTERNAL) {
      // Don't use PC when in external callback code, as it can point
      // inside callback's code, and we will erroneously report
      // that a callback calls itself.
      entries.push_back(code_map_.FindEntry(
          reinterpret_cast<Address>(sample.external_callback_entry)));
    } else {
      CodeEntry* pc_entry =
          code_map_.FindEntry(reinterpret_cast<Address>(sample.pc));
      // If there is no pc_entry we're likely in native code.
      // Find out, if top of stack was pointing inside a JS function
      // meaning that we have encountered a frameless invocation.
      if (!pc_entry && !sample.has_external_callback) {
        pc_entry = code_map_.FindEntry(reinterpret_cast<Address>(sample.tos));
      }
      // If pc is in the function code before it set up stack frame or after the
      // frame was destroyed SafeStackFrameIterator incorrectly thinks that
      // ebp contains return address of the current function and skips caller's
      // frame. Check for this case and just skip such samples.
      if (pc_entry) {
        int pc_offset = static_cast<int>(reinterpret_cast<Address>(sample.pc) -
                                         pc_entry->instruction_start());
        src_line = pc_entry->GetSourceLine(pc_offset);
        if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
          src_line = pc_entry->line_number();
        }
        src_line_not_found = false;
        entries.push_back(pc_entry);

        if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
            pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
          // When current function is either the Function.prototype.apply or the
          // Function.prototype.call builtin the top frame is either frame of
          // the calling JS function or internal frame.
          // In the latter case we know the caller for sure but in the
          // former case we don't so we simply replace the frame with
          // 'unresolved' entry.
          if (!sample.has_external_callback) {
            entries.push_back(CodeEntry::unresolved_entry());
          }
        }
      }
    }

    for (unsigned i = 0; i < sample.frames_count; ++i) {
      Address stack_pos = reinterpret_cast<Address>(sample.stack[i]);
      CodeEntry* entry = code_map_.FindEntry(stack_pos);

      if (entry) {
        // Find out if the entry has an inlining stack associated.
        int pc_offset =
            static_cast<int>(stack_pos - entry->instruction_start());
        const std::vector<CodeEntry*>* inline_stack =
            entry->GetInlineStack(pc_offset);
        if (inline_stack) {
          entries.insert(entries.end(), inline_stack->rbegin(),
                         inline_stack->rend());
        }
        // Skip unresolved frames (e.g. internal frame) and get source line of
        // the first JS caller.
        if (src_line_not_found) {
          src_line = entry->GetSourceLine(pc_offset);
          if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
            src_line = entry->line_number();
          }
          src_line_not_found = false;
        }
      }
      entries.push_back(entry);
    }
  }

  if (FLAG_prof_browser_mode) {
    bool no_symbolized_entries = true;
    for (auto e : entries) {
      if (e != NULL) {
        no_symbolized_entries = false;
        break;
      }
    }
    // If no frames were symbolized, put the VM state entry in.
    if (no_symbolized_entries) {
      entries.push_back(EntryForVMState(sample.state));
    }
  }

  profiles_->AddPathToCurrentProfiles(sample.timestamp, entries, src_line,
                                      sample.update_stats);
}

665

666 667 668
// Maps a VM state tag to the synthetic CodeEntry shown for unsymbolized
// samples in browser mode.
CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
  switch (tag) {
    case GC:
      return CodeEntry::gc_entry();
    case JS:
    case COMPILER:
    // DOM events handlers are reported as OTHER / EXTERNAL entries.
    // To avoid confusing people, let's put all these entries into
    // one bucket.
    case OTHER:
    case EXTERNAL:
      return CodeEntry::program_entry();
    case IDLE:
      return CodeEntry::idle_entry();
    default: return NULL;
  }
}

684 685
}  // namespace internal
}  // namespace v8