// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_PROFILER_PROFILE_GENERATOR_H_
#define V8_PROFILER_PROFILE_GENERATOR_H_

#include <atomic>
#include <deque>
#include <limits>
#include <map>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "include/v8-profiler.h"
#include "src/base/platform/time.h"
#include "src/builtins/builtins.h"
#include "src/execution/vm-state.h"
#include "src/logging/code-events.h"
#include "src/profiler/strings-storage.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {

struct TickSample;

// Provides a mapping from the offsets within generated code or a bytecode array
// to the source line and inlining id.
class V8_EXPORT_PRIVATE SourcePositionTable : public Malloced {
 public:
  SourcePositionTable() = default;
  SourcePositionTable(const SourcePositionTable&) = delete;
  SourcePositionTable& operator=(const SourcePositionTable&) = delete;

  void SetPosition(int pc_offset, int line, int inlining_id);
  int GetSourceLineNumber(int pc_offset) const;
  int GetInliningId(int pc_offset) const;

  size_t Size() const;

  void print() const;

 private:
  struct SourcePositionTuple {
    bool operator<(const SourcePositionTuple& other) const {
      return pc_offset < other.pc_offset;
    }
    int pc_offset;
    int line_number;
    int inlining_id;
  };
  // This is logically a map, but we store it as a vector of tuples, sorted by
  // the pc offset, so that we can save space and look up items using binary
  // search.
  std::vector<SourcePositionTuple> pc_offsets_to_lines_;
};
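
// An illustrative sketch of the lookup contract implied by the sorted-vector
// comment above (values are hypothetical; -1 is assumed to mean "not
// inlined"): a query resolves to the last entry at or before the pc offset.
//
//   SourcePositionTable table;
//   table.SetPosition(/*pc_offset=*/0, /*line=*/1, /*inlining_id=*/-1);
//   table.SetPosition(/*pc_offset=*/64, /*line=*/7, /*inlining_id=*/-1);
//   table.GetSourceLineNumber(10);   // -> 1
//   table.GetSourceLineNumber(100);  // -> 7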

struct CodeEntryAndLineNumber;

class CodeEntry {
 public:
  enum class CodeType { JS, WASM, OTHER };

  // CodeEntry may reference strings (|name|, |resource_name|) managed by a
  // StringsStorage instance. These must be freed via ReleaseStrings.
  inline CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
                   const char* resource_name = CodeEntry::kEmptyResourceName,
                   int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
                   int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
                   std::unique_ptr<SourcePositionTable> line_info = nullptr,
                   bool is_shared_cross_origin = false,
                   CodeType code_type = CodeType::JS);
  CodeEntry(const CodeEntry&) = delete;
  CodeEntry& operator=(const CodeEntry&) = delete;
  ~CodeEntry() {
    // No alive handles should be associated with the CodeEntry at time of
    // destruction.
    DCHECK(!heap_object_location_);
    DCHECK_EQ(ref_count_, 0UL);
  }

  const char* name() const { return name_; }
  const char* resource_name() const { return resource_name_; }
  int line_number() const { return line_number_; }
  int column_number() const { return column_number_; }
  const SourcePositionTable* line_info() const { return line_info_.get(); }
  int script_id() const { return script_id_; }
  void set_script_id(int script_id) { script_id_ = script_id; }
  int position() const { return position_; }
  void set_position(int position) { position_ = position; }
  void set_bailout_reason(const char* bailout_reason) {
    EnsureRareData()->bailout_reason_ = bailout_reason;
  }
  const char* bailout_reason() const {
    return rare_data_ ? rare_data_->bailout_reason_ : kEmptyBailoutReason;
  }

  void set_deopt_info(const char* deopt_reason, int deopt_id,
                      std::vector<CpuProfileDeoptFrame> inlined_frames);

  size_t EstimatedSize() const;
  CpuProfileDeoptInfo GetDeoptInfo();
  bool has_deopt_info() const {
    return rare_data_ && rare_data_->deopt_id_ != kNoDeoptimizationId;
  }
  void clear_deopt_info() {
    if (!rare_data_) return;
    // TODO(alph): Clear rare_data_ if that was the only field in use.
    rare_data_->deopt_reason_ = kNoDeoptReason;
    rare_data_->deopt_id_ = kNoDeoptimizationId;
  }

  const char* code_type_string() const {
    switch (CodeTypeField::decode(bit_field_)) {
      case CodeType::JS:
        return "JS";
      case CodeType::WASM:
        return "wasm";
      case CodeType::OTHER:
        return "other";
    }
  }

  // Returns the start address of the instruction segment represented by this
  // CodeEntry. Used as a key in the containing CodeMap.
  Address instruction_start() const { return instruction_start_; }
  void set_instruction_start(Address address) { instruction_start_ = address; }

  Address** heap_object_location_address() { return &heap_object_location_; }

  void FillFunctionInfo(SharedFunctionInfo shared);

  void SetBuiltinId(Builtin id);
  Builtin builtin() const { return BuiltinField::decode(bit_field_); }

  bool is_shared_cross_origin() const {
    return SharedCrossOriginField::decode(bit_field_);
  }

  // Returns whether or not the lifetime of this CodeEntry is reference
  // counted, and managed by a CodeMap.
  bool is_ref_counted() const { return RefCountedField::decode(bit_field_); }

  uint32_t GetHash() const;
  bool IsSameFunctionAs(const CodeEntry* entry) const;

  int GetSourceLine(int pc_offset) const;

  struct Equals {
    bool operator()(const CodeEntry* lhs, const CodeEntry* rhs) const {
      return lhs->IsSameFunctionAs(rhs);
    }
  };
  struct Hasher {
    std::size_t operator()(CodeEntry* e) const { return e->GetHash(); }
  };

  void SetInlineStacks(
      std::unordered_set<CodeEntry*, Hasher, Equals> inline_entries,
      std::unordered_map<int, std::vector<CodeEntryAndLineNumber>>
          inline_stacks);
  const std::vector<CodeEntryAndLineNumber>* GetInlineStack(
      int pc_offset) const;

  CodeEventListener::LogEventsAndTags tag() const {
    return TagField::decode(bit_field_);
  }

  V8_EXPORT_PRIVATE static const char* const kEmptyResourceName;
  static const char* const kEmptyBailoutReason;
  static const char* const kNoDeoptReason;

  V8_EXPORT_PRIVATE static const char* const kProgramEntryName;
  V8_EXPORT_PRIVATE static const char* const kIdleEntryName;
  V8_EXPORT_PRIVATE static const char* const kGarbageCollectorEntryName;
  // Used to represent frames for which we have no reliable way to
  // detect the function.
  V8_EXPORT_PRIVATE static const char* const kUnresolvedFunctionName;
  V8_EXPORT_PRIVATE static const char* const kRootEntryName;

  V8_EXPORT_PRIVATE static CodeEntry* program_entry();
  V8_EXPORT_PRIVATE static CodeEntry* idle_entry();
  V8_EXPORT_PRIVATE static CodeEntry* gc_entry();
  V8_EXPORT_PRIVATE static CodeEntry* unresolved_entry();
  V8_EXPORT_PRIVATE static CodeEntry* root_entry();

  // Releases strings owned by this CodeEntry, which may be allocated in the
  // provided StringsStorage instance. This instance is not stored directly
  // with the CodeEntry in order to reduce memory footprint.
  // Called before every destruction.
  void ReleaseStrings(StringsStorage& strings);

  void print() const;

 private:
  friend class CodeEntryStorage;

  struct RareData {
    const char* deopt_reason_ = kNoDeoptReason;
    const char* bailout_reason_ = kEmptyBailoutReason;
    int deopt_id_ = kNoDeoptimizationId;
    std::unordered_map<int, std::vector<CodeEntryAndLineNumber>> inline_stacks_;
    std::unordered_set<CodeEntry*, Hasher, Equals> inline_entries_;
    std::vector<CpuProfileDeoptFrame> deopt_inlined_frames_;
  };

  RareData* EnsureRareData();

  void mark_ref_counted() {
    bit_field_ = RefCountedField::update(bit_field_, true);
    ref_count_ = 1;
  }

  size_t AddRef() {
    DCHECK(is_ref_counted());
    DCHECK_LT(ref_count_, std::numeric_limits<size_t>::max());
    ref_count_++;
    return ref_count_;
  }

  size_t DecRef() {
    DCHECK(is_ref_counted());
    DCHECK_GT(ref_count_, 0UL);
    ref_count_--;
    return ref_count_;
  }

  using TagField = base::BitField<CodeEventListener::LogEventsAndTags, 0, 8>;
  using BuiltinField = base::BitField<Builtin, 8, 20>;
  static_assert(Builtins::kBuiltinCount <= BuiltinField::kNumValues,
                "builtin_count exceeds size of bitfield");
  using RefCountedField = base::BitField<bool, 28, 1>;
  using CodeTypeField = base::BitField<CodeType, 29, 2>;
  using SharedCrossOriginField = base::BitField<bool, 31, 1>;
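  // Layout of bit_field_ as implied by the BitField definitions above:
  // bits 0-7 hold the event tag, bits 8-27 the builtin id, bit 28 the
  // ref-counted flag, bits 29-30 the code type, and bit 31 the
  // shared-cross-origin flag.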

  std::uint32_t bit_field_;
  std::atomic<std::size_t> ref_count_ = {0};
  const char* name_;
  const char* resource_name_;
  int line_number_;
  int column_number_;
  int script_id_;
  int position_;
  std::unique_ptr<SourcePositionTable> line_info_;
  std::unique_ptr<RareData> rare_data_;
  Address instruction_start_ = kNullAddress;
  Address* heap_object_location_ = nullptr;
};

struct CodeEntryAndLineNumber {
  CodeEntry* code_entry;
  int line_number;
};

using ProfileStackTrace = std::vector<CodeEntryAndLineNumber>;

// Filters stack frames from sources other than a target native context.
class ContextFilter {
 public:
  explicit ContextFilter(Address native_context_address = kNullAddress)
      : native_context_address_(native_context_address) {}

  // Invoked when a native context has changed address.
  void OnMoveEvent(Address from_address, Address to_address);

  bool Accept(Address native_context_address) const {
    if (native_context_address_ == kNullAddress) return true;
    return (native_context_address & ~kHeapObjectTag) ==
           native_context_address_;
  }

  // Update the context's tracked address based on VM-thread events.
  void set_native_context_address(Address address) {
    native_context_address_ = address;
  }
  Address native_context_address() const { return native_context_address_; }

 private:
  Address native_context_address_;
};
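
// Illustrative sketch of the tag-stripping in Accept() (addresses are
// hypothetical): the incoming pointer may carry the low heap-object tag bit,
// which is masked off before comparing against the stored address.
//
//   ContextFilter filter(/*native_context_address=*/0x2000);
//   filter.Accept(0x2000 | kHeapObjectTag);  // true
//   filter.Accept(0x3000 | kHeapObjectTag);  // false
//   ContextFilter().Accept(0x3000);          // true: a null filter accepts all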

class ProfileTree;

class V8_EXPORT_PRIVATE ProfileNode {
 public:
  inline ProfileNode(ProfileTree* tree, CodeEntry* entry, ProfileNode* parent,
                     int line_number = 0);
  ~ProfileNode();
  ProfileNode(const ProfileNode&) = delete;
  ProfileNode& operator=(const ProfileNode&) = delete;

  ProfileNode* FindChild(
      CodeEntry* entry,
      int line_number = v8::CpuProfileNode::kNoLineNumberInfo);
  ProfileNode* FindOrAddChild(CodeEntry* entry, int line_number = 0);
  void IncrementSelfTicks() { ++self_ticks_; }
  void IncreaseSelfTicks(unsigned amount) { self_ticks_ += amount; }
  void IncrementLineTicks(int src_line);

  CodeEntry* entry() const { return entry_; }
  unsigned self_ticks() const { return self_ticks_; }
  const std::vector<ProfileNode*>* children() const { return &children_list_; }
  unsigned id() const { return id_; }
  ProfileNode* parent() const { return parent_; }
  int line_number() const {
    return line_number_ != 0 ? line_number_ : entry_->line_number();
  }
  CpuProfileNode::SourceType source_type() const;

  unsigned int GetHitLineCount() const {
    return static_cast<unsigned int>(line_ticks_.size());
  }
  bool GetLineTicks(v8::CpuProfileNode::LineTick* entries,
                    unsigned int length) const;
  void CollectDeoptInfo(CodeEntry* entry);
  const std::vector<CpuProfileDeoptInfo>& deopt_infos() const {
    return deopt_infos_;
  }
  Isolate* isolate() const;

  void Print(int indent) const;

 private:
  struct Equals {
    bool operator()(CodeEntryAndLineNumber lhs,
                    CodeEntryAndLineNumber rhs) const {
      return lhs.code_entry->IsSameFunctionAs(rhs.code_entry) &&
             lhs.line_number == rhs.line_number;
    }
  };
  struct Hasher {
    std::size_t operator()(CodeEntryAndLineNumber pair) const {
      return pair.code_entry->GetHash() ^ ComputeUnseededHash(pair.line_number);
    }
  };

  ProfileTree* tree_;
  CodeEntry* entry_;
  unsigned self_ticks_;
  std::unordered_map<CodeEntryAndLineNumber, ProfileNode*, Hasher, Equals>
      children_;
  int line_number_;
  std::vector<ProfileNode*> children_list_;
  ProfileNode* parent_;
  unsigned id_;
  // maps line number --> number of ticks
  std::unordered_map<int, int> line_ticks_;

  std::vector<CpuProfileDeoptInfo> deopt_infos_;
};

class CodeEntryStorage;

class V8_EXPORT_PRIVATE ProfileTree {
 public:
  explicit ProfileTree(Isolate* isolate, CodeEntryStorage* storage = nullptr);
  ~ProfileTree();
  ProfileTree(const ProfileTree&) = delete;
  ProfileTree& operator=(const ProfileTree&) = delete;

  using ProfilingMode = v8::CpuProfilingMode;

  ProfileNode* AddPathFromEnd(
      const std::vector<CodeEntry*>& path,
      int src_line = v8::CpuProfileNode::kNoLineNumberInfo,
      bool update_stats = true);
  ProfileNode* AddPathFromEnd(
      const ProfileStackTrace& path,
      int src_line = v8::CpuProfileNode::kNoLineNumberInfo,
      bool update_stats = true,
      ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers);
  ProfileNode* root() const { return root_; }
  unsigned next_node_id() { return next_node_id_++; }

  void Print() const { root_->Print(0); }

  Isolate* isolate() const { return isolate_; }

  void EnqueueNode(const ProfileNode* node) { pending_nodes_.push_back(node); }
  size_t pending_nodes_count() const { return pending_nodes_.size(); }
  std::vector<const ProfileNode*> TakePendingNodes() {
    return std::move(pending_nodes_);
  }

  CodeEntryStorage* code_entries() { return code_entries_; }

 private:
  template <typename Callback>
  void TraverseDepthFirst(Callback* callback);

  std::vector<const ProfileNode*> pending_nodes_;

  unsigned next_node_id_;
  Isolate* isolate_;
  CodeEntryStorage* const code_entries_;
  ProfileNode* root_;
};
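
// Illustrative: AddPathFromEnd() receives a sample's stack ordered leaf-first
// (pc -> ... -> main(), as noted on CpuProfile::AddPath below), so it walks
// the vector from the back, descending from root() and creating or reusing
// one child per frame; the node returned corresponds to the sampled pc.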

class CpuProfiler;

class CpuProfile {
 public:
  struct SampleInfo {
    ProfileNode* node;
    base::TimeTicks timestamp;
    int line;
    StateTag state_tag;
    EmbedderStateTag embedder_state_tag;
  };

  V8_EXPORT_PRIVATE CpuProfile(
      CpuProfiler* profiler, const char* title, CpuProfilingOptions options,
      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
  CpuProfile(const CpuProfile&) = delete;
  CpuProfile& operator=(const CpuProfile&) = delete;

  // Checks whether or not the given TickSample should be (sub)sampled, given
  // the sampling interval of the profiler that recorded it (in microseconds).
  V8_EXPORT_PRIVATE bool CheckSubsample(base::TimeDelta sampling_interval);
  // Add pc -> ... -> main() call path to the profile.
  void AddPath(base::TimeTicks timestamp, const ProfileStackTrace& path,
               int src_line, bool update_stats,
               base::TimeDelta sampling_interval, StateTag state,
               EmbedderStateTag embedder_state);
  void FinishProfile();

  const char* title() const { return title_; }
  const ProfileTree* top_down() const { return &top_down_; }

  int samples_count() const { return static_cast<int>(samples_.size()); }
  const SampleInfo& sample(int index) const { return samples_[index]; }

  int64_t sampling_interval_us() const {
    return options_.sampling_interval_us();
  }

  base::TimeTicks start_time() const { return start_time_; }
  base::TimeTicks end_time() const { return end_time_; }
  CpuProfiler* cpu_profiler() const { return profiler_; }
  ContextFilter& context_filter() { return context_filter_; }

  void UpdateTicksScale();

  V8_EXPORT_PRIVATE void Print() const;

 private:
  void StreamPendingTraceEvents();

  const char* title_;
  const CpuProfilingOptions options_;
  std::unique_ptr<DiscardedSamplesDelegate> delegate_;
  ContextFilter context_filter_;
  base::TimeTicks start_time_;
  base::TimeTicks end_time_;
  std::deque<SampleInfo> samples_;
  ProfileTree top_down_;
  CpuProfiler* const profiler_;
  size_t streaming_next_sample_;
  uint32_t id_;
  // Number of microseconds worth of profiler ticks that should elapse before
  // the next sample is recorded.
  base::TimeDelta next_sample_delta_;

  static std::atomic<uint32_t> last_id_;
};
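
// Illustrative: CheckSubsample() lets a profile run at a coarser rate than
// the profiler that feeds it. With a 100us source interval and a profile
// configured for 300us, next_sample_delta_ is drawn down by 100us per tick,
// so roughly every third tick is recorded.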

class CpuProfileMaxSamplesCallbackTask : public v8::Task {
 public:
  CpuProfileMaxSamplesCallbackTask(
      std::unique_ptr<DiscardedSamplesDelegate> delegate)
      : delegate_(std::move(delegate)) {}

  void Run() override { delegate_->Notify(); }

 private:
  std::unique_ptr<DiscardedSamplesDelegate> delegate_;
};

class V8_EXPORT_PRIVATE CodeMap {
 public:
  explicit CodeMap(CodeEntryStorage& storage);
  ~CodeMap();
  CodeMap(const CodeMap&) = delete;
  CodeMap& operator=(const CodeMap&) = delete;

  // Adds the given CodeEntry to the CodeMap. The CodeMap takes ownership of
  // the CodeEntry.
  void AddCode(Address addr, CodeEntry* entry, unsigned size);
  void MoveCode(Address from, Address to);
  // Attempts to remove the given CodeEntry from the CodeMap.
  // Returns true iff the entry was found and removed.
  bool RemoveCode(CodeEntry*);
  void ClearCodesInRange(Address start, Address end);
  CodeEntry* FindEntry(Address addr, Address* out_instruction_start = nullptr);
  void Print();
  size_t size() const { return code_map_.size(); }

  size_t GetEstimatedMemoryUsage() const;

  CodeEntryStorage& code_entries() { return code_entries_; }

  void Clear();

 private:
  struct CodeEntryMapInfo {
    CodeEntry* entry;
    unsigned size;
  };

  std::multimap<Address, CodeEntryMapInfo> code_map_;
  CodeEntryStorage& code_entries_;
};

// Manages the lifetime of CodeEntry objects, and stores shared resources
// between them.
class V8_EXPORT_PRIVATE CodeEntryStorage {
 public:
  template <typename... Args>
  static CodeEntry* Create(Args&&... args) {
    CodeEntry* const entry = new CodeEntry(std::forward<Args>(args)...);
    entry->mark_ref_counted();
    return entry;
  }

  void AddRef(CodeEntry*);
  void DecRef(CodeEntry*);

  StringsStorage& strings() { return function_and_resource_names_; }

 private:
  StringsStorage function_and_resource_names_;
};
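
// Usage sketch (assumed from the API above): entries are created with
// Create(), which starts their reference count at one via
// CodeEntry::mark_ref_counted(); owners then balance AddRef()/DecRef(), and
// an entry whose count reaches zero has its strings released back into
// strings() before it is destroyed.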

class V8_EXPORT_PRIVATE CpuProfilesCollection {
 public:
  explicit CpuProfilesCollection(Isolate* isolate);
  CpuProfilesCollection(const CpuProfilesCollection&) = delete;
  CpuProfilesCollection& operator=(const CpuProfilesCollection&) = delete;

  void set_cpu_profiler(CpuProfiler* profiler) { profiler_ = profiler; }
  CpuProfilingStatus StartProfiling(
      const char* title, CpuProfilingOptions options = {},
      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);

  CpuProfile* StopProfiling(const char* title);
  std::vector<std::unique_ptr<CpuProfile>>* profiles() {
    return &finished_profiles_;
  }
  const char* GetName(Name name) { return resource_names_.GetName(name); }
  bool IsLastProfile(const char* title);
  void RemoveProfile(CpuProfile* profile);

  // Finds a common sampling interval dividing each CpuProfile's interval,
  // rounded up to the nearest multiple of the CpuProfiler's sampling interval.
  // Returns 0 if no profiles are attached.
  base::TimeDelta GetCommonSamplingInterval() const;
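  // Worked example (illustrative figures): with a profiler base interval of
  // 100us and two running profiles requesting 400us and 600us, each request
  // is already a multiple of the base, and the largest interval dividing
  // both is gcd(400, 600) = 200us.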

  // Called from profile generator thread.
  void AddPathToCurrentProfiles(
      base::TimeTicks timestamp, const ProfileStackTrace& path, int src_line,
      bool update_stats, base::TimeDelta sampling_interval, StateTag state,
      EmbedderStateTag embedder_state_tag,
      Address native_context_address = kNullAddress,
      Address native_embedder_context_address = kNullAddress);

  // Called from profile generator thread.
  void UpdateNativeContextAddressForCurrentProfiles(Address from, Address to);

  // Limits the number of profiles that can be simultaneously collected.
  static const int kMaxSimultaneousProfiles = 100;

 private:
  StringsStorage resource_names_;
  std::vector<std::unique_ptr<CpuProfile>> finished_profiles_;
  CpuProfiler* profiler_;

  // Accessed by VM thread and profile generator thread.
  std::vector<std::unique_ptr<CpuProfile>> current_profiles_;
  base::Semaphore current_profiles_semaphore_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_PROFILER_PROFILE_GENERATOR_H_