// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_V8_PROFILER_H_
#define V8_V8_PROFILER_H_

#include <limits.h>
#include <memory>
#include <unordered_set>
#include <vector>

#include "v8.h"  // NOLINT(build/include)

/**
 * Profiler support for the V8 JavaScript engine.
 */
namespace v8 {

class HeapGraphNode;
struct HeapStatsUpdate;

using NativeObject = void*;
using SnapshotObjectId = uint32_t;

struct CpuProfileDeoptFrame {
  int script_id;
  size_t position;
};

namespace internal {
class CpuProfile;
}  // namespace internal

}  // namespace v8

#ifdef V8_OS_WIN
template class V8_EXPORT std::vector<v8::CpuProfileDeoptFrame>;
#endif

namespace v8 {

struct V8_EXPORT CpuProfileDeoptInfo {
  /** A pointer to a static string owned by v8. */
  const char* deopt_reason;
  std::vector<CpuProfileDeoptFrame> stack;
};

}  // namespace v8

#ifdef V8_OS_WIN
template class V8_EXPORT std::vector<v8::CpuProfileDeoptInfo>;
#endif

namespace v8 {

/**
 * CpuProfileNode represents a node in a call graph.
 */
class V8_EXPORT CpuProfileNode {
 public:
  struct LineTick {
    /** The 1-based number of the source line where the function originates. */
    int line;

    /** The count of samples associated with the source line. */
    unsigned int hit_count;
  };

  // An annotation hinting at the source of a CpuProfileNode.
  enum SourceType {
    // User-supplied script with associated resource information.
    kScript = 0,
    // Native scripts and provided builtins.
    kBuiltin = 1,
    // Callbacks into native code.
    kCallback = 2,
    // VM-internal functions or state.
    kInternal = 3,
    // A node that failed to symbolize.
    kUnresolved = 4,
  };

  /** Returns function name (empty string for anonymous functions). */
  Local<String> GetFunctionName() const;

  /**
   * Returns function name (empty string for anonymous functions).
   * The string ownership is *not* passed to the caller. It stays valid until
   * the profile is deleted. The function is thread safe.
   */
  const char* GetFunctionNameStr() const;

  /** Returns the id of the script where the function is located. */
  int GetScriptId() const;

  /** Returns the resource name of the script where the function originates. */
  Local<String> GetScriptResourceName() const;

  /**
   * Returns the resource name of the script where the function originates.
   * The string ownership is *not* passed to the caller. It stays valid until
   * the profile is deleted. The function is thread safe.
   */
  const char* GetScriptResourceNameStr() const;

  /**
   * Returns true if the script from which the function originates is flagged
   * as being shared cross-origin.
   */
  bool IsScriptSharedCrossOrigin() const;

  /**
   * Returns the 1-based number of the line where the function originates, or
   * kNoLineNumberInfo if no line number information is available.
   */
  int GetLineNumber() const;

  /**
   * Returns the 1-based number of the column where the function originates,
   * or kNoColumnNumberInfo if no column number information is available.
   */
  int GetColumnNumber() const;

  /**
   * Returns the number of the function's source lines that collect the samples.
   */
  unsigned int GetHitLineCount() const;

  /** Returns the set of source lines that collect the samples.
   *  The caller allocates the buffer and is responsible for releasing it.
   *  Returns true if all available entries are copied, otherwise false.
   *  The function copies nothing if the buffer is not large enough.
   */
  bool GetLineTicks(LineTick* entries, unsigned int length) const;

  /** Returns bailout reason for the function
    * if the optimization was disabled for it.
    */
  const char* GetBailoutReason() const;

  /**
   * Returns the count of samples where the function was the one currently
   * executing.
   */
  unsigned GetHitCount() const;

  /** Returns the id of the node. The id is unique within the tree. */
  unsigned GetNodeId() const;

  /**
   * Gets the type of the source which the node was captured from.
   */
  SourceType GetSourceType() const;

  /** Returns the number of the node's child nodes. */
  int GetChildrenCount() const;

  /** Retrieves a child node by index. */
  const CpuProfileNode* GetChild(int index) const;

  /** Retrieves the parent node, or null if this is the root. */
  const CpuProfileNode* GetParent() const;

  /** Retrieves deopt infos for the node. */
  const std::vector<CpuProfileDeoptInfo>& GetDeoptInfos() const;

  static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
  static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
};


/**
 * CpuProfile contains a CPU profile in the form of a top-down call tree
 * (from main() down to functions that do all the work).
 */
class V8_EXPORT CpuProfile {
 public:
  /** Returns CPU profile title. */
  Local<String> GetTitle() const;

  /** Returns the root node of the top down call tree. */
  const CpuProfileNode* GetTopDownRoot() const;

  /**
   * Returns the number of samples recorded. The samples are not recorded
   * unless the |record_samples| parameter of CpuProfiler::StartProfiling is
   * true.
   */
  int GetSamplesCount() const;

  /**
   * Returns the profile node corresponding to the top frame of the sample at
   * the given index.
   */
  const CpuProfileNode* GetSample(int index) const;

  /**
   * Returns the timestamp of the sample. The timestamp is the number of
   * microseconds since some unspecified starting point.
   * The point is equal to the starting point used by GetStartTime.
   */
  int64_t GetSampleTimestamp(int index) const;

  /**
   * Returns time when the profile recording was started (in microseconds)
   * since some unspecified starting point.
   */
  int64_t GetStartTime() const;

  /**
   * Returns time when the profile recording was stopped (in microseconds)
   * since some unspecified starting point.
   * The point is equal to the starting point used by GetStartTime.
   */
  int64_t GetEndTime() const;

  /**
   * Deletes the profile and removes it from CpuProfiler's list.
   * All pointers to nodes previously returned become invalid.
   */
  void Delete();
};
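
// Example (illustrative sketch, not part of the V8 API): walking the top-down
// call tree of a collected profile. |profile| is assumed to be a CpuProfile
// returned by CpuProfiler::StopProfiling.
//
//   void PrintNode(const v8::CpuProfileNode* node, int depth) {
//     printf("%*s%s (%u samples)\n", depth * 2, "",
//            node->GetFunctionNameStr(), node->GetHitCount());
//     for (int i = 0; i < node->GetChildrenCount(); ++i)
//       PrintNode(node->GetChild(i), depth + 1);
//   }
//   // Called as: PrintNode(profile->GetTopDownRoot(), 0);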

enum CpuProfilingMode {
  // In the resulting CpuProfile tree, intermediate nodes in a stack trace
  // (from the root to a leaf) will have line numbers that point to the start
  // line of the function, rather than the line of the callsite of the child.
  kLeafNodeLineNumbers,
  // In the resulting CpuProfile tree, nodes are separated based on the line
  // number of their callsite in their parent.
  kCallerLineNumbers,
};

// Determines how names are derived for functions sampled.
enum CpuProfilingNamingMode {
  // Use the immediate name of functions at compilation time.
  kStandardNaming,
  // Use more verbose naming for functions without names, inferred from scope
  // where possible.
  kDebugNaming,
};

enum CpuProfilingLoggingMode {
  // Enables logging when a profile is active, and disables logging when all
  // profiles are detached.
  kLazyLogging,
  // Enables logging for the lifetime of the CpuProfiler. Calls to
  // StartRecording are faster, at the expense of runtime overhead.
  kEagerLogging,
};

/**
 * Optional profiling attributes.
 */
class V8_EXPORT CpuProfilingOptions {
 public:
  // Indicates that the sample buffer size should not be explicitly limited.
  static const unsigned kNoSampleLimit = UINT_MAX;

  /**
   * \param mode Type of computation of stack frame line numbers.
   * \param max_samples The maximum number of samples that should be recorded by
   *                    the profiler. Samples obtained after this limit will be
   *                    discarded.
   * \param sampling_interval_us controls the profile-specific target
   *                             sampling interval. The provided sampling
   *                             interval will be snapped to the next lowest
   *                             non-zero multiple of the profiler's sampling
   *                             interval, set via SetSamplingInterval(). If
   *                             zero, the sampling interval will be equal to
   *                             the profiler's sampling interval.
   */
  CpuProfilingOptions(
      CpuProfilingMode mode = kLeafNodeLineNumbers,
      unsigned max_samples = kNoSampleLimit, int sampling_interval_us = 0,
      MaybeLocal<Context> filter_context = MaybeLocal<Context>());

  CpuProfilingMode mode() const { return mode_; }
  unsigned max_samples() const { return max_samples_; }
  int sampling_interval_us() const { return sampling_interval_us_; }

 private:
  friend class internal::CpuProfile;

  bool has_filter_context() const { return !filter_context_.IsEmpty(); }
  void* raw_filter_context() const;

  CpuProfilingMode mode_;
  unsigned max_samples_;
  int sampling_interval_us_;
  CopyablePersistentTraits<Context>::CopyablePersistent filter_context_;
};
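
// Example (illustrative sketch, not part of the V8 API): building profiling
// options that split nodes by caller line number and cap the sample count.
// The numeric values below are arbitrary illustrations.
//
//   v8::CpuProfilingOptions options(v8::kCallerLineNumbers,
//                                   /*max_samples=*/10000,
//                                   /*sampling_interval_us=*/500);
//   // Passed to CpuProfiler::StartProfiling(title, options).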

/**
 * Interface for controlling CPU profiling. An instance of the profiler can be
 * created using the v8::CpuProfiler::New method.
 */
class V8_EXPORT CpuProfiler {
 public:
  /**
   * Creates a new CPU profiler for the |isolate|. The isolate must be
   * initialized. The profiler object must be disposed after use by calling
   * the |Dispose| method.
   */
  static CpuProfiler* New(Isolate* isolate,
                          CpuProfilingNamingMode = kDebugNaming,
                          CpuProfilingLoggingMode = kLazyLogging);

  /**
   * Synchronously collects a current stack sample in all profilers attached
   * to the |isolate|. The call does not affect the number of ticks recorded
   * for the current top node.
   */
  static void CollectSample(Isolate* isolate);

  /**
   * Disposes the CPU profiler object.
   */
  void Dispose();

  /**
   * Changes the default CPU profiler sampling interval to the specified
   * number of microseconds. The default interval is 1000us. This method must
   * be called when there are no profiles being recorded.
   */
  void SetSamplingInterval(int us);

  /**
   * Sets whether or not the profiler should prioritize consistency of sample
   * periodicity on Windows. Disabling this can greatly reduce CPU usage, but
   * may result in greater variance in sample timings from the platform's
   * scheduler. Defaults to enabled. This method must be called when there are
   * no profiles being recorded.
   */
  void SetUsePreciseSampling(bool);

  /**
   * Starts collecting a CPU profile. Title may be an empty string. Several
   * profiles may be collected at once. Attempts to start collecting several
   * profiles with the same title are silently ignored.
   */
  void StartProfiling(Local<String> title, CpuProfilingOptions options);

  /**
   * Starts profiling with the same semantics as above, except with expanded
   * parameters.
   *
   * |record_samples| parameter controls whether individual samples should
   * be recorded in addition to the aggregated tree.
   *
   * |max_samples| controls the maximum number of samples that should be
   * recorded by the profiler. Samples obtained after this limit will be
   * discarded.
   */
  void StartProfiling(
      Local<String> title, CpuProfilingMode mode, bool record_samples = false,
      unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);
  /**
   * The same as StartProfiling above, but the CpuProfilingMode defaults to
   * kLeafNodeLineNumbers mode, which was the previous default behavior of the
   * profiler.
   */
  void StartProfiling(Local<String> title, bool record_samples = false);

  /**
   * Stops collecting the CPU profile with the given title and returns it.
   * If the given title is empty, finishes the last profile started.
   */
  CpuProfile* StopProfiling(Local<String> title);

  /**
   * Generates more detailed source positions for code objects. This yields
   * better results when mapping profiling samples to script source.
   */
  static void UseDetailedSourcePositionsForProfiling(Isolate* isolate);

 private:
  CpuProfiler();
  ~CpuProfiler();
  CpuProfiler(const CpuProfiler&);
  CpuProfiler& operator=(const CpuProfiler&);
};
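
// Example (illustrative sketch, not part of the V8 API): a minimal CPU
// profiling session. |isolate| is assumed to be an initialized Isolate with
// an entered Context; error handling is omitted.
//
//   v8::CpuProfiler* profiler = v8::CpuProfiler::New(isolate);
//   v8::Local<v8::String> title = ...;  // e.g. created via String::NewFromUtf8
//   profiler->StartProfiling(title, /*record_samples=*/true);
//   // ... run the JavaScript code to be profiled ...
//   v8::CpuProfile* profile = profiler->StopProfiling(title);
//   // Inspect profile->GetTopDownRoot() and the recorded samples, then:
//   profile->Delete();
//   profiler->Dispose();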

/**
 * HeapGraphEdge represents a directed connection between heap
 * graph nodes: from retainers to retained nodes.
 */
class V8_EXPORT HeapGraphEdge {
 public:
  enum Type {
    kContextVariable = 0,  // A variable from a function context.
    kElement = 1,          // An element of an array.
    kProperty = 2,         // A named object property.
    kInternal = 3,         // A link that can't be accessed from JS,
                           // thus, its name isn't a real property name
                           // (e.g. parts of a ConsString).
    kHidden = 4,           // A link that is needed for proper sizes
                           // calculation, but may be hidden from user.
    kShortcut = 5,         // A link that must not be followed during
                           // sizes calculation.
    kWeak = 6              // A weak reference (ignored by the GC).
  };

  /** Returns edge type (see HeapGraphEdge::Type). */
  Type GetType() const;

  /**
   * Returns edge name. This can be a variable name, an element index, or
   * a property name.
   */
  Local<Value> GetName() const;

  /** Returns origin node. */
  const HeapGraphNode* GetFromNode() const;

  /** Returns destination node. */
  const HeapGraphNode* GetToNode() const;
};


/**
 * HeapGraphNode represents a node in a heap graph.
 */
class V8_EXPORT HeapGraphNode {
 public:
  enum Type {
    kHidden = 0,         // Hidden node, may be filtered when shown to user.
    kArray = 1,          // An array of elements.
    kString = 2,         // A string.
    kObject = 3,         // A JS object (except for arrays and strings).
    kCode = 4,           // Compiled code.
    kClosure = 5,        // Function closure.
    kRegExp = 6,         // RegExp.
    kHeapNumber = 7,     // Number stored in the heap.
    kNative = 8,         // Native object (not from V8 heap).
    kSynthetic = 9,      // Synthetic object, usually used for grouping
                         // snapshot items together.
    kConsString = 10,    // Concatenated string. A pair of pointers to strings.
    kSlicedString = 11,  // Sliced string. A fragment of another string.
    kSymbol = 12,        // A Symbol (ES6).
    kBigInt = 13         // BigInt.
  };

  /** Returns node type (see HeapGraphNode::Type). */
  Type GetType() const;

  /**
   * Returns node name. Depending on the node's type this can be the name
   * of the constructor (for objects), the name of the function (for
   * closures), a string value, or an empty string (for compiled code).
   */
  Local<String> GetName() const;

  /**
   * Returns node id. For the same heap object, the id remains the same
   * across all snapshots.
   */
  SnapshotObjectId GetId() const;

  /** Returns node's own size, in bytes. */
  size_t GetShallowSize() const;

  /** Returns the number of the node's child nodes. */
  int GetChildrenCount() const;

  /** Retrieves a child by index. */
  const HeapGraphEdge* GetChild(int index) const;
};


/**
 * An interface for exporting data from V8, using "push" model.
 */
class V8_EXPORT OutputStream {  // NOLINT
 public:
  enum WriteResult {
    kContinue = 0,
    kAbort = 1
  };
  virtual ~OutputStream() = default;
  /** Notify about the end of stream. */
  virtual void EndOfStream() = 0;
  /** Get preferred output chunk size. Called only once. */
  virtual int GetChunkSize() { return 1024; }
  /**
   * Writes the next chunk of snapshot data into the stream. Writing
   * can be stopped by returning kAbort as function result. EndOfStream
   * will not be called in case writing was aborted.
   */
  virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
  /**
   * Writes the next chunk of heap stats data into the stream. Writing
   * can be stopped by returning kAbort as function result. EndOfStream
   * will not be called in case writing was aborted.
   */
  virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
    return kAbort;
  }
};
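
// Example (illustrative sketch, not part of the V8 API): a minimal
// OutputStream implementation that appends serialized data to a std::string
// (assumes <string> is included). The class name and member are hypothetical.
//
//   class StringOutputStream : public v8::OutputStream {
//    public:
//     void EndOfStream() override {}
//     WriteResult WriteAsciiChunk(char* data, int size) override {
//       buffer_.append(data, size);
//       return kContinue;
//     }
//     const std::string& buffer() const { return buffer_; }
//
//    private:
//     std::string buffer_;
//   };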


/**
 * HeapSnapshots record the state of the JS heap at some moment.
 */
class V8_EXPORT HeapSnapshot {
 public:
  enum SerializationFormat {
    kJSON = 0  // See format description near 'Serialize' method.
  };

  /** Returns the root node of the heap graph. */
  const HeapGraphNode* GetRoot() const;

  /** Returns a node by its id. */
  const HeapGraphNode* GetNodeById(SnapshotObjectId id) const;

  /** Returns the total node count in the snapshot. */
  int GetNodesCount() const;

  /** Returns a node by index. */
  const HeapGraphNode* GetNode(int index) const;

  /** Returns the max seen JS object id. */
  SnapshotObjectId GetMaxSnapshotJSObjectId() const;

  /**
   * Deletes the snapshot and removes it from HeapProfiler's list.
   * All pointers to nodes, edges and paths previously returned become
   * invalid.
   */
  void Delete();

  /**
   * Prepares a serialized representation of the snapshot. The result
   * is written into the stream provided in chunks of the specified size.
   * The total length of the serialized snapshot is unknown in
   * advance; it can be roughly equal to the JS heap size (which means
   * it can be really big - tens of megabytes).
   *
   * For the JSON format, heap contents are represented as an object
   * with the following structure:
   *
   *  {
   *    snapshot: {
   *      title: "...",
   *      uid: nnn,
   *      meta: { meta-info },
   *      node_count: nnn,
   *      edge_count: nnn
   *    },
   *    nodes: [nodes array],
   *    edges: [edges array],
   *    strings: [strings array]
   *  }
   *
   * Nodes reference strings, other nodes, and edges by their indexes
   * in corresponding arrays.
   */
  void Serialize(OutputStream* stream,
                 SerializationFormat format = kJSON) const;
};
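
// Example (illustrative sketch, not part of the V8 API): taking a heap
// snapshot and serializing it as JSON to an OutputStream such as the
// StringOutputStream sketched above. |isolate| is assumed to be initialized.
//
//   v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
//   const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
//   StringOutputStream stream;
//   snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
//   // stream.buffer() now holds the JSON document described above.
//   heap_profiler->DeleteAllHeapSnapshots();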


/**
 * An interface for reporting progress and controlling long-running
 * activities.
 */
class V8_EXPORT ActivityControl {  // NOLINT
 public:
  enum ControlOption {
    kContinue = 0,
    kAbort = 1
  };
  virtual ~ActivityControl() = default;
  /**
   * Notify about current progress. The activity can be stopped by
   * returning kAbort as the callback result.
   */
  virtual ControlOption ReportProgressValue(int done, int total) = 0;
};


/**
 * AllocationProfile is a sampled profile of allocations done by the program.
 * This is structured as a call-graph.
 */
class V8_EXPORT AllocationProfile {
 public:
  struct Allocation {
    /**
     * Size of the sampled allocation object.
     */
    size_t size;

    /**
     * The number of objects of such size that were sampled.
     */
    unsigned int count;
  };

  /**
   * Represents a node in the call-graph.
   */
  struct Node {
    /**
     * Name of the function. May be empty for anonymous functions or if the
     * script corresponding to this function has been unloaded.
     */
    Local<String> name;

    /**
     * Name of the script containing the function. May be empty if the script
     * name is not available, or if the script has been unloaded.
     */
    Local<String> script_name;

    /**
     * id of the script where the function is located. May be equal to
     * v8::UnboundScript::kNoScriptId in cases where the script doesn't exist.
     */
    int script_id;

    /**
     * Start position of the function in the script.
     */
    int start_position;

    /**
     * 1-indexed line number where the function starts. May be
     * kNoLineNumberInfo if no line number information is available.
     */
    int line_number;

    /**
     * 1-indexed column number where the function starts. May be
     * kNoColumnNumberInfo if no column number information is available.
     */
    int column_number;

    /**
     * Unique id of the node.
     */
    uint32_t node_id;

    /**
     * List of callees called from this node for which we have sampled
     * allocations. The lifetime of the children is scoped to the containing
     * AllocationProfile.
     */
    std::vector<Node*> children;

    /**
     * List of self allocations done by this node in the call-graph.
     */
    std::vector<Allocation> allocations;
  };

  /**
   * Represents a single sample recorded for an allocation.
   */
  struct Sample {
    /**
     * id of the node in the profile tree.
     */
    uint32_t node_id;

    /**
     * Size of the sampled allocation object.
     */
    size_t size;

    /**
     * The number of objects of such size that were sampled.
     */
    unsigned int count;

    /**
     * Unique time-ordered id of the allocation sample. Can be used to track
     * what samples were added or removed between two snapshots.
     */
    uint64_t sample_id;
  };

  /**
   * Returns the root node of the call-graph. The root node corresponds to an
   * empty JS call-stack. The lifetime of the returned Node* is scoped to the
   * containing AllocationProfile.
   */
  virtual Node* GetRootNode() = 0;
  virtual const std::vector<Sample>& GetSamples() = 0;

  virtual ~AllocationProfile() = default;

  static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
  static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
};
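
// Example (illustrative sketch, not part of the V8 API): summing the sampled
// bytes recorded in an AllocationProfile. |profile| is assumed to come from
// HeapProfiler::GetAllocationProfile (see below).
//
//   size_t SampledBytes(const v8::AllocationProfile::Node* node) {
//     size_t total = 0;
//     for (const auto& allocation : node->allocations)
//       total += allocation.size * allocation.count;
//     for (const auto* child : node->children)
//       total += SampledBytes(child);
//     return total;
//   }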

/**
 * An object graph consisting of embedder objects and V8 objects.
 * Edges of the graph are strong references between the objects.
 * The embedder can build this graph during heap snapshot generation
 * to include the embedder objects in the heap snapshot.
 * Usage:
 * 1) Define a derived class of EmbedderGraph::Node for embedder objects.
 * 2) Set the build embedder graph callback on the heap profiler using
 *    HeapProfiler::AddBuildEmbedderGraphCallback.
 * 3) In the callback use graph->AddEdge(node1, node2) to add an edge from
 *    node1 to node2.
 * 4) To represent references from/to a V8 object, construct V8 nodes using
 *    graph->V8Node(value).
 */
class V8_EXPORT EmbedderGraph {
 public:
  class Node {
   public:
    Node() = default;
    virtual ~Node() = default;
    virtual const char* Name() = 0;
    virtual size_t SizeInBytes() = 0;
    /**
     * The corresponding V8 wrapper node if not null.
     * During heap snapshot generation the embedder node and the V8 wrapper
     * node will be merged into one node to simplify retaining paths.
     */
    virtual Node* WrapperNode() { return nullptr; }
    virtual bool IsRootNode() { return false; }
    /** Must return true for non-V8 nodes. */
    virtual bool IsEmbedderNode() { return true; }
    /**
     * Optional name prefix. It is used in Chrome for tagging detached nodes.
     */
    virtual const char* NamePrefix() { return nullptr; }

    /**
     * Returns the NativeObject that can be used for querying the
     * |HeapSnapshot|.
     */
    virtual NativeObject GetNativeObject() { return nullptr; }

    Node(const Node&) = delete;
    Node& operator=(const Node&) = delete;
  };

  /**
   * Returns a node corresponding to the given V8 value. Ownership is not
   * transferred. The result pointer is valid while the graph is alive.
   */
  virtual Node* V8Node(const v8::Local<v8::Value>& value) = 0;

  /**
   * Adds the given node to the graph and takes ownership of the node.
   * Returns a raw pointer to the node that is valid while the graph is alive.
   */
  virtual Node* AddNode(std::unique_ptr<Node> node) = 0;

  /**
   * Adds an edge that represents a strong reference from the given
   * node |from| to the given node |to|. The nodes must be added to the graph
   * before calling this function.
   *
   * If name is nullptr, the edge will have auto-increment indexes, otherwise
   * it will be named accordingly.
   */
  virtual void AddEdge(Node* from, Node* to, const char* name = nullptr) = 0;

  virtual ~EmbedderGraph() = default;
};
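
// Example (illustrative sketch, not part of the V8 API): a
// BuildEmbedderGraphCallback that exposes one embedder-owned object to the
// heap snapshot. EmbedderNode, GetMyEmbedderObject and GetItsWrapper are
// hypothetical embedder-side helpers.
//
//   void BuildEmbedderGraph(v8::Isolate* isolate, v8::EmbedderGraph* graph,
//                           void* data) {
//     auto* node = graph->AddNode(std::make_unique<EmbedderNode>(
//         GetMyEmbedderObject(data)));
//     // Connect the embedder node to the V8 wrapper object it keeps alive.
//     graph->AddEdge(node, graph->V8Node(GetItsWrapper(isolate, data)));
//   }
//   // Registered via:
//   //   isolate->GetHeapProfiler()->AddBuildEmbedderGraphCallback(
//   //       BuildEmbedderGraph, data);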

/**
 * Interface for controlling heap profiling. An instance of the profiler can
 * be retrieved using v8::Isolate::GetHeapProfiler.
 */
class V8_EXPORT HeapProfiler {
 public:
  enum SamplingFlags {
    kSamplingNoFlags = 0,
    kSamplingForceGC = 1 << 0,
  };

  /**
   * Callback function invoked during heap snapshot generation to retrieve
   * the embedder object graph. The callback should use graph->AddEdge(..) to
   * add references between the objects.
   * The callback must not trigger garbage collection in V8.
   */
  typedef void (*BuildEmbedderGraphCallback)(v8::Isolate* isolate,
                                             v8::EmbedderGraph* graph,
                                             void* data);

  /** Returns the number of snapshots taken. */
  int GetSnapshotCount();

  /** Returns a snapshot by index. */
  const HeapSnapshot* GetHeapSnapshot(int index);

  /**
   * Returns SnapshotObjectId for a heap object referenced by |value| if
   * it has been seen by the heap profiler, kUnknownObjectId otherwise.
   */
  SnapshotObjectId GetObjectId(Local<Value> value);

  /**
   * Returns SnapshotObjectId for a native object referenced by |value| if it
   * has been seen by the heap profiler, kUnknownObjectId otherwise.
   */
  SnapshotObjectId GetObjectId(NativeObject value);

  /**
   * Returns the heap object with the given SnapshotObjectId if the object is
   * alive; otherwise an empty handle is returned.
   */
  Local<Value> FindObjectById(SnapshotObjectId id);

  /**
   * Clears internal map from SnapshotObjectId to heap object. The new objects
   * will not be added into it unless a heap snapshot is taken or heap object
   * tracking is kicked off.
   */
  void ClearObjectIds();

  /**
   * A constant for an invalid SnapshotObjectId. GetObjectId will return it if
   * the heap profiler cannot find an id for the object passed as parameter.
   * HeapSnapshot::GetNodeById will always return NULL for such an id.
   */
  static const SnapshotObjectId kUnknownObjectId = 0;

  /**
   * Callback interface for retrieving user friendly names of global objects.
   */
  class ObjectNameResolver {
   public:
    /**
     * Returns the name to be used in the heap snapshot for the given node.
     * The returned string must stay alive until the snapshot collection is
     * completed.
     */
    virtual const char* GetName(Local<Object> object) = 0;

   protected:
    virtual ~ObjectNameResolver() = default;
  };

  /**
   * Takes a heap snapshot and returns it.
   */
  const HeapSnapshot* TakeHeapSnapshot(
      ActivityControl* control = nullptr,
      ObjectNameResolver* global_object_name_resolver = nullptr,
      bool treat_global_objects_as_roots = true);

  /**
   * Starts tracking of heap object population statistics. After calling
   * this method, all heap object relocations done by the garbage collector
   * are registered.
   *
   * The |track_allocations| parameter controls whether the stack trace of
   * each allocation in the heap will be recorded and reported as part of
   * the HeapSnapshot.
   */
  void StartTrackingHeapObjects(bool track_allocations = false);

  /**
   * Adds a new time interval entry to the aggregated statistics array. The
   * time interval entry contains information on the current heap object
   * population size. The method also updates aggregated statistics and
   * reports updates for all previous time intervals via the OutputStream
   * object. Updates on each time interval are provided as a stream of
   * HeapStatsUpdate structure instances.
   * If |timestamp_us| is supplied, the timestamp of the new entry will be
   * written into it. The return value of the function is the last seen heap
   * object id.
   *
   * StartTrackingHeapObjects must be called before the first call to this
   * method.
   */
  SnapshotObjectId GetHeapStats(OutputStream* stream,
                                int64_t* timestamp_us = nullptr);

  /**
   * Stops tracking of heap object population statistics, cleans up all
   * collected data. StartTrackingHeapObjects must be called again prior to
   * calling GetHeapStats next time.
   */
  void StopTrackingHeapObjects();

  /**
   * Starts gathering a sampling heap profile. A sampling heap profile is
   * similar to tcmalloc's heap profiler and Go's mprof. It samples object
   * allocations and builds an online 'sampling' heap profile. At any point in
   * time, this profile is expected to be a representative sample of objects
   * currently live in the system. Each sampled allocation includes the stack
   * trace at the time of allocation, which makes this really useful for memory
   * leak detection.
   *
   * This mechanism is intended to be cheap enough that it can be used in
   * production with minimal performance overhead.
   *
   * Allocations are sampled using a randomized Poisson process. On average, one
   * allocation will be sampled every |sample_interval| bytes allocated. The
   * |stack_depth| parameter controls the maximum number of stack frames to be
   * captured on each allocation.
   *
   * NOTE: This is a proof-of-concept at this point. Right now we only sample
   * newspace allocations. Support for paged space allocation (e.g. pre-tenured
   * objects, large objects, code objects, etc.) and native allocations
   * doesn't exist yet, but is anticipated in the future.
   *
   * Objects allocated before the sampling is started will not be included in
   * the profile.
   *
   * Returns false if a sampling heap profiler is already running.
   */
  bool StartSamplingHeapProfiler(uint64_t sample_interval = 512 * 1024,
                                 int stack_depth = 16,
                                 SamplingFlags flags = kSamplingNoFlags);

  /**
   * Stops the sampling heap profiler and discards the current profile.
   */
  void StopSamplingHeapProfiler();

  /**
   * Returns the sampled profile of allocations made (and still live) since
   * StartSamplingHeapProfiler was called. The ownership of the pointer is
   * transferred to the caller. Returns nullptr if the sampling heap profiler
   * is not active.
   */
  AllocationProfile* GetAllocationProfile();

  /**
   * Deletes all snapshots taken. All previously returned pointers to
   * snapshots and their contents become invalid after this call.
   */
  void DeleteAllHeapSnapshots();

  void AddBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
                                     void* data);
  void RemoveBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
                                        void* data);

  /**
   * Default value of persistent handle class ID. Must not be used to
   * define a class. Can be used to reset a class of a persistent
   * handle.
   */
  static const uint16_t kPersistentHandleNoClassId = 0;

 private:
  HeapProfiler();
  ~HeapProfiler();
  HeapProfiler(const HeapProfiler&);
  HeapProfiler& operator=(const HeapProfiler&);
};
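
// Example (illustrative sketch, not part of the V8 API): a sampling heap
// profiling session. |isolate| is assumed to be an initialized Isolate.
//
//   v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
//   heap_profiler->StartSamplingHeapProfiler(/*sample_interval=*/128 * 1024,
//                                            /*stack_depth=*/32);
//   // ... run the workload of interest ...
//   std::unique_ptr<v8::AllocationProfile> profile(
//       heap_profiler->GetAllocationProfile());
//   // Walk profile->GetRootNode() (see SampledBytes above), then stop:
//   heap_profiler->StopSamplingHeapProfiler();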

/**
 * A struct for exporting HeapStats data from V8, using "push" model.
 * See HeapProfiler::GetHeapStats.
 */
struct HeapStatsUpdate {
  HeapStatsUpdate(uint32_t index, uint32_t count, uint32_t size)
    : index(index), count(count), size(size) { }
  uint32_t index;  // Index of the time interval that was changed.
  uint32_t count;  // New value of count field for the interval with this index.
  uint32_t size;  // New value of size field for the interval with this index.
};
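
// Example (illustrative sketch, not part of the V8 API): consuming heap stats
// updates pushed through OutputStream::WriteHeapStatsChunk. The class name
// and the OnIntervalUpdate helper are hypothetical.
//
//   class HeapStatsStream : public v8::OutputStream {
//    public:
//     void EndOfStream() override {}
//     WriteResult WriteAsciiChunk(char*, int) override { return kAbort; }
//     WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* data,
//                                     int count) override {
//       for (int i = 0; i < count; ++i)
//         OnIntervalUpdate(data[i].index, data[i].count, data[i].size);
//       return kContinue;
//     }
//   };
//   // Used with HeapProfiler::StartTrackingHeapObjects() followed by
//   // periodic calls to HeapProfiler::GetHeapStats(&stream).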

#define CODE_EVENTS_LIST(V) \
  V(Builtin)                \
  V(Callback)               \
  V(Eval)                   \
  V(Function)               \
  V(InterpretedFunction)    \
  V(Handler)                \
  V(BytecodeHandler)        \
  V(LazyCompile)            \
  V(RegExp)                 \
  V(Script)                 \
  V(Stub)                   \
  V(Relocation)

/**
 * Note that this enum may be extended in the future. Please include a default
 * case if this enum is used in a switch statement.
 */
enum CodeEventType {
  kUnknownType = 0
#define V(Name) , k##Name##Type
  CODE_EVENTS_LIST(V)
#undef V
};
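
// Example (illustrative sketch, not part of the V8 API): switching over
// CodeEventType with a default case, as the comment above recommends, since
// the enum may grow new values.
//
//   const char* Describe(v8::CodeEventType type) {
//     switch (type) {
//       case v8::kFunctionType:
//         return "function";
//       case v8::kBuiltinType:
//         return "builtin";
//       default:
//         return v8::CodeEvent::GetCodeEventTypeName(type);
//     }
//   }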

/**
 * Representation of a code creation event
 */
class V8_EXPORT CodeEvent {
 public:
  uintptr_t GetCodeStartAddress();
  size_t GetCodeSize();
  Local<String> GetFunctionName();
  Local<String> GetScriptName();
  int GetScriptLine();
  int GetScriptColumn();
  /**
   * NOTE (mmarchini): We can't allocate objects in the heap when we collect
   * existing code, and both the code type and the comment are not stored in the
   * heap, so we return those as const char*.
   */
  CodeEventType GetCodeType();
  const char* GetComment();

  static const char* GetCodeEventTypeName(CodeEventType code_event_type);

  uintptr_t GetPreviousCodeStartAddress();
};

/**
 * Interface to listen to code creation and code relocation events.
 */
class V8_EXPORT CodeEventHandler {
 public:
  /**
   * Creates a new listener for the |isolate|. The isolate must be initialized.
   * The listener object must be disposed after use by calling |Dispose| method.
   * Multiple listeners can be created for the same isolate.
   */
  explicit CodeEventHandler(Isolate* isolate);
  virtual ~CodeEventHandler();

  /**
   * Handle is called every time a code object is created or moved. Information
   * about each code event will be available through the `code_event`
   * parameter.
   *
   * When the CodeEventType is kRelocationType, the code for this CodeEvent has
   * moved from `GetPreviousCodeStartAddress()` to `GetCodeStartAddress()`.
   */
  virtual void Handle(CodeEvent* code_event) = 0;

  /**
   * Call `Enable()` to start listening to code creation and code relocation
   * events. These events will be handled by `Handle()`.
   */
  void Enable();

  /**
   * Call `Disable()` to stop listening to code creation and code relocation
   * events.
   */
  void Disable();

 private:
  CodeEventHandler();
  CodeEventHandler(const CodeEventHandler&);
  CodeEventHandler& operator=(const CodeEventHandler&);
  void* internal_listener_;
};
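
// Example (illustrative sketch, not part of the V8 API): a CodeEventHandler
// subclass that logs code creation and relocation events. The class name is
// hypothetical.
//
//   class LoggingCodeEventHandler : public v8::CodeEventHandler {
//    public:
//     explicit LoggingCodeEventHandler(v8::Isolate* isolate)
//         : v8::CodeEventHandler(isolate) {}
//     void Handle(v8::CodeEvent* event) override {
//       printf("%s code at %p, size %zu\n",
//              v8::CodeEvent::GetCodeEventTypeName(event->GetCodeType()),
//              reinterpret_cast<void*>(event->GetCodeStartAddress()),
//              event->GetCodeSize());
//     }
//   };
//   // LoggingCodeEventHandler handler(isolate); handler.Enable();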

}  // namespace v8


#endif  // V8_V8_PROFILER_H_