// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_PROFILER_CPU_PROFILER_H_
#define V8_PROFILER_CPU_PROFILER_H_

#include <memory>

#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/platform/time.h"
#include "src/compiler.h"
#include "src/isolate.h"
#include "src/libsampler/sampler.h"
#include "src/locked-queue.h"
#include "src/profiler/circular-queue.h"
#include "src/profiler/profiler-listener.h"
#include "src/profiler/tick-sample.h"

namespace v8 {
namespace internal {

// Forward declarations.
class CodeEntry;
class CodeMap;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;

#define CODE_EVENTS_TYPE_LIST(V)                         \
  V(CODE_CREATION, CodeCreateEventRecord)                \
  V(CODE_MOVE, CodeMoveEventRecord)                      \
  V(CODE_DISABLE_OPT, CodeDisableOptEventRecord)         \
  V(CODE_DEOPT, CodeDeoptEventRecord)                    \
  V(REPORT_BUILTIN, ReportBuiltinEventRecord)
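
// CODE_EVENTS_TYPE_LIST is an X-macro list: both CodeEventRecord::Type below
// and the union inside CodeEventsContainer are generated from it. For
// reference, the DECLARE_TYPE expansion of the enum is roughly:
//
//   enum Type {
//     NONE = 0,
//     CODE_CREATION,
//     CODE_MOVE,
//     CODE_DISABLE_OPT,
//     CODE_DEOPT,
//     REPORT_BUILTIN,
//     NUMBER_OF_TYPES
//   };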


class CodeEventRecord {
 public:
#define DECLARE_TYPE(type, ignore) type,
  enum Type {
    NONE = 0,
    CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
    NUMBER_OF_TYPES
  };
#undef DECLARE_TYPE

  Type type;
  mutable unsigned order;
};


class CodeCreateEventRecord : public CodeEventRecord {
 public:
  Address start;
  CodeEntry* entry;
  unsigned size;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class CodeMoveEventRecord : public CodeEventRecord {
 public:
  Address from;
  Address to;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class CodeDisableOptEventRecord : public CodeEventRecord {
 public:
  Address start;
  const char* bailout_reason;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class CodeDeoptEventRecord : public CodeEventRecord {
 public:
  Address start;
  const char* deopt_reason;
  SourcePosition position;
  int deopt_id;
  void* pc;
  int fp_to_sp_delta;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class ReportBuiltinEventRecord : public CodeEventRecord {
 public:
  Address start;
  Builtins::Name builtin_id;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class TickSampleEventRecord {
 public:
  // The parameterless constructor is used when we dequeue data from
  // the ticks buffer.
  TickSampleEventRecord() { }
  explicit TickSampleEventRecord(unsigned order) : order(order) { }

  unsigned order;
  TickSample sample;
};


class CodeEventsContainer {
 public:
  explicit CodeEventsContainer(
      CodeEventRecord::Type type = CodeEventRecord::NONE) {
    generic.type = type;
  }
  union {
    CodeEventRecord generic;
#define DECLARE_CLASS(ignore, type) type type##_;
    CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
#undef DECLARE_CLASS
  };
};
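
// Illustrative sketch of the producer side (not part of this header; the
// variable names are placeholders): a code-creation event is packaged and
// handed to the processor roughly like this:
//
//   CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
//   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
//   rec->start = instruction_start;
//   rec->entry = code_entry;
//   rec->size = instruction_size;
//   processor->Enqueue(evt_rec);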


// This class implements both the profile events processor thread and
// methods called by event producers: VM and stack sampler threads.
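//
// A lifecycle sketch (illustrative only, not the exact CpuProfiler code;
// assumes the processor is created, started and torn down on one thread):
//
//   ProfilerEventsProcessor* processor =
//       new ProfilerEventsProcessor(isolate, generator, sampling_interval);
//   processor->Start();                    // spawns the Run() loop
//   processor->AddCurrentStack(isolate);   // force a sample of the VM stack
//   ...
//   processor->StopSynchronously();        // stops the loop and joins
//   delete processor;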
class ProfilerEventsProcessor : public base::Thread {
 public:
  ProfilerEventsProcessor(Isolate* isolate, ProfileGenerator* generator,
                          base::TimeDelta period);
  virtual ~ProfilerEventsProcessor();

  // Thread control.
  virtual void Run();
  void StopSynchronously();
  INLINE(bool running()) { return !!base::NoBarrier_Load(&running_); }
  void Enqueue(const CodeEventsContainer& event);

  // Puts the current stack into the tick sample events buffer.
  void AddCurrentStack(Isolate* isolate, bool update_stats = false);
  void AddDeoptStack(Isolate* isolate, Address from, int fp_to_sp_delta);

  // Tick sample events are filled directly in the buffer of the circular
  // queue (because the structure is of fixed width, but usually not all
  // stack frame entries are filled). StartTickSample() returns a pointer to
  // the next free record of the buffer; FinishTickSample() commits it.
  inline TickSample* StartTickSample();
  inline void FinishTickSample();
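
  // Illustrative sampler-side usage (a sketch, not part of this header; the
  // TickSample::Init() arguments shown are an assumption about the sampler):
  //
  //   TickSample* sample = processor->StartTickSample();
  //   if (sample != nullptr) {
  //     sample->Init(isolate, register_state,
  //                  TickSample::kIncludeCEntryFrame, /*update_stats=*/true);
  //     processor->FinishTickSample();
  //   }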

  // SamplingCircularQueue has stricter alignment requirements than a normal new
  // can fulfil, so we need to provide our own new/delete here.
  void* operator new(size_t size);
  void operator delete(void* ptr);

  sampler::Sampler* sampler() { return sampler_.get(); }

 private:
  // Called from the events processing thread (Run() method).
  bool ProcessCodeEvent();

  enum SampleProcessingResult {
    OneSampleProcessed,
    FoundSampleForNextCodeEvent,
    NoSamplesInQueue
  };
  SampleProcessingResult ProcessOneSample();

  ProfileGenerator* generator_;
  std::unique_ptr<sampler::Sampler> sampler_;
  base::Atomic32 running_;
  const base::TimeDelta period_;  // Samples & code events processing period.
  LockedQueue<CodeEventsContainer> events_buffer_;
  static const size_t kTickSampleBufferSize = 1 * MB;
  static const size_t kTickSampleQueueLength =
      kTickSampleBufferSize / sizeof(TickSampleEventRecord);
  SamplingCircularQueue<TickSampleEventRecord,
                        kTickSampleQueueLength> ticks_buffer_;
  LockedQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
  base::AtomicNumber<unsigned> last_code_event_id_;
  unsigned last_processed_code_event_id_;
};

class CpuProfiler : public CodeEventObserver {
 public:
  explicit CpuProfiler(Isolate* isolate);

  CpuProfiler(Isolate* isolate, CpuProfilesCollection* profiles,
              ProfileGenerator* test_generator,
              ProfilerEventsProcessor* test_processor);

  ~CpuProfiler() override;

  void set_sampling_interval(base::TimeDelta value);
  void CollectSample();
  void StartProfiling(const char* title, bool record_samples = false);
  void StartProfiling(String* title, bool record_samples);
  CpuProfile* StopProfiling(const char* title);
  CpuProfile* StopProfiling(String* title);
  int GetProfilesCount();
  CpuProfile* GetProfile(int index);
  void DeleteAllProfiles();
  void DeleteProfile(CpuProfile* profile);

  void CodeEventHandler(const CodeEventsContainer& evt_rec) override;

  bool is_profiling() const { return is_profiling_; }

  ProfileGenerator* generator() const { return generator_.get(); }
  ProfilerEventsProcessor* processor() const { return processor_.get(); }
  Isolate* isolate() const { return isolate_; }

 private:
  void StartProcessorIfNotStarted();
  void StopProcessorIfLastProfile(const char* title);
  void StopProcessor();
  void ResetProfiles();
  void LogBuiltins();

  Isolate* const isolate_;
  base::TimeDelta sampling_interval_;
  std::unique_ptr<CpuProfilesCollection> profiles_;
  std::unique_ptr<ProfileGenerator> generator_;
  std::unique_ptr<ProfilerEventsProcessor> processor_;
  bool saved_is_logging_;
  bool is_profiling_;

  DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};
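
// Illustrative usage sketch (not part of this header; |isolate| and the
// profile title are assumptions of the example):
//
//   CpuProfiler profiler(isolate);
//   profiler.set_sampling_interval(base::TimeDelta::FromMicroseconds(100));
//   profiler.StartProfiling("bench", /*record_samples=*/true);
//   // ... run the code to be profiled ...
//   CpuProfile* profile = profiler.StopProfiling("bench");
//   // ... inspect |profile|, then release it.
//   profiler.DeleteProfile(profile);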

}  // namespace internal
}  // namespace v8


#endif  // V8_PROFILER_CPU_PROFILER_H_