// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_CPU_PROFILER_H_
#define V8_CPU_PROFILER_H_

#ifdef ENABLE_LOGGING_AND_PROFILING

#include "atomicops.h"
#include "circular-queue.h"
#include "unbound-queue.h"

namespace v8 {
namespace internal {

// Forward declarations.
class CodeEntry;
class CodeMap;
class CpuProfile;
class CpuProfilesCollection;
class HashMap;
class ProfileGenerator;
class TokenEnumerator;

#define CODE_EVENTS_TYPE_LIST(V)                                   \
  V(CODE_CREATION,    CodeCreateEventRecord)                       \
  V(CODE_MOVE,        CodeMoveEventRecord)                         \
  V(CODE_DELETE,      CodeDeleteEventRecord)                       \
  V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord)


class CodeEventRecord {
 public:
#define DECLARE_TYPE(type, ignore) type,
  enum Type {
    NONE = 0,
    CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
    NUMBER_OF_TYPES
  };
#undef DECLARE_TYPE

  Type type;
  unsigned order;
};
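
// For reference, CODE_EVENTS_TYPE_LIST(DECLARE_TYPE) above expands the
// enum body to:
//
//   enum Type {
//     NONE = 0,
//     CODE_CREATION, CODE_MOVE, CODE_DELETE, SHARED_FUNC_MOVE,
//     NUMBER_OF_TYPES
//   };
//
// pairing each Type value with one of the record classes declared below.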


class CodeCreateEventRecord : public CodeEventRecord {
 public:
  Address start;
  CodeEntry* entry;
  unsigned size;
  Address shared;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class CodeMoveEventRecord : public CodeEventRecord {
 public:
  Address from;
  Address to;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class CodeDeleteEventRecord : public CodeEventRecord {
 public:
  Address start;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
 public:
  Address from;
  Address to;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class TickSampleEventRecord BASE_EMBEDDED {
 public:
  TickSampleEventRecord()
      : filler(1) {
    ASSERT(filler != SamplingCircularQueue::kClear);
  }

  // The first machine word of a TickSampleEventRecord must never become
  // equal to SamplingCircularQueue::kClear. As neither order nor
  // TickSample's first field is reliable in this sense (order can
  // overflow, a TickSample can have all its fields reset), an artificial
  // filler field is used.
  int filler;
  unsigned order;
  TickSample sample;

  static TickSampleEventRecord* cast(void* value) {
    return reinterpret_cast<TickSampleEventRecord*>(value);
  }

  INLINE(static TickSampleEventRecord* init(void* value));
};
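
// A sketch of the assumed producer pattern (the actual code lives in
// cpu-profiler.cc): records are constructed in place, in memory handed
// out by the sampling circular queue, rather than copied in:
//
//   // Hypothetical illustration, not the exact implementation:
//   TickSampleEventRecord* record =
//       TickSampleEventRecord::init(ticks_buffer.Enqueue());
//   record->order = current_order;  // init() presumably sets the filler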


// This class implements both the profile events processor thread and
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
 public:
  explicit ProfilerEventsProcessor(Isolate* isolate,
                                   ProfileGenerator* generator);
  virtual ~ProfilerEventsProcessor() {}

  // Thread control.
  virtual void Run();
  inline void Stop() { running_ = false; }
  INLINE(bool running()) { return running_; }

  // Methods for adding events; called by VM threads.
  void CallbackCreateEvent(Logger::LogEventsAndTags tag,
                           const char* prefix, String* name,
                           Address start);
  void CodeCreateEvent(Logger::LogEventsAndTags tag,
                       String* name,
                       String* resource_name, int line_number,
                       Address start, unsigned size,
                       Address shared);
  void CodeCreateEvent(Logger::LogEventsAndTags tag,
                       const char* name,
                       Address start, unsigned size);
  void CodeCreateEvent(Logger::LogEventsAndTags tag,
                       int args_count,
                       Address start, unsigned size);
  void CodeMoveEvent(Address from, Address to);
  void CodeDeleteEvent(Address from);
  void SharedFunctionInfoMoveEvent(Address from, Address to);
  void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
                             const char* prefix, String* name,
                             Address start, unsigned size);
  // Puts the current stack into the tick sample events buffer.
  void AddCurrentStack();

  // Tick sample events are filled directly in the buffer of the circular
  // queue (the structure has a fixed width, but usually not all stack
  // frame entries are filled). This method returns a pointer to the next
  // record in the buffer.
  INLINE(TickSample* TickSampleEvent());
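
  // Illustrative caller pattern (sketch only; the real sampler lives
  // elsewhere in the VM, and FillStack is a hypothetical helper):
  //
  //   TickSample* sample = processor->TickSampleEvent();
  //   FillStack(sample);  // pc, sp and stack frames written in place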

 private:
  union CodeEventsContainer {
    CodeEventRecord generic;
#define DECLARE_CLASS(ignore, type) type type##_;
    CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
#undef DECLARE_CLASS
  };

  // Called from the events processing thread (Run() method).
  bool ProcessCodeEvent(unsigned* dequeue_order);
  bool ProcessTicks(unsigned dequeue_order);

  INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));

  ProfileGenerator* generator_;
  bool running_;
  UnboundQueue<CodeEventsContainer> events_buffer_;
  SamplingCircularQueue ticks_buffer_;
  UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
  unsigned enqueue_order_;
};
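
// Lifecycle sketch (illustrative only, mirroring how CpuProfiler below
// drives the processor):
//
//   ProfilerEventsProcessor* processor =
//       new ProfilerEventsProcessor(isolate, generator);
//   processor->Start();  // spawns the thread, which enters Run()
//   ...                  // VM threads enqueue events, sampler adds ticks
//   processor->Stop();   // clears running_; Run() drains and returns
//   processor->Join();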

} }  // namespace v8::internal


#define PROFILE(isolate, Call)                         \
  LOG(isolate, Call);                                  \
  do {                                                 \
    if (v8::internal::CpuProfiler::is_profiling()) {   \
      v8::internal::CpuProfiler::Call;                 \
    }                                                  \
  } while (false)
#else
#define PROFILE(isolate, Call) LOG(isolate, Call)
#endif  // ENABLE_LOGGING_AND_PROFILING
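
// Usage sketch: VM call sites wrap profiler notifications in PROFILE so
// that, with profiling disabled, only logging happens. A hypothetical
// call site:
//
//   PROFILE(isolate, CodeMoveEvent(from, to));
//
// expands, when profiling is enabled, to a LOG call followed by a
// CpuProfiler::CodeMoveEvent(from, to) guarded by is_profiling().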


namespace v8 {
namespace internal {


// TODO(isolates): isolatify this class.
class CpuProfiler {
 public:
  static void Setup();
  static void TearDown();

#ifdef ENABLE_LOGGING_AND_PROFILING
  static void StartProfiling(const char* title);
  static void StartProfiling(String* title);
  static CpuProfile* StopProfiling(const char* title);
  static CpuProfile* StopProfiling(Object* security_token, String* title);
  static int GetProfilesCount();
  static CpuProfile* GetProfile(Object* security_token, int index);
  static CpuProfile* FindProfile(Object* security_token, unsigned uid);
  static void DeleteAllProfiles();
  static void DeleteProfile(CpuProfile* profile);
  static bool HasDetachedProfiles();
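
  // A typical profiling session as driven by the embedder-facing API
  // (sketch; the title is an arbitrary string chosen by the embedder):
  //   CpuProfiler::StartProfiling("session");
  //   ... run JavaScript ...
  //   CpuProfile* profile = CpuProfiler::StopProfiling("session");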

  // Invoked from the stack sampler (thread or signal handler).
  static TickSample* TickSampleEvent(Isolate* isolate);

  // These must be called via the PROFILE macro, otherwise they will
  // crash when profiling is not enabled.
  static void CallbackEvent(String* name, Address entry_point);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code, const char* comment);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code, String* name);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code,
                              SharedFunctionInfo* shared,
                              String* name);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code,
                              SharedFunctionInfo* shared,
                              String* source, int line);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code, int args_count);
  static void CodeMovingGCEvent() {}
  static void CodeMoveEvent(Address from, Address to);
  static void CodeDeleteEvent(Address from);
  static void GetterCallbackEvent(String* name, Address entry_point);
  static void RegExpCodeCreateEvent(Code* code, String* source);
  static void SetterCallbackEvent(String* name, Address entry_point);
  static void SharedFunctionInfoMoveEvent(Address from, Address to);

  // TODO(isolates): this doesn't have to use atomics anymore.

  static INLINE(bool is_profiling()) {
    return is_profiling(Isolate::Current());
  }

  static INLINE(bool is_profiling(Isolate* isolate)) {
    CpuProfiler* profiler = isolate->cpu_profiler();
    return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_);
  }

 private:
  CpuProfiler();
  ~CpuProfiler();
  void StartCollectingProfile(const char* title);
  void StartCollectingProfile(String* title);
  void StartProcessorIfNotStarted();
  CpuProfile* StopCollectingProfile(const char* title);
  CpuProfile* StopCollectingProfile(Object* security_token, String* title);
  void StopProcessorIfLastProfile(const char* title);
  void StopProcessor();
  void ResetProfiles();

  CpuProfilesCollection* profiles_;
  unsigned next_profile_uid_;
  TokenEnumerator* token_enumerator_;
  ProfileGenerator* generator_;
  ProfilerEventsProcessor* processor_;
  int saved_logging_nesting_;
  bool need_to_stop_sampler_;
  Atomic32 is_profiling_;

#else
  static INLINE(bool is_profiling()) { return false; }
#endif  // ENABLE_LOGGING_AND_PROFILING

 private:
  DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};

} }  // namespace v8::internal


#endif  // V8_CPU_PROFILER_H_