// cpu-profiler.h
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_CPU_PROFILER_H_
#define V8_CPU_PROFILER_H_

#include "allocation.h"
#include "atomicops.h"
#include "circular-queue.h"
#include "unbound-queue.h"

namespace v8 {
namespace internal {

// Forward declarations.
class CodeEntry;
class CodeMap;
class CpuProfile;
class CpuProfilesCollection;
class HashMap;
class ProfileGenerator;
class TokenEnumerator;

// X-macro list of code event record kinds: V(enum tag, record class).
// Expanded to generate both the CodeEventRecord::Type enum and the
// per-type members of the events container union.
#define CODE_EVENTS_TYPE_LIST(V)                                   \
  V(CODE_CREATION,    CodeCreateEventRecord)                       \
  V(CODE_MOVE,        CodeMoveEventRecord)                         \
  V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord)


// Base class for code event records passed from VM threads to the
// profiler events processor thread.  The Type enum gets one value per
// entry of CODE_EVENTS_TYPE_LIST, bracketed by NONE and NUMBER_OF_TYPES.
class CodeEventRecord {
 public:
#define DECLARE_TYPE(type, ignore) type,
  enum Type {
    NONE = 0,
    CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
    NUMBER_OF_TYPES
  };
#undef DECLARE_TYPE

  Type type;
  // Sequence number used to order code events relative to tick samples
  // during processing.
  unsigned order;
};


class CodeCreateEventRecord : public CodeEventRecord {
 public:
  Address start;
  CodeEntry* entry;
  unsigned size;
74
  Address shared;
75

76
  INLINE(void UpdateCodeMap(CodeMap* code_map));
77 78 79 80 81 82 83 84
};


class CodeMoveEventRecord : public CodeEventRecord {
 public:
  Address from;
  Address to;

85
  INLINE(void UpdateCodeMap(CodeMap* code_map));
86 87 88
};


89
class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
90
 public:
91 92
  Address from;
  Address to;
93

94
  INLINE(void UpdateCodeMap(CodeMap* code_map));
95 96 97
};


98
class TickSampleEventRecord {
99
 public:
100 101 102 103 104 105
  // The parameterless constructor is used when we dequeue data from
  // the ticks buffer.
  TickSampleEventRecord() { }
  explicit TickSampleEventRecord(unsigned order)
      : filler(1),
        order(order) {
106 107 108
    ASSERT(filler != SamplingCircularQueue::kClear);
  }

109 110 111 112 113 114
  // The first machine word of a TickSampleEventRecord must not ever
  // become equal to SamplingCircularQueue::kClear.  As both order and
  // TickSample's first field are not reliable in this sense (order
  // can overflow, TickSample can have all fields reset), we are
  // forced to use an artificial filler field.
  int filler;
115
  unsigned order;
116
  TickSample sample;
117

118 119 120
  static TickSampleEventRecord* cast(void* value) {
    return reinterpret_cast<TickSampleEventRecord*>(value);
  }
121 122 123 124 125 126 127
};


// This class implements both the profile events processor thread and
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
 public:
128
  explicit ProfilerEventsProcessor(ProfileGenerator* generator);
129
  virtual ~ProfilerEventsProcessor() {}
130 131 132 133

  // Thread control.
  virtual void Run();
  inline void Stop() { running_ = false; }
134
  INLINE(bool running()) { return running_; }
135 136

  // Events adding methods. Called by VM threads.
137 138 139
  void CallbackCreateEvent(Logger::LogEventsAndTags tag,
                           const char* prefix, String* name,
                           Address start);
140 141 142
  void CodeCreateEvent(Logger::LogEventsAndTags tag,
                       String* name,
                       String* resource_name, int line_number,
143
                       Address start, unsigned size,
144
                       Address shared);
145 146 147 148 149 150 151 152
  void CodeCreateEvent(Logger::LogEventsAndTags tag,
                       const char* name,
                       Address start, unsigned size);
  void CodeCreateEvent(Logger::LogEventsAndTags tag,
                       int args_count,
                       Address start, unsigned size);
  void CodeMoveEvent(Address from, Address to);
  void CodeDeleteEvent(Address from);
153
  void SharedFunctionInfoMoveEvent(Address from, Address to);
154 155 156
  void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
                             const char* prefix, String* name,
                             Address start, unsigned size);
157 158
  // Puts current stack into tick sample events buffer.
  void AddCurrentStack();
159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177

  // Tick sample events are filled directly in the buffer of the circular
  // queue (because the structure is of fixed width, but usually not all
  // stack frame entries are filled.) This method returns a pointer to the
  // next record of the buffer.
  INLINE(TickSample* TickSampleEvent());

 private:
  union CodeEventsContainer {
    CodeEventRecord generic;
#define DECLARE_CLASS(ignore, type) type type##_;
    CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
#undef DECLARE_TYPE
  };

  // Called from events processing thread (Run() method.)
  bool ProcessCodeEvent(unsigned* dequeue_order);
  bool ProcessTicks(unsigned dequeue_order);

178 179
  INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));

180 181
  ProfileGenerator* generator_;
  bool running_;
182
  UnboundQueue<CodeEventsContainer> events_buffer_;
183
  SamplingCircularQueue ticks_buffer_;
184
  UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
185 186 187 188 189
  unsigned enqueue_order_;
};

} }  // namespace v8::internal

// Logs |Call| and, when CPU profiling is active for |isolate|, forwards
// it to the CpuProfiler as well.  The do { ... } while (false) wrapper
// makes the conditional part usable as a single statement.
#define PROFILE(isolate, Call)                                \
  LOG(isolate, Call);                                         \
  do {                                                        \
    if (v8::internal::CpuProfiler::is_profiling(isolate)) {   \
      v8::internal::CpuProfiler::Call;                        \
    }                                                         \
  } while (false)


namespace v8 {
namespace internal {

203 204

// TODO(isolates): isolatify this class.
205 206 207 208 209 210 211 212
class CpuProfiler {
 public:
  static void Setup();
  static void TearDown();

  static void StartProfiling(const char* title);
  static void StartProfiling(String* title);
  static CpuProfile* StopProfiling(const char* title);
213
  static CpuProfile* StopProfiling(Object* security_token, String* title);
214
  static int GetProfilesCount();
215 216
  static CpuProfile* GetProfile(Object* security_token, int index);
  static CpuProfile* FindProfile(Object* security_token, unsigned uid);
217 218 219
  static void DeleteAllProfiles();
  static void DeleteProfile(CpuProfile* profile);
  static bool HasDetachedProfiles();
220 221

  // Invoked from stack sampler (thread or signal handler.)
222
  static TickSample* TickSampleEvent(Isolate* isolate);
223 224 225 226 227 228 229 230 231

  // Must be called via PROFILE macro, otherwise will crash when
  // profiling is not enabled.
  static void CallbackEvent(String* name, Address entry_point);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code, const char* comment);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code, String* name);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
232 233 234 235 236 237
                              Code* code,
                              SharedFunctionInfo *shared,
                              String* name);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code,
                              SharedFunctionInfo *shared,
238 239 240
                              String* source, int line);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code, int args_count);
241
  static void CodeMovingGCEvent() {}
242 243 244 245 246
  static void CodeMoveEvent(Address from, Address to);
  static void CodeDeleteEvent(Address from);
  static void GetterCallbackEvent(String* name, Address entry_point);
  static void RegExpCodeCreateEvent(Code* code, String* source);
  static void SetterCallbackEvent(String* name, Address entry_point);
247
  static void SharedFunctionInfoMoveEvent(Address from, Address to);
248

249 250 251 252 253
  // TODO(isolates): this doesn't have to use atomics anymore.

  static INLINE(bool is_profiling(Isolate* isolate)) {
    CpuProfiler* profiler = isolate->cpu_profiler();
    return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_);
254 255 256 257 258 259 260 261 262
  }

 private:
  CpuProfiler();
  ~CpuProfiler();
  void StartCollectingProfile(const char* title);
  void StartCollectingProfile(String* title);
  void StartProcessorIfNotStarted();
  CpuProfile* StopCollectingProfile(const char* title);
263
  CpuProfile* StopCollectingProfile(Object* security_token, String* title);
264
  void StopProcessorIfLastProfile(const char* title);
265 266
  void StopProcessor();
  void ResetProfiles();
267 268 269

  CpuProfilesCollection* profiles_;
  unsigned next_profile_uid_;
270
  TokenEnumerator* token_enumerator_;
271 272
  ProfileGenerator* generator_;
  ProfilerEventsProcessor* processor_;
273
  int saved_logging_nesting_;
274
  bool need_to_stop_sampler_;
275
  Atomic32 is_profiling_;
276 277 278 279 280 281 282 283

 private:
  DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};

} }  // namespace v8::internal


#endif  // V8_CPU_PROFILER_H_