CPU profiler: sample call stack on profiling start.

This simplifies writing tests a lot, because it is now possible
to guarantee that a certain function will be present in a profile
by starting the profiler from inside it.
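In outline, the change works as sketched below: when profiling starts, the
current JS stack is walked and enqueued as a synthetic tick sample, so the
caller of StartProfiling is guaranteed to land in the profile. This is a
condensed, slightly simplified view of the code in the diff below, not a
verbatim copy:

    // Condensed sketch of the new flow (see the full diff below).
    void ProfilerEventsProcessor::AddCurrentStack() {
      TickSampleEventRecord record;
      TickSample* sample = &record.sample;
      sample->state = VMState::current_state();
      sample->pc = reinterpret_cast<Address>(sample);  // Anything non-NULL works.
      sample->frames_count = 0;
      // Walk the current JS stack and record the JSFunction of each frame.
      for (StackTraceFrameIterator it;
           !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
           it.Advance()) {
        sample->stack[sample->frames_count++] =
            reinterpret_cast<Address>(it.frame()->function());
      }
      record.order = enqueue_order_;
      // Goes into a separate unbounded queue, drained by ProcessTicks().
      ticks_from_vm_buffer_.Enqueue(record);
    }

    void CpuProfiler::StartCollectingProfile(const char* title) {
      if (profiles_->StartProfiling(title, next_profile_uid_++)) {
        StartProcessorIfNotStarted();
      }
      // New: capture the stack of the code that requested profiling.
      processor_->AddCurrentStack();
    }

The synthetic sample is tagged with a dummy non-NULL pc and the current
enqueue order, so ProcessTicks can merge it with regular ticks in order.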

Review URL: http://codereview.chromium.org/2438002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4769 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent f939ccbc
@@ -31,6 +31,7 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "frames-inl.h"
#include "log-inl.h"
#include "../include/v8-profiler.h"
@@ -49,7 +50,8 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
kTickSamplesBufferChunksCount),
enqueue_order_(0) { }
enqueue_order_(0) {
}
void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
@@ -181,6 +183,24 @@ void ProfilerEventsProcessor::RegExpCodeCreateEvent(
}
void ProfilerEventsProcessor::AddCurrentStack() {
TickSampleEventRecord record;
TickSample* sample = &record.sample;
sample->state = VMState::current_state();
sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
sample->frames_count = 0;
for (StackTraceFrameIterator it;
!it.done() && sample->frames_count < TickSample::kMaxFramesCount;
it.Advance()) {
JavaScriptFrame* frame = it.frame();
sample->stack[sample->frames_count++] =
reinterpret_cast<Address>(frame->function());
}
record.order = enqueue_order_;
ticks_from_vm_buffer_.Enqueue(record);
}
bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
if (!events_buffer_.IsEmpty()) {
CodeEventsContainer record;
@@ -205,9 +225,16 @@ bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
while (true) {
if (!ticks_from_vm_buffer_.IsEmpty()
&& ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
TickSampleEventRecord record;
ticks_from_vm_buffer_.Dequeue(&record);
generator_->RecordTickSample(record.sample);
}
const TickSampleEventRecord* rec =
TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
if (rec == NULL) return false;
if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
if (rec->order == dequeue_order) {
generator_->RecordTickSample(rec->sample);
ticks_buffer_.FinishDequeue();
@@ -416,13 +443,12 @@ void CpuProfiler::StartCollectingProfile(const char* title) {
if (profiles_->StartProfiling(title, next_profile_uid_++)) {
StartProcessorIfNotStarted();
}
processor_->AddCurrentStack();
}
void CpuProfiler::StartCollectingProfile(String* title) {
if (profiles_->StartProfiling(title, next_profile_uid_++)) {
StartProcessorIfNotStarted();
}
StartCollectingProfile(profiles_->GetName(title));
}
@@ -434,10 +460,6 @@ void CpuProfiler::StartProcessorIfNotStarted() {
generator_ = new ProfileGenerator(profiles_);
processor_ = new ProfilerEventsProcessor(generator_);
processor_->Start();
// Enable stack sampling.
// It is important to have it started prior to logging, see issue 683:
// http://code.google.com/p/v8/issues/detail?id=683
reinterpret_cast<Sampler*>(Logger::ticker_)->Start();
// Enumerate stuff we already have in the heap.
if (Heap::HasBeenSetup()) {
Logger::LogCodeObjects();
@@ -445,6 +467,8 @@
Logger::LogFunctionObjects();
Logger::LogAccessorCallbacks();
}
// Enable stack sampling.
reinterpret_cast<Sampler*>(Logger::ticker_)->Start();
}
}
@@ -105,6 +105,11 @@ class CodeAliasEventRecord : public CodeEventRecord {
class TickSampleEventRecord BASE_EMBEDDED {
public:
TickSampleEventRecord()
: filler(1) {
ASSERT(filler != SamplingCircularQueue::kClear);
}
// The first machine word of a TickSampleEventRecord must not ever
// become equal to SamplingCircularQueue::kClear. As both order and
// TickSample's first field are not reliable in this sense (order
@@ -119,9 +124,6 @@ class TickSampleEventRecord BASE_EMBEDDED {
}
INLINE(static TickSampleEventRecord* init(void* value));
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(TickSampleEventRecord);
};
@@ -159,6 +161,8 @@ class ProfilerEventsProcessor : public Thread {
void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
const char* prefix, String* name,
Address start, unsigned size);
// Puts current stack into tick sample events buffer.
void AddCurrentStack();
// Tick sample events are filled directly in the buffer of the circular
// queue (because the structure is of fixed width, but usually not all
@@ -184,6 +188,7 @@ class ProfilerEventsProcessor : public Thread {
bool running_;
UnboundQueue<CodeEventsContainer> events_buffer_;
SamplingCircularQueue ticks_buffer_;
UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
unsigned enqueue_order_;
};
@@ -260,6 +260,7 @@ class CpuProfilesCollection {
CpuProfile* GetProfile(int security_token_id, unsigned uid);
inline bool is_last_profile();
const char* GetName(String* name);
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
String* name, String* resource_name, int line_number);
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, const char* name);
@@ -274,7 +275,6 @@ class CpuProfilesCollection {
private:
INLINE(const char* GetFunctionName(String* name));
INLINE(const char* GetFunctionName(const char* name));
const char* GetName(String* name);
const char* GetName(int args_count);
List<CpuProfile*>* GetProfilesList(int security_token_id);
int TokenToIndex(int security_token_id);
@@ -82,6 +82,14 @@ void UnboundQueue<Record>::Enqueue(const Record& rec) {
while (first_ != reinterpret_cast<Node*>(divider_)) DeleteFirst();
}
template<typename Record>
Record* UnboundQueue<Record>::Peek() {
ASSERT(divider_ != last_);
Node* next = reinterpret_cast<Node*>(divider_)->next;
return &next->value;
}
} } // namespace v8::internal
#endif // V8_UNBOUND_QUEUE_INL_H_
@@ -47,6 +47,7 @@ class UnboundQueue BASE_EMBEDDED {
INLINE(void Dequeue(Record* rec));
INLINE(void Enqueue(const Record& rec));
INLINE(bool IsEmpty()) { return divider_ == last_; }
INLINE(Record* Peek());
private:
INLINE(void DeleteFirst());
@@ -7,12 +7,14 @@
#include "v8.h"
#include "profile-generator-inl.h"
#include "cctest.h"
#include "../include/v8-profiler.h"
namespace i = v8::internal;
using i::CodeEntry;
using i::CodeMap;
using i::CpuProfile;
using i::CpuProfiler;
using i::CpuProfilesCollection;
using i::ProfileNode;
using i::ProfileTree;
@@ -668,4 +670,109 @@ TEST(SampleRateCalculator) {
CHECK_EQ(kSamplingIntervalMs * 0.66666, calc3.ticks_per_ms());
}
// --- P r o f i l e r E x t e n s i o n ---
class ProfilerExtension : public v8::Extension {
public:
ProfilerExtension() : v8::Extension("v8/profiler", kSource) { }
virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
v8::Handle<v8::String> name);
static v8::Handle<v8::Value> StartProfiling(const v8::Arguments& args);
static v8::Handle<v8::Value> StopProfiling(const v8::Arguments& args);
private:
static const char* kSource;
};
const char* ProfilerExtension::kSource =
"native function startProfiling();"
"native function stopProfiling();";
v8::Handle<v8::FunctionTemplate> ProfilerExtension::GetNativeFunction(
v8::Handle<v8::String> name) {
if (name->Equals(v8::String::New("startProfiling"))) {
return v8::FunctionTemplate::New(ProfilerExtension::StartProfiling);
} else if (name->Equals(v8::String::New("stopProfiling"))) {
return v8::FunctionTemplate::New(ProfilerExtension::StopProfiling);
} else {
CHECK(false);
return v8::Handle<v8::FunctionTemplate>();
}
}
v8::Handle<v8::Value> ProfilerExtension::StartProfiling(
const v8::Arguments& args) {
if (args.Length() > 0)
v8::CpuProfiler::StartProfiling(args[0].As<v8::String>());
else
v8::CpuProfiler::StartProfiling(v8::String::New(""));
return v8::Undefined();
}
v8::Handle<v8::Value> ProfilerExtension::StopProfiling(
const v8::Arguments& args) {
if (args.Length() > 0)
v8::CpuProfiler::StopProfiling(args[0].As<v8::String>());
else
v8::CpuProfiler::StopProfiling(v8::String::New(""));
return v8::Undefined();
}
static ProfilerExtension kProfilerExtension;
v8::DeclareExtension kProfilerExtensionDeclaration(&kProfilerExtension);
static v8::Persistent<v8::Context> env;
static const ProfileNode* PickChild(const ProfileNode* parent,
const char* name) {
for (int i = 0; i < parent->children()->length(); ++i) {
const ProfileNode* child = parent->children()->at(i);
if (strcmp(child->entry()->name(), name) == 0) return child;
}
return NULL;
}
TEST(RecordStackTraceAtStartProfiling) {
if (env.IsEmpty()) {
v8::HandleScope scope;
const char* extensions[] = { "v8/profiler" };
v8::ExtensionConfiguration config(1, extensions);
env = v8::Context::New(&config);
}
v8::HandleScope scope;
env->Enter();
CHECK_EQ(0, CpuProfiler::GetProfilesCount());
CompileRun(
"function c() { startProfiling(); }\n"
"function b() { c(); }\n"
"function a() { b(); }\n"
"a();\n"
"stopProfiling();");
CHECK_EQ(1, CpuProfiler::GetProfilesCount());
CpuProfile* profile =
CpuProfiler::GetProfile(NULL, 0);
const ProfileTree* topDown = profile->top_down();
const ProfileNode* current = topDown->root();
// The tree should look like this:
// (root)
// (anonymous function)
// a
// b
// c
current = PickChild(current, "(anonymous function)");
CHECK_NE(NULL, const_cast<ProfileNode*>(current));
current = PickChild(current, "a");
CHECK_NE(NULL, const_cast<ProfileNode*>(current));
current = PickChild(current, "b");
CHECK_NE(NULL, const_cast<ProfileNode*>(current));
current = PickChild(current, "c");
CHECK_NE(NULL, const_cast<ProfileNode*>(current));
CHECK_EQ(0, current->children()->length());
}
#endif // ENABLE_LOGGING_AND_PROFILING