Perform CPU sampling on the CPU sampling thread only if the processing thread is not running.

- perform CPU profiler sampling in the sampler thread as we used to;
- skip sampling in the sampling thread if processing thread is running;
- only install SIGPROF handler when CPU profiling is enabled.

BUG=v8:2364

Review URL: https://codereview.chromium.org/11231002
Patch from Sergey Rogulenko <rogulenko@google.com> and Andrey Kosyakov <caseq@chromium.org>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12985 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent b6c9bdaa
......@@ -31,7 +31,6 @@
#include "cpu-profiler.h"
#include <new>
#include "circular-queue-inl.h"
#include "profile-generator-inl.h"
#include "unbound-queue-inl.h"
......@@ -56,11 +55,18 @@ void SharedFunctionInfoMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
}
TickSample* ProfilerEventsProcessor::TickSampleEvent() {
TickSample* ProfilerEventsProcessor::StartTickSampleEvent() {
if (!ticks_buffer_is_empty_ || ticks_buffer_is_initialized_) return NULL;
ticks_buffer_is_initialized_ = true;
generator_->Tick();
TickSampleEventRecord* evt =
new(ticks_buffer_.Enqueue()) TickSampleEventRecord(enqueue_order_);
return &evt->sample;
ticks_buffer_ = TickSampleEventRecord(enqueue_order_);
return &ticks_buffer_.sample;
}
void ProfilerEventsProcessor::FinishTickSampleEvent() {
ASSERT(ticks_buffer_is_initialized_ && ticks_buffer_is_empty_);
ticks_buffer_is_empty_ = false;
}
......
......@@ -39,19 +39,19 @@
namespace v8 {
namespace internal {
static const int kEventsBufferSize = 256 * KB;
static const int kTickSamplesBufferChunkSize = 64 * KB;
static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 64 * KB;
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator,
Sampler* sampler,
int period_in_useconds)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
sampler_(sampler),
running_(true),
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
kTickSamplesBufferChunksCount),
period_in_useconds_(period_in_useconds),
ticks_buffer_is_empty_(true),
ticks_buffer_is_initialized_(false),
enqueue_order_(0) {
}
......@@ -215,23 +215,17 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
generator_->RecordTickSample(record.sample);
}
const TickSampleEventRecord* rec =
TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
// Make a local copy of tick sample record to ensure that it won't
// be modified as we are processing it. This is possible as the
// sampler writes w/o any sync to the queue, so if the processor
// will get far behind, a record may be modified right under its
// feet.
TickSampleEventRecord record = *rec;
if (record.order == dequeue_order) {
if (ticks_buffer_is_empty_) return !ticks_from_vm_buffer_.IsEmpty();
if (ticks_buffer_.order == dequeue_order) {
// A paranoid check to make sure that we don't get a memory overrun
// in case of frames_count having a wild value.
if (record.sample.frames_count < 0
|| record.sample.frames_count > TickSample::kMaxFramesCount)
record.sample.frames_count = 0;
generator_->RecordTickSample(record.sample);
ticks_buffer_.FinishDequeue();
if (ticks_buffer_.sample.frames_count < 0
|| ticks_buffer_.sample.frames_count > TickSample::kMaxFramesCount) {
ticks_buffer_.sample.frames_count = 0;
}
generator_->RecordTickSample(ticks_buffer_.sample);
ticks_buffer_is_empty_ = true;
ticks_buffer_is_initialized_ = false;
} else {
return true;
}
......@@ -239,22 +233,29 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
}
void ProfilerEventsProcessor::ProcessEventsQueue(int64_t stop_time,
unsigned* dequeue_order) {
while (OS::Ticks() < stop_time) {
if (ProcessTicks(*dequeue_order)) {
// All ticks of the current dequeue_order are processed,
// proceed to the next code event.
ProcessCodeEvent(dequeue_order);
}
}
}
void ProfilerEventsProcessor::Run() {
unsigned dequeue_order = 0;
while (running_) {
// Process ticks until we have any.
if (ProcessTicks(dequeue_order)) {
// All ticks of the current dequeue_order are processed,
// proceed to the next code event.
ProcessCodeEvent(&dequeue_order);
int64_t stop_time = OS::Ticks() + period_in_useconds_;
if (sampler_ != NULL) {
sampler_->DoSample();
}
YieldCPU();
ProcessEventsQueue(stop_time, &dequeue_order);
}
// Process remaining tick events.
ticks_buffer_.FlushResidualRecords();
// Perform processing until we have tick events, skip remaining code events.
while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
}
......@@ -310,15 +311,22 @@ CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
}
TickSample* CpuProfiler::TickSampleEvent(Isolate* isolate) {
TickSample* CpuProfiler::StartTickSampleEvent(Isolate* isolate) {
if (CpuProfiler::is_profiling(isolate)) {
return isolate->cpu_profiler()->processor_->TickSampleEvent();
return isolate->cpu_profiler()->processor_->StartTickSampleEvent();
} else {
return NULL;
}
}
void CpuProfiler::FinishTickSampleEvent(Isolate* isolate) {
if (CpuProfiler::is_profiling(isolate)) {
isolate->cpu_profiler()->processor_->FinishTickSampleEvent();
}
}
void CpuProfiler::DeleteAllProfiles() {
Isolate* isolate = Isolate::Current();
ASSERT(isolate->cpu_profiler() != NULL);
......@@ -486,13 +494,15 @@ void CpuProfiler::StartProcessorIfNotStarted() {
if (processor_ == NULL) {
Isolate* isolate = Isolate::Current();
Sampler* sampler = isolate->logger()->sampler();
// Disable logging when using the new implementation.
saved_logging_nesting_ = isolate->logger()->logging_nesting_;
isolate->logger()->logging_nesting_ = 0;
generator_ = new ProfileGenerator(profiles_);
processor_ = new ProfilerEventsProcessor(generator_);
processor_ = new ProfilerEventsProcessor(generator_,
sampler,
FLAG_cpu_profiler_sampling_period);
NoBarrier_Store(&is_profiling_, true);
processor_->Start();
// Enumerate stuff we already have in the heap.
if (isolate->heap()->HasBeenSetUp()) {
if (!FLAG_prof_browser_mode) {
......@@ -505,12 +515,13 @@ void CpuProfiler::StartProcessorIfNotStarted() {
isolate->logger()->LogAccessorCallbacks();
}
// Enable stack sampling.
Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
if (!sampler->IsActive()) {
sampler->Start();
need_to_stop_sampler_ = true;
}
sampler->SetHasProcessingThread(true);
sampler->IncreaseProfilingDepth();
processor_->Start();
}
}
......@@ -545,16 +556,17 @@ void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
void CpuProfiler::StopProcessor() {
NoBarrier_Store(&is_profiling_, false);
processor_->Stop();
processor_->Join();
Logger* logger = Isolate::Current()->logger();
Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
Sampler* sampler = logger->sampler();
sampler->DecreaseProfilingDepth();
sampler->SetHasProcessingThread(false);
if (need_to_stop_sampler_) {
sampler->Stop();
need_to_stop_sampler_ = false;
}
NoBarrier_Store(&is_profiling_, false);
processor_->Stop();
processor_->Join();
delete processor_;
delete generator_;
processor_ = NULL;
......
......@@ -124,7 +124,9 @@ class TickSampleEventRecord {
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
public:
explicit ProfilerEventsProcessor(ProfileGenerator* generator);
explicit ProfilerEventsProcessor(ProfileGenerator* generator,
Sampler* sampler,
int period_in_useconds);
virtual ~ProfilerEventsProcessor() {}
// Thread control.
......@@ -156,11 +158,12 @@ class ProfilerEventsProcessor : public Thread {
// Puts current stack into tick sample events buffer.
void AddCurrentStack();
// Tick sample events are filled directly in the buffer of the circular
// queue (because the structure is of fixed width, but usually not all
// stack frame entries are filled.) This method returns a pointer to the
// next record of the buffer.
INLINE(TickSample* TickSampleEvent());
// StartTickSampleEvent returns a pointer only if the ticks_buffer_ is empty,
// FinishTickSampleEvent marks the ticks_buffer_ as filled.
// Finish should be called only after successful Start (returning non-NULL
// pointer).
INLINE(TickSample* StartTickSampleEvent());
INLINE(void FinishTickSampleEvent());
private:
union CodeEventsContainer {
......@@ -173,13 +176,19 @@ class ProfilerEventsProcessor : public Thread {
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent(unsigned* dequeue_order);
bool ProcessTicks(unsigned dequeue_order);
void ProcessEventsQueue(int64_t stop_time, unsigned* dequeue_order);
INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
ProfileGenerator* generator_;
Sampler* sampler_;
bool running_;
// Sampling period in microseconds.
const int period_in_useconds_;
UnboundQueue<CodeEventsContainer> events_buffer_;
SamplingCircularQueue ticks_buffer_;
TickSampleEventRecord ticks_buffer_;
bool ticks_buffer_is_empty_;
bool ticks_buffer_is_initialized_;
UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
unsigned enqueue_order_;
};
......@@ -218,7 +227,10 @@ class CpuProfiler {
static bool HasDetachedProfiles();
// Invoked from stack sampler (thread or signal handler.)
static TickSample* TickSampleEvent(Isolate* isolate);
// Finish should be called only after successful Start (returning non-NULL
// pointer).
static TickSample* StartTickSampleEvent(Isolate* isolate);
static void FinishTickSampleEvent(Isolate* isolate);
// Must be called via PROFILE macro, otherwise will crash when
// profiling is not enabled.
......
......@@ -346,6 +346,10 @@ DEFINE_bool(compilation_cache, true, "enable compilation cache")
DEFINE_bool(cache_prototype_transitions, true, "cache prototype transitions")
// cpu-profiler.cc
DEFINE_int(cpu_profiler_sampling_period, 1000,
"CPU profiler sampling period in microseconds")
// debug.cc
DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
DEFINE_bool(debugger_auto_break, true,
......
......@@ -699,7 +699,7 @@ class SamplerThread : public Thread {
memset(&context, 0, sizeof(context));
TickSample sample_obj;
TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
TickSample* sample = CpuProfiler::StartTickSampleEvent(sampler->isolate());
if (sample == NULL) sample = &sample_obj;
static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
......@@ -720,6 +720,7 @@ class SamplerThread : public Thread {
sampler->SampleStack(sample);
sampler->Tick(sample);
}
CpuProfiler::FinishTickSampleEvent(sampler->isolate());
ResumeThread(profiled_thread);
}
......@@ -774,6 +775,11 @@ Sampler::~Sampler() {
}
void Sampler::DoSample() {
// TODO(rogulenko): implement
}
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
......@@ -788,4 +794,12 @@ void Sampler::Stop() {
}
void Sampler::StartSampling() {
}
void Sampler::StopSampling() {
}
} } // namespace v8::internal
......@@ -685,7 +685,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
......@@ -707,6 +707,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
#endif
sampler->SampleStack(sample);
sampler->Tick(sample);
CpuProfiler::FinishTickSampleEvent(isolate);
}
......@@ -890,6 +891,11 @@ Sampler::~Sampler() {
}
void Sampler::DoSample() {
// TODO(rogulenko): implement
}
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
......@@ -904,4 +910,12 @@ void Sampler::Stop() {
}
void Sampler::StartSampling() {
}
void Sampler::StopSampling() {
}
} } // namespace v8::internal
......@@ -1025,6 +1025,7 @@ static int GetThreadID() {
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
Isolate* isolate = Isolate::UncheckedCurrent();
if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
// We require a fully initialized and entered isolate.
......@@ -1039,7 +1040,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
......@@ -1075,16 +1076,74 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
#endif // V8_HOST_ARCH_*
sampler->SampleStack(sample);
sampler->Tick(sample);
CpuProfiler::FinishTickSampleEvent(isolate);
}
class CpuProfilerSignalHandler {
public:
static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
static void TearDown() { delete mutex_; }
static void InstallSignalHandler() {
struct sigaction sa;
ScopedLock lock(mutex_);
if (signal_handler_installed_counter_ > 0) {
signal_handler_installed_counter_++;
return;
}
sa.sa_sigaction = ProfilerSignalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART | SA_SIGINFO;
if (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0) {
signal_handler_installed_counter_++;
}
}
static void RestoreSignalHandler() {
ScopedLock lock(mutex_);
if (signal_handler_installed_counter_ == 0)
return;
if (signal_handler_installed_counter_ == 1) {
sigaction(SIGPROF, &old_signal_handler_, 0);
}
signal_handler_installed_counter_--;
}
static bool signal_handler_installed() {
return signal_handler_installed_counter_ > 0;
}
private:
static int signal_handler_installed_counter_;
static struct sigaction old_signal_handler_;
static Mutex* mutex_;
};
int CpuProfilerSignalHandler::signal_handler_installed_counter_ = 0;
struct sigaction CpuProfilerSignalHandler::old_signal_handler_;
Mutex* CpuProfilerSignalHandler::mutex_ = NULL;
class Sampler::PlatformData : public Malloced {
public:
PlatformData() : vm_tid_(GetThreadID()) {}
PlatformData()
: vm_tgid_(getpid()),
vm_tid_(GetThreadID()) {}
int vm_tid() const { return vm_tid_; }
void SendProfilingSignal() {
if (!CpuProfilerSignalHandler::signal_handler_installed()) return;
// Glibc doesn't provide a wrapper for tgkill(2).
#if defined(ANDROID)
syscall(__NR_tgkill, vm_tgid_, vm_tid_, SIGPROF);
#else
syscall(SYS_tgkill, vm_tgid_, vm_tid_, SIGPROF);
#endif
}
private:
const int vm_tgid_;
const int vm_tid_;
};
......@@ -1100,28 +1159,11 @@ class SignalSender : public Thread {
explicit SignalSender(int interval)
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
vm_tgid_(getpid()),
interval_(interval) {}
static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
static void TearDown() { delete mutex_; }
static void InstallSignalHandler() {
struct sigaction sa;
sa.sa_sigaction = ProfilerSignalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART | SA_SIGINFO;
signal_handler_installed_ =
(sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
}
static void RestoreSignalHandler() {
if (signal_handler_installed_) {
sigaction(SIGPROF, &old_signal_handler_, 0);
signal_handler_installed_ = false;
}
}
static void AddActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
......@@ -1142,7 +1184,6 @@ class SignalSender : public Thread {
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
delete instance_;
instance_ = NULL;
RestoreSignalHandler();
}
}
......@@ -1154,18 +1195,13 @@ class SignalSender : public Thread {
bool cpu_profiling_enabled =
(state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
if (cpu_profiling_enabled && !signal_handler_installed_) {
InstallSignalHandler();
} else if (!cpu_profiling_enabled && signal_handler_installed_) {
RestoreSignalHandler();
}
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
if (!cpu_profiling_enabled) {
if (rate_limiter_.SuspendIfNecessary()) continue;
}
if (cpu_profiling_enabled && runtime_profiler_enabled) {
if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, NULL)) {
return;
}
Sleep(HALF_INTERVAL);
......@@ -1175,8 +1211,7 @@ class SignalSender : public Thread {
Sleep(HALF_INTERVAL);
} else {
if (cpu_profiling_enabled) {
if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
this)) {
if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, NULL)) {
return;
}
}
......@@ -1191,10 +1226,9 @@ class SignalSender : public Thread {
}
}
static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
static void DoCpuProfile(Sampler* sampler, void*) {
if (!sampler->IsProfiling()) return;
SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
sampler->platform_data()->SendProfilingSignal();
}
static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
......@@ -1202,16 +1236,6 @@ class SignalSender : public Thread {
sampler->isolate()->runtime_profiler()->NotifyTick();
}
void SendProfilingSignal(int tid) {
if (!signal_handler_installed_) return;
// Glibc doesn't provide a wrapper for tgkill(2).
#if defined(ANDROID)
syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
#else
syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
#endif
}
void Sleep(SleepInterval full_or_half) {
// Convert ms to us and subtract 100 us to compensate for delays
// occurring during signal delivery.
......@@ -1234,15 +1258,12 @@ class SignalSender : public Thread {
#endif // ANDROID
}
const int vm_tgid_;
const int interval_;
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
static Mutex* mutex_;
static SignalSender* instance_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
private:
DISALLOW_COPY_AND_ASSIGN(SignalSender);
......@@ -1251,8 +1272,6 @@ class SignalSender : public Thread {
Mutex* SignalSender::mutex_ = NULL;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
void OS::SetUp() {
......@@ -1280,11 +1299,13 @@ void OS::SetUp() {
}
#endif
SignalSender::SetUp();
CpuProfilerSignalHandler::SetUp();
}
void OS::TearDown() {
SignalSender::TearDown();
CpuProfilerSignalHandler::TearDown();
delete limit_mutex;
}
......@@ -1294,6 +1315,7 @@ Sampler::Sampler(Isolate* isolate, int interval)
interval_(interval),
profiling_(false),
active_(false),
has_processing_thread_(false),
samples_taken_(0) {
data_ = new PlatformData;
}
......@@ -1305,6 +1327,11 @@ Sampler::~Sampler() {
}
void Sampler::DoSample() {
platform_data()->SendProfilingSignal();
}
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
......@@ -1319,4 +1346,14 @@ void Sampler::Stop() {
}
void Sampler::StartSampling() {
CpuProfilerSignalHandler::InstallSignalHandler();
}
void Sampler::StopSampling() {
CpuProfilerSignalHandler::RestoreSignalHandler();
}
} } // namespace v8::internal
......@@ -825,7 +825,7 @@ class SamplerThread : public Thread {
void SampleContext(Sampler* sampler) {
thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
TickSample sample_obj;
TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
TickSample* sample = CpuProfiler::StartTickSampleEvent(sampler->isolate());
if (sample == NULL) sample = &sample_obj;
if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;
......@@ -863,6 +863,7 @@ class SamplerThread : public Thread {
sampler->SampleStack(sample);
sampler->Tick(sample);
}
CpuProfiler::FinishTickSampleEvent(sampler->isolate());
thread_resume(profiled_thread);
}
......@@ -915,6 +916,11 @@ Sampler::~Sampler() {
}
void Sampler::DoSample() {
// TODO(rogulenko): implement
}
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
......@@ -929,4 +935,12 @@ void Sampler::Stop() {
}
void Sampler::StartSampling() {
}
void Sampler::StopSampling() {
}
} } // namespace v8::internal
......@@ -514,4 +514,12 @@ void ProfileSampler::Stop() {
}
void Sampler::StartSampling() {
}
void Sampler::StopSampling() {
}
} } // namespace v8::internal
......@@ -738,7 +738,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
......@@ -768,6 +768,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
#endif // __NetBSD__
sampler->SampleStack(sample);
sampler->Tick(sample);
CpuProfiler::FinishTickSampleEvent(isolate);
}
......@@ -970,6 +971,11 @@ Sampler::~Sampler() {
}
void Sampler::DoSample() {
// TODO(rogulenko): implement
}
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
......@@ -984,4 +990,12 @@ void Sampler::Stop() {
}
void Sampler::StartSampling() {
}
void Sampler::StopSampling() {
}
} } // namespace v8::internal
......@@ -672,7 +672,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
......@@ -686,6 +686,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sampler->SampleStack(sample);
sampler->Tick(sample);
CpuProfiler::FinishTickSampleEvent(isolate);
}
class Sampler::PlatformData : public Malloced {
......@@ -889,6 +890,11 @@ Sampler::~Sampler() {
}
void Sampler::DoSample() {
// TODO(rogulenko): implement
}
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
......@@ -902,4 +908,13 @@ void Sampler::Stop() {
SetActive(false);
}
void Sampler::StartSampling() {
}
void Sampler::StopSampling() {
}
} } // namespace v8::internal
......@@ -2045,7 +2045,7 @@ class SamplerThread : public Thread {
memset(&context, 0, sizeof(context));
TickSample sample_obj;
TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
TickSample* sample = CpuProfiler::StartTickSampleEvent(sampler->isolate());
if (sample == NULL) sample = &sample_obj;
static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
......@@ -2066,6 +2066,7 @@ class SamplerThread : public Thread {
sampler->SampleStack(sample);
sampler->Tick(sample);
}
CpuProfiler::FinishTickSampleEvent(sampler->isolate());
ResumeThread(profiled_thread);
}
......@@ -2120,6 +2121,11 @@ Sampler::~Sampler() {
}
void Sampler::DoSample() {
// TODO(rogulenko): implement
}
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
......@@ -2134,4 +2140,12 @@ void Sampler::Stop() {
}
void Sampler::StartSampling() {
}
void Sampler::StopSampling() {
}
} } // namespace v8::internal
......@@ -749,6 +749,9 @@ class Sampler {
IncSamplesTaken();
}
// Performs platform-specific stack sampling.
void DoSample();
// This method is called for each sampling period with the current
// program counter.
virtual void Tick(TickSample* sample) = 0;
......@@ -757,10 +760,28 @@ class Sampler {
void Start();
void Stop();
// Is the sampler used for profiling?
bool IsProfiling() const { return NoBarrier_Load(&profiling_) > 0; }
void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }
// Whether the sampling thread should use this Sampler for CPU profiling?
bool IsProfiling() const {
return NoBarrier_Load(&profiling_) > 0 &&
!NoBarrier_Load(&has_processing_thread_);
}
// Perform platform-specific initialization before DoSample() may be invoked.
void StartSampling();
// Perform platform-specific cleanup after sampling.
void StopSampling();
void IncreaseProfilingDepth() {
if (NoBarrier_AtomicIncrement(&profiling_, 1) == 1) {
StartSampling();
}
}
void DecreaseProfilingDepth() {
if (!NoBarrier_AtomicIncrement(&profiling_, -1)) {
StopSampling();
}
}
void SetHasProcessingThread(bool value) {
NoBarrier_Store(&has_processing_thread_, value);
}
// Whether the sampler is running (that is, consumes resources).
bool IsActive() const { return NoBarrier_Load(&active_); }
......@@ -787,6 +808,7 @@ class Sampler {
const int interval_;
Atomic32 profiling_;
Atomic32 active_;
Atomic32 has_processing_thread_;
PlatformData* data_; // Platform specific data.
int samples_taken_; // Counts stack samples taken.
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
......
......@@ -5,6 +5,7 @@
#include "v8.h"
#include "cpu-profiler-inl.h"
#include "cctest.h"
#include "platform.h"
#include "../include/v8-profiler.h"
using i::CodeEntry;
......@@ -20,7 +21,7 @@ using i::TokenEnumerator;
TEST(StartStop) {
CpuProfilesCollection profiles;
ProfileGenerator generator(&profiles);
ProfilerEventsProcessor processor(&generator);
ProfilerEventsProcessor processor(&generator, NULL, 1000);
processor.Start();
processor.Stop();
processor.Join();
......@@ -38,11 +39,13 @@ static inline i::Address ToAddress(int n) {
return reinterpret_cast<i::Address>(n);
}
static void EnqueueTickSampleEvent(ProfilerEventsProcessor* proc,
i::Address frame1,
i::Address frame2 = NULL,
i::Address frame3 = NULL) {
i::TickSample* sample = proc->TickSampleEvent();
static void AddTickSampleEvent(ProfilerEventsProcessor* processor,
i::Address frame1,
i::Address frame2 = NULL,
i::Address frame3 = NULL) {
i::TickSample* sample;
i::OS::Sleep(20);
while ((sample = processor->StartTickSampleEvent()) == NULL) i::OS::Sleep(20);
sample->pc = frame1;
sample->tos = frame1;
sample->frames_count = 0;
......@@ -54,6 +57,7 @@ static void EnqueueTickSampleEvent(ProfilerEventsProcessor* proc,
sample->stack[1] = frame3;
sample->frames_count = 2;
}
processor->FinishTickSampleEvent();
}
namespace {
......@@ -81,7 +85,7 @@ TEST(CodeEvents) {
CpuProfilesCollection profiles;
profiles.StartProfiling("", 1);
ProfileGenerator generator(&profiles);
ProfilerEventsProcessor processor(&generator);
ProfilerEventsProcessor processor(&generator, NULL, 1000);
processor.Start();
// Enqueue code creation events.
......@@ -108,8 +112,8 @@ TEST(CodeEvents) {
processor.CodeMoveEvent(ToAddress(0x1400), ToAddress(0x1500));
processor.CodeCreateEvent(i::Logger::STUB_TAG, 3, ToAddress(0x1600), 0x10);
processor.CodeCreateEvent(i::Logger::STUB_TAG, 4, ToAddress(0x1605), 0x10);
// Enqueue a tick event to enable code events processing.
EnqueueTickSampleEvent(&processor, ToAddress(0x1000));
// Add a tick event to enable code events processing.
AddTickSampleEvent(&processor, ToAddress(0x1000));
processor.Stop();
processor.Join();
......@@ -142,7 +146,7 @@ TEST(TickEvents) {
CpuProfilesCollection profiles;
profiles.StartProfiling("", 1);
ProfileGenerator generator(&profiles);
ProfilerEventsProcessor processor(&generator);
ProfilerEventsProcessor processor(&generator, NULL, 1000);
processor.Start();
processor.CodeCreateEvent(i::Logger::BUILTIN_TAG,
......@@ -154,12 +158,12 @@ TEST(TickEvents) {
"ddd",
ToAddress(0x1400),
0x80);
EnqueueTickSampleEvent(&processor, ToAddress(0x1210));
EnqueueTickSampleEvent(&processor, ToAddress(0x1305), ToAddress(0x1220));
EnqueueTickSampleEvent(&processor,
ToAddress(0x1404),
ToAddress(0x1305),
ToAddress(0x1230));
AddTickSampleEvent(&processor, ToAddress(0x1210));
AddTickSampleEvent(&processor, ToAddress(0x1305), ToAddress(0x1220));
AddTickSampleEvent(&processor,
ToAddress(0x1404),
ToAddress(0x1305),
ToAddress(0x1230));
processor.Stop();
processor.Join();
......@@ -232,7 +236,7 @@ TEST(Issue1398) {
CpuProfilesCollection profiles;
profiles.StartProfiling("", 1);
ProfileGenerator generator(&profiles);
ProfilerEventsProcessor processor(&generator);
ProfilerEventsProcessor processor(&generator, NULL, 1000);
processor.Start();
processor.CodeCreateEvent(i::Logger::BUILTIN_TAG,
......@@ -240,13 +244,14 @@ TEST(Issue1398) {
ToAddress(0x1200),
0x80);
i::TickSample* sample = processor.TickSampleEvent();
i::TickSample* sample = processor.StartTickSampleEvent();
sample->pc = ToAddress(0x1200);
sample->tos = 0;
sample->frames_count = i::TickSample::kMaxFramesCount;
for (int i = 0; i < sample->frames_count; ++i) {
sample->stack[i] = ToAddress(0x1200);
}
processor.FinishTickSampleEvent();
processor.Stop();
processor.Join();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment