Commit 1596afa7 authored by yurys@chromium.org

Move *BSD and Solaris Sampler implementation into sampler.cc

This is the first step toward having the profiler sampler implementation in one file, with platform-specific capabilities guarded by #ifdef. At the moment very similar implementations are scattered over the platform-*.cc files, which makes it hard to see the differences and to make changes.

The next steps will be to merge the win32, Mac OS X and Cygwin implementations into sampler.cc as well; they suspend the profiled thread instead of sending it a signal, but apart from that the logic is pretty much the same. After that I'm going to move the sampler-related code from log.* into sampler.*

BUG=None

Review URL: https://codereview.chromium.org/13852005

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@14265 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 591a8ec8
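For illustration, here is a minimal sketch (not part of the commit) of the #ifdef-guarded, single-file shape this change moves toward. The per-platform context fields are taken from the handlers in the diff below; the helper name, the Address typedef and the _GNU_SOURCE guard are illustrative assumptions:

// Sketch only: one compilation unit choosing at compile time the
// platform-specific way to read the program counter from a signal context.
#if defined(__linux__) && !defined(_GNU_SOURCE)
#define _GNU_SOURCE  // glibc's <ucontext.h> only exposes REG_EIP with this set
#endif
#include <signal.h>
#include <ucontext.h>

typedef unsigned char* Address;  // stand-in for V8's Address

static Address ProgramCounterFromContext(void* context) {  // hypothetical helper
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
#if defined(__linux__) && defined(__i386__)
  return reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[REG_EIP]);
#elif defined(__FreeBSD__) && defined(__i386__)
  return reinterpret_cast<Address>(ucontext->uc_mcontext.mc_eip);
#elif defined(__NetBSD__) && defined(__i386__)
  return reinterpret_cast<Address>(ucontext->uc_mcontext.__gregs[_REG_EIP]);
#elif defined(__sun)
  return reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[REG_PC]);
#else
  return 0;  // OpenBSD, other architectures, etc. add their own branch here
#endif
}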
@@ -170,11 +170,6 @@ class Ticker: public Sampler {
if (IsActive()) Stop();
}
protected:
virtual void DoSampleStack(TickSample* sample) {
StackTracer::Trace(isolate(), sample);
}
private:
Profiler* profiler_;
};
@@ -56,7 +56,6 @@
#include "platform-posix.h"
#include "platform.h"
#include "simulator.h"
#include "vm-state-inl.h"
@@ -683,200 +682,6 @@ Semaphore* OS::CreateSemaphore(int count) {
}
static pthread_t GetThreadID() {
pthread_t thread_id = pthread_self();
return thread_id;
}
class Sampler::PlatformData : public Malloced {
public:
PlatformData()
: vm_tid_(GetThreadID()),
profiled_thread_id_(ThreadId::Current()) {}
pthread_t vm_tid() const { return vm_tid_; }
ThreadId profiled_thread_id() { return profiled_thread_id_; }
private:
pthread_t vm_tid_;
ThreadId profiled_thread_id_;
};
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
Isolate* isolate = Isolate::UncheckedCurrent();
if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
// We require a fully initialized and entered isolate.
return;
}
if (v8::Locker::IsActive() &&
!isolate->thread_manager()->IsLockedByCurrentThread()) {
return;
}
Sampler* sampler = isolate->logger()->sampler();
if (sampler == NULL || !sampler->IsActive()) return;
#if defined(USE_SIMULATOR)
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
ThreadId thread_id = sampler->platform_data()->profiled_thread_id();
Isolate::PerIsolateThreadData* per_thread_data = isolate->
FindPerThreadDataForThread(thread_id);
if (!per_thread_data) return;
Simulator* sim = per_thread_data->simulator();
// Check if there is an active simulator before allocating TickSample.
if (!sim) return;
#endif
#endif // USE_SIMULATOR
TickSample sample_obj;
TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
sample->state = isolate->current_vm_state();
#if defined(USE_SIMULATOR)
#if V8_TARGET_ARCH_ARM
sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::r11));
#elif V8_TARGET_ARCH_MIPS
sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::fp));
#endif
#else
#if V8_HOST_ARCH_IA32
sample->pc = reinterpret_cast<Address>(mcontext.mc_eip);
sample->sp = reinterpret_cast<Address>(mcontext.mc_esp);
sample->fp = reinterpret_cast<Address>(mcontext.mc_ebp);
#elif V8_HOST_ARCH_X64
sample->pc = reinterpret_cast<Address>(mcontext.mc_rip);
sample->sp = reinterpret_cast<Address>(mcontext.mc_rsp);
sample->fp = reinterpret_cast<Address>(mcontext.mc_rbp);
#elif V8_HOST_ARCH_ARM
sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
#endif // V8_HOST_ARCH_*
#endif // USE_SIMULATOR
sampler->SampleStack(sample);
sampler->Tick(sample);
}
class SignalSender : public Thread {
public:
static const int kSignalSenderStackSize = 64 * KB;
explicit SignalSender(int interval)
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
interval_(interval) {}
static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
static void TearDown() { delete mutex_; }
static void AddActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
// Install a signal handler.
struct sigaction sa;
sa.sa_sigaction = ProfilerSignalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART | SA_SIGINFO;
signal_handler_installed_ =
(sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
// Start a thread that sends the SIGPROF signal to VM threads.
instance_ = new SignalSender(sampler->interval());
instance_->StartSynchronously();
} else {
ASSERT(instance_->interval_ == sampler->interval());
}
}
static void RemoveActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
instance_->Join();
delete instance_;
instance_ = NULL;
// Restore the old signal handler.
if (signal_handler_installed_) {
sigaction(SIGPROF, &old_signal_handler_, 0);
signal_handler_installed_ = false;
}
}
}
// Implement Thread::Run().
virtual void Run() {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
}
Sleep(); // TODO(svenpanne) Figure out if OS::Sleep(interval_) is enough.
}
}
static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
if (!sampler->IsProfiling()) return;
SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
void SendProfilingSignal(pthread_t tid) {
if (!signal_handler_installed_) return;
pthread_kill(tid, SIGPROF);
}
void Sleep() {
// Convert ms to us and subtract 100 us to compensate for delays
// occurring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
int result = usleep(interval);
#ifdef DEBUG
if (result != 0 && errno != EINTR) {
fprintf(stderr,
"SignalSender usleep error; interval = %u, errno = %d\n",
interval,
errno);
ASSERT(result == 0 || errno == EINTR);
}
#endif
USE(result);
}
const int interval_;
// Protects the process wide state below.
static Mutex* mutex_;
static SignalSender* instance_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
private:
DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
Mutex* SignalSender::mutex_ = NULL;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
void OS::SetUp() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@@ -886,44 +691,12 @@ void OS::SetUp() {
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
limit_mutex = CreateMutex();
SignalSender::SetUp();
}
void OS::TearDown() {
SignalSender::TearDown();
delete limit_mutex;
}
Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
data_ = new PlatformData;
}
Sampler::~Sampler() {
ASSERT(!IsActive());
delete data_;
}
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
SignalSender::AddActiveSampler(this);
}
void Sampler::Stop() {
ASSERT(IsActive());
SignalSender::RemoveActiveSampler(this);
SetActive(false);
}
} } // namespace v8::internal
@@ -68,7 +68,6 @@
#include "platform-posix.h"
#include "platform.h"
#include "simulator.h"
#include "v8threads.h"
#include "vm-state-inl.h"
@@ -992,320 +991,6 @@ Semaphore* OS::CreateSemaphore(int count) {
}
#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T)
// Not all versions of Android's C library provide ucontext_t.
// Detect this and provide custom but compatible definitions. Note that these
// follow the GLibc naming convention to access register values from
// mcontext_t.
//
// See http://code.google.com/p/android/issues/detail?id=34784
#if defined(__arm__)
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used by V8, don't define them here.
} ucontext_t;
#elif defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
typedef struct {
uint32_t regmask;
uint32_t status;
uint64_t pc;
uint64_t gregs[32];
uint64_t fpregs[32];
uint32_t acx;
uint32_t fpc_csr;
uint32_t fpc_eir;
uint32_t used_math;
uint32_t dsp;
uint64_t mdhi;
uint64_t mdlo;
uint32_t hi1;
uint32_t lo1;
uint32_t hi2;
uint32_t lo2;
uint32_t hi3;
uint32_t lo3;
} mcontext_t;
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used by V8, don't define them here.
} ucontext_t;
#elif defined(__i386__)
// x86 version for Android.
typedef struct {
uint32_t gregs[19];
void* fpregs;
uint32_t oldmask;
uint32_t cr2;
} mcontext_t;
typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used by V8, don't define them here.
} ucontext_t;
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
#endif
#endif // __ANDROID__ && !defined(__BIONIC_HAVE_UCONTEXT_T)
static int GetThreadID() {
#if defined(__ANDROID__)
// Android's C library provides gettid(2).
return gettid();
#else
// Glibc doesn't provide a wrapper for gettid(2).
return syscall(SYS_gettid);
#endif
}
class Sampler::PlatformData : public Malloced {
public:
PlatformData()
: vm_tid_(GetThreadID()),
profiled_thread_id_(ThreadId::Current()) {}
pthread_t vm_tid() const { return vm_tid_; }
ThreadId profiled_thread_id() { return profiled_thread_id_; }
private:
pthread_t vm_tid_;
ThreadId profiled_thread_id_;
};
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
#if defined(__native_client__)
// As Native Client does not support signal handling, profiling
// is disabled.
return;
#else
USE(info);
if (signal != SIGPROF) return;
Isolate* isolate = Isolate::UncheckedCurrent();
if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
// We require a fully initialized and entered isolate.
return;
}
if (v8::Locker::IsActive() &&
!isolate->thread_manager()->IsLockedByCurrentThread()) {
return;
}
Sampler* sampler = isolate->logger()->sampler();
if (sampler == NULL || !sampler->IsActive()) return;
#if defined(USE_SIMULATOR)
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
ThreadId thread_id = sampler->platform_data()->profiled_thread_id();
Isolate::PerIsolateThreadData* per_thread_data = isolate->
FindPerThreadDataForThread(thread_id);
if (!per_thread_data) return;
Simulator* sim = per_thread_data->simulator();
// Check if there is an active simulator before allocating TickSample.
if (!sim) return;
#endif
#endif // USE_SIMULATOR
TickSample sample_obj;
TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
if (sample == NULL) sample = &sample_obj;
#if defined(USE_SIMULATOR)
#if V8_TARGET_ARCH_ARM
sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::r11));
#elif V8_TARGET_ARCH_MIPS
sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::fp));
#endif
#else
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
sample->state = isolate->current_vm_state();
#if V8_HOST_ARCH_IA32
sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
#if defined(__GLIBC__) && !defined(__UCLIBC__) && \
(__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
// Old GLibc ARM versions used a gregs[] array to access the register
// values from mcontext_t.
sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
#else
sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
// (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
#elif V8_HOST_ARCH_MIPS
sample->pc = reinterpret_cast<Address>(mcontext.pc);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#endif // V8_HOST_ARCH_*
#endif // USE_SIMULATOR
sampler->SampleStack(sample);
sampler->Tick(sample);
#endif // __native_client__
}
class SignalSender : public Thread {
public:
static const int kSignalSenderStackSize = 64 * KB;
explicit SignalSender(int interval)
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
vm_tgid_(getpid()),
interval_(interval) {}
static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
static void TearDown() { delete mutex_; }
static void InstallSignalHandler() {
struct sigaction sa;
sa.sa_sigaction = ProfilerSignalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART | SA_SIGINFO;
signal_handler_installed_ =
(sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
}
static void RestoreSignalHandler() {
if (signal_handler_installed_) {
sigaction(SIGPROF, &old_signal_handler_, 0);
signal_handler_installed_ = false;
}
}
static void AddActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
// Start a thread that will send the SIGPROF signal to VM threads
// when CPU profiling is enabled.
instance_ = new SignalSender(sampler->interval());
instance_->StartSynchronously();
} else {
ASSERT(instance_->interval_ == sampler->interval());
}
}
static void RemoveActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
instance_->Join();
delete instance_;
instance_ = NULL;
RestoreSignalHandler();
}
}
// Implement Thread::Run().
virtual void Run() {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
if (!signal_handler_installed_) InstallSignalHandler();
SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
} else {
if (signal_handler_installed_) RestoreSignalHandler();
}
Sleep(); // TODO(svenpanne) Figure out if OS::Sleep(interval_) is enough.
}
}
static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
if (!sampler->IsProfiling()) return;
SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
void SendProfilingSignal(int tid) {
if (!signal_handler_installed_) return;
// Glibc doesn't provide a wrapper for tgkill(2).
#if defined(ANDROID)
syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
#else
int result = syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
USE(result);
ASSERT(result == 0);
#endif
}
void Sleep() {
// Convert ms to us and subtract 100 us to compensate for delays
// occurring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
#if defined(ANDROID)
usleep(interval);
#else
int result = usleep(interval);
#ifdef DEBUG
if (result != 0 && errno != EINTR) {
fprintf(stderr,
"SignalSender usleep error; interval = %u, errno = %d\n",
interval,
errno);
ASSERT(result == 0 || errno == EINTR);
}
#endif // DEBUG
USE(result);
#endif // ANDROID
}
const int vm_tgid_;
const int interval_;
// Protects the process wide state below.
static Mutex* mutex_;
static SignalSender* instance_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
private:
DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
Mutex* SignalSender::mutex_ = NULL;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
void OS::SetUp() {
// Seed the random number generator. We preserve microsecond resolution.
uint64_t seed = Ticks() ^ (getpid() << 16);
@@ -1330,44 +1015,12 @@ void OS::SetUp() {
#endif
}
#endif
SignalSender::SetUp();
}
void OS::TearDown() {
SignalSender::TearDown();
delete limit_mutex;
}
Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
data_ = new PlatformData;
}
Sampler::~Sampler() {
ASSERT(!IsActive());
delete data_;
}
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
SignalSender::AddActiveSampler(this);
}
void Sampler::Stop() {
ASSERT(IsActive());
SignalSender::RemoveActiveSampler(this);
SetActive(false);
}
} } // namespace v8::internal
@@ -53,7 +53,6 @@
#include "platform-posix.h"
#include "platform.h"
#include "simulator.h"
#include "v8threads.h"
#include "vm-state-inl.h"
@@ -729,260 +728,17 @@ Semaphore* OS::CreateSemaphore(int count) {
}
static pthread_t GetThreadID() {
return pthread_self();
}
class Sampler::PlatformData : public Malloced {
public:
PlatformData()
: vm_tid_(GetThreadID()),
profiled_thread_id_(ThreadId::Current()) {}
pthread_t vm_tid() const { return vm_tid_; }
ThreadId profiled_thread_id() { return profiled_thread_id_; }
private:
pthread_t vm_tid_;
ThreadId profiled_thread_id_;
};
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
Isolate* isolate = Isolate::UncheckedCurrent();
if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
// We require a fully initialized and entered isolate.
return;
}
if (v8::Locker::IsActive() &&
!isolate->thread_manager()->IsLockedByCurrentThread()) {
return;
}
Sampler* sampler = isolate->logger()->sampler();
if (sampler == NULL || !sampler->IsActive()) return;
#if defined(USE_SIMULATOR)
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
ThreadId thread_id = sampler->platform_data()->profiled_thread_id();
Isolate::PerIsolateThreadData* per_thread_data = isolate->
FindPerThreadDataForThread(thread_id);
if (!per_thread_data) return;
Simulator* sim = per_thread_data->simulator();
// Check if there is an active simulator before allocating TickSample.
if (!sim) return;
#endif
#endif // USE_SIMULATOR
TickSample sample_obj;
TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
sample->state = isolate->current_vm_state();
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
#if defined(USE_SIMULATOR)
#if V8_TARGET_ARCH_ARM
sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::r11));
#elif V8_TARGET_ARCH_MIPS
sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::fp));
#endif
#else
#ifdef __NetBSD__
mcontext_t& mcontext = ucontext->uc_mcontext;
#if V8_HOST_ARCH_IA32
sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
#elif V8_HOST_ARCH_X64
sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
#endif // V8_HOST_ARCH
#else // OpenBSD
#if V8_HOST_ARCH_IA32
sample->pc = reinterpret_cast<Address>(ucontext->sc_eip);
sample->sp = reinterpret_cast<Address>(ucontext->sc_esp);
sample->fp = reinterpret_cast<Address>(ucontext->sc_ebp);
#elif V8_HOST_ARCH_X64
sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
#endif // V8_HOST_ARCH
#endif // __NetBSD__
#endif // USE_SIMULATOR
sampler->SampleStack(sample);
sampler->Tick(sample);
}
class SignalSender : public Thread {
public:
static const int kSignalSenderStackSize = 64 * KB;
explicit SignalSender(int interval)
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
vm_tgid_(getpid()),
interval_(interval) {}
static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
static void TearDown() { delete mutex_; }
static void InstallSignalHandler() {
struct sigaction sa;
sa.sa_sigaction = ProfilerSignalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART | SA_SIGINFO;
signal_handler_installed_ =
(sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
}
static void RestoreSignalHandler() {
if (signal_handler_installed_) {
sigaction(SIGPROF, &old_signal_handler_, 0);
signal_handler_installed_ = false;
}
}
static void AddActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
// Start a thread that will send the SIGPROF signal to VM threads
// when CPU profiling is enabled.
instance_ = new SignalSender(sampler->interval());
instance_->StartSynchronously();
} else {
ASSERT(instance_->interval_ == sampler->interval());
}
}
static void RemoveActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
instance_->Join();
delete instance_;
instance_ = NULL;
RestoreSignalHandler();
}
}
// Implement Thread::Run().
virtual void Run() {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
if (!signal_handler_installed_) InstallSignalHandler();
SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
} else if (signal_handler_installed_) {
RestoreSignalHandler();
}
Sleep(); // TODO(svenpanne) Figure out if OS::Sleep(interval_) is enough.
}
}
static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
if (!sampler->IsProfiling()) return;
SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
void SendProfilingSignal(pthread_t tid) {
if (!signal_handler_installed_) return;
pthread_kill(tid, SIGPROF);
}
void Sleep() {
// Convert ms to us and subtract 100 us to compensate for delays
// occurring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
int result = usleep(interval);
#ifdef DEBUG
if (result != 0 && errno != EINTR) {
fprintf(stderr,
"SignalSender usleep error; interval = %u, errno = %d\n",
interval,
errno);
ASSERT(result == 0 || errno == EINTR);
}
#endif
USE(result);
}
const int vm_tgid_;
const int interval_;
// Protects the process wide state below.
static Mutex* mutex_;
static SignalSender* instance_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
private:
DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
Mutex* SignalSender::mutex_ = NULL;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
void OS::SetUp() {
// Seed the random number generator. We preserve microsecond resolution.
uint64_t seed = Ticks() ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
limit_mutex = CreateMutex();
SignalSender::SetUp();
}
void OS::TearDown() {
SignalSender::TearDown();
delete limit_mutex;
}
Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
data_ = new PlatformData;
}
Sampler::~Sampler() {
ASSERT(!IsActive());
delete data_;
}
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
SignalSender::AddActiveSampler(this);
}
void Sampler::Stop() {
ASSERT(IsActive());
SignalSender::RemoveActiveSampler(this);
SetActive(false);
}
} } // namespace v8::internal
@@ -54,7 +54,6 @@
#include "platform-posix.h"
#include "platform.h"
#include "simulator.h"
#include "v8threads.h"
#include "vm-state-inl.h"
@@ -662,198 +661,6 @@ Semaphore* OS::CreateSemaphore(int count) {
}
static pthread_t GetThreadID() {
return pthread_self();
}
class Sampler::PlatformData : public Malloced {
public:
PlatformData()
: vm_tid_(GetThreadID()),
profiled_thread_id_(ThreadId::Current()) {}
pthread_t vm_tid() const { return vm_tid_; }
ThreadId profiled_thread_id() { return profiled_thread_id_; }
private:
pthread_t vm_tid_;
ThreadId profiled_thread_id_;
};
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
Isolate* isolate = Isolate::UncheckedCurrent();
if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
// We require a fully initialized and entered isolate.
return;
}
if (v8::Locker::IsActive() &&
!isolate->thread_manager()->IsLockedByCurrentThread()) {
return;
}
Sampler* sampler = isolate->logger()->sampler();
if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
if (sample == NULL) sample = &sample_obj;
#if defined(USE_SIMULATOR)
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
ThreadId thread_id = sampler->platform_data()->profiled_thread_id();
Isolate::PerIsolateThreadData* per_thread_data = isolate->
FindPerThreadDataForThread(thread_id);
if (!per_thread_data) return;
Simulator* sim = per_thread_data->simulator();
// Check if there is an active simulator before allocating TickSample.
if (!sim) return;
#endif
#endif // USE_SIMULATOR
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
sample->state = isolate->current_vm_state();
#if defined(USE_SIMULATOR)
#if V8_TARGET_ARCH_ARM
sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::r11));
#elif V8_TARGET_ARCH_MIPS
sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::fp));
#endif
#else
sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
#endif // USE_SIMULATOR
sampler->SampleStack(sample);
sampler->Tick(sample);
}
class SignalSender : public Thread {
public:
static const int kSignalSenderStackSize = 64 * KB;
explicit SignalSender(int interval)
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
interval_(interval) {}
static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
static void TearDown() { delete mutex_; }
static void InstallSignalHandler() {
struct sigaction sa;
sa.sa_sigaction = ProfilerSignalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART | SA_SIGINFO;
signal_handler_installed_ =
(sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
}
static void RestoreSignalHandler() {
if (signal_handler_installed_) {
sigaction(SIGPROF, &old_signal_handler_, 0);
signal_handler_installed_ = false;
}
}
static void AddActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
// Start a thread that will send the SIGPROF signal to VM threads
// when CPU profiling is enabled.
instance_ = new SignalSender(sampler->interval());
instance_->StartSynchronously();
} else {
ASSERT(instance_->interval_ == sampler->interval());
}
}
static void RemoveActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
instance_->Join();
delete instance_;
instance_ = NULL;
RestoreSignalHandler();
}
}
// Implement Thread::Run().
virtual void Run() {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
if (!signal_handler_installed_) InstallSignalHandler();
SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
} else if (signal_handler_installed_) {
RestoreSignalHandler();
}
Sleep(); // TODO(svenpanne) Figure out if OS::Sleep(interval_) is enough.
}
}
static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
if (!sampler->IsProfiling()) return;
SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
void SendProfilingSignal(pthread_t tid) {
if (!signal_handler_installed_) return;
pthread_kill(tid, SIGPROF);
}
void Sleep() {
// Convert ms to us and subtract 100 us to compensate for delays
// occurring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
int result = usleep(interval);
#ifdef DEBUG
if (result != 0 && errno != EINTR) {
fprintf(stderr,
"SignalSender usleep error; interval = %u, errno = %d\n",
interval,
errno);
ASSERT(result == 0 || errno == EINTR);
}
#endif
USE(result);
}
const int interval_;
// Protects the process wide state below.
static Mutex* mutex_;
static SignalSender* instance_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
private:
DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
Mutex* SignalSender::mutex_ = NULL;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
void OS::SetUp() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@@ -863,43 +670,12 @@ void OS::SetUp() {
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
limit_mutex = CreateMutex();
SignalSender::SetUp();
}
void OS::TearDown() {
SignalSender::TearDown();
delete limit_mutex;
}
Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
data_ = new PlatformData;
}
Sampler::~Sampler() {
ASSERT(!IsActive());
delete data_;
}
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
SignalSender::AddActiveSampler(this);
}
void Sampler::Stop() {
ASSERT(IsActive());
SignalSender::RemoveActiveSampler(this);
SetActive(false);
}
} } // namespace v8::internal
@@ -99,20 +99,13 @@ int random();
#include "atomicops.h"
#include "lazy-instance.h"
#include "platform-tls.h"
#include "sampler.h"
#include "utils.h"
#include "v8globals.h"
namespace v8 {
namespace internal {
// Use AtomicWord for a machine-sized pointer. It is assumed that
// reads and writes of naturally aligned values of this type are atomic.
#if defined(__OpenBSD__) && defined(__i386__)
typedef Atomic32 AtomicWord;
#else
typedef intptr_t AtomicWord;
#endif
class Semaphore;
class Mutex;
@@ -727,91 +720,6 @@ class Socket {
};
// ----------------------------------------------------------------------------
// Sampler
//
// A sampler periodically samples the state of the VM and optionally
// (if used for profiling) the program counter and stack pointer for
// the thread that created it.
// TickSample captures the information collected for each sample.
class TickSample {
public:
TickSample()
: state(OTHER),
pc(NULL),
sp(NULL),
fp(NULL),
external_callback(NULL),
frames_count(0) {}
StateTag state; // The state of the VM.
Address pc; // Instruction pointer.
Address sp; // Stack pointer.
Address fp; // Frame pointer.
Address external_callback;
static const int kMaxFramesCount = 64;
Address stack[kMaxFramesCount]; // Call stack.
int frames_count : 8; // Number of captured frames.
};
class Sampler {
public:
// Initialize sampler.
Sampler(Isolate* isolate, int interval);
virtual ~Sampler();
int interval() const { return interval_; }
// Performs stack sampling.
void SampleStack(TickSample* sample) {
DoSampleStack(sample);
IncSamplesTaken();
}
// This method is called for each sampling period with the current
// program counter.
virtual void Tick(TickSample* sample) = 0;
// Start and stop sampler.
void Start();
void Stop();
// Is the sampler used for profiling?
bool IsProfiling() const { return NoBarrier_Load(&profiling_) > 0; }
void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }
// Whether the sampler is running (that is, consumes resources).
bool IsActive() const { return NoBarrier_Load(&active_); }
Isolate* isolate() { return isolate_; }
// Used in tests to make sure that stack sampling is performed.
int samples_taken() const { return samples_taken_; }
void ResetSamplesTaken() { samples_taken_ = 0; }
class PlatformData;
PlatformData* data() { return data_; }
PlatformData* platform_data() { return data_; }
protected:
virtual void DoSampleStack(TickSample* sample) = 0;
private:
void SetActive(bool value) { NoBarrier_Store(&active_, value); }
void IncSamplesTaken() { if (++samples_taken_ < 0) samples_taken_ = 0; }
Isolate* isolate_;
const int interval_;
Atomic32 profiling_;
Atomic32 active_;
PlatformData* data_; // Platform specific data.
int samples_taken_; // Counts stack samples taken.
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
};
} } // namespace v8::internal
#endif // V8_PLATFORM_H_
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) \
|| defined(__NetBSD__) || defined(__sun)
#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <sys/time.h>
#include <sys/syscall.h>
#include <ucontext.h>
#include <unistd.h>
#endif
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>
#endif
#include "v8.h"
#include "log.h"
#include "platform.h"
#include "simulator.h"
#include "v8threads.h"
namespace v8 {
namespace internal {
#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) \
|| defined(__NetBSD__) || defined(__sun)
#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T)
// Not all versions of Android's C library provide ucontext_t.
// Detect this and provide custom but compatible definitions. Note that these
// follow the GLibc naming convention to access register values from
// mcontext_t.
//
// See http://code.google.com/p/android/issues/detail?id=34784
#if defined(__arm__)
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used by V8, don't define them here.
} ucontext_t;
#elif defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
typedef struct {
uint32_t regmask;
uint32_t status;
uint64_t pc;
uint64_t gregs[32];
uint64_t fpregs[32];
uint32_t acx;
uint32_t fpc_csr;
uint32_t fpc_eir;
uint32_t used_math;
uint32_t dsp;
uint64_t mdhi;
uint64_t mdlo;
uint32_t hi1;
uint32_t lo1;
uint32_t hi2;
uint32_t lo2;
uint32_t hi3;
uint32_t lo3;
} mcontext_t;
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used by V8, don't define them here.
} ucontext_t;
#elif defined(__i386__)
// x86 version for Android.
typedef struct {
uint32_t gregs[19];
void* fpregs;
uint32_t oldmask;
uint32_t cr2;
} mcontext_t;
typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used by V8, don't define them here.
} ucontext_t;
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
#endif
#endif // __ANDROID__ && !defined(__BIONIC_HAVE_UCONTEXT_T)
static pthread_t GetThreadID() {
#if defined(__ANDROID__)
// Android's C library provides gettid(2).
return gettid();
#elif defined(__linux__)
// Glibc doesn't provide a wrapper for gettid(2).
return syscall(SYS_gettid);
#elif defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) \
|| defined(__sun)
return pthread_self();
#endif
}
class Sampler::PlatformData : public Malloced {
public:
PlatformData()
: vm_tid_(GetThreadID()),
profiled_thread_id_(ThreadId::Current()) {}
pthread_t vm_tid() const { return vm_tid_; }
ThreadId profiled_thread_id() { return profiled_thread_id_; }
private:
pthread_t vm_tid_;
ThreadId profiled_thread_id_;
};
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
#if defined(__native_client__)
// As Native Client does not support signal handling, profiling
// is disabled.
return;
#else
USE(info);
if (signal != SIGPROF) return;
Isolate* isolate = Isolate::UncheckedCurrent();
if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
// We require a fully initialized and entered isolate.
return;
}
if (v8::Locker::IsActive() &&
!isolate->thread_manager()->IsLockedByCurrentThread()) {
return;
}
Sampler* sampler = isolate->logger()->sampler();
if (sampler == NULL || !sampler->IsActive()) return;
#if defined(USE_SIMULATOR)
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
ThreadId thread_id = sampler->platform_data()->profiled_thread_id();
Isolate::PerIsolateThreadData* per_thread_data = isolate->
FindPerThreadDataForThread(thread_id);
if (!per_thread_data) return;
Simulator* sim = per_thread_data->simulator();
// Check if there is an active simulator before allocating TickSample.
if (!sim) return;
#endif
#endif // USE_SIMULATOR
TickSample sample_obj;
TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
if (sample == NULL) sample = &sample_obj;
#if defined(USE_SIMULATOR)
#if V8_TARGET_ARCH_ARM
sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::r11));
#elif V8_TARGET_ARCH_MIPS
sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::fp));
#endif // V8_TARGET_ARCH_*
#else
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
sample->state = isolate->current_vm_state();
#if defined(__linux__)
#if V8_HOST_ARCH_IA32
sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
#if defined(__GLIBC__) && !defined(__UCLIBC__) && \
(__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
// Old GLibc ARM versions used a gregs[] array to access the register
// values from mcontext_t.
sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
#else
sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
// (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
#elif V8_HOST_ARCH_MIPS
sample->pc = reinterpret_cast<Address>(mcontext.pc);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#endif // V8_HOST_ARCH_*
#elif defined(__FreeBSD__)
#if V8_HOST_ARCH_IA32
sample->pc = reinterpret_cast<Address>(mcontext.mc_eip);
sample->sp = reinterpret_cast<Address>(mcontext.mc_esp);
sample->fp = reinterpret_cast<Address>(mcontext.mc_ebp);
#elif V8_HOST_ARCH_X64
sample->pc = reinterpret_cast<Address>(mcontext.mc_rip);
sample->sp = reinterpret_cast<Address>(mcontext.mc_rsp);
sample->fp = reinterpret_cast<Address>(mcontext.mc_rbp);
#elif V8_HOST_ARCH_ARM
sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
#endif // V8_HOST_ARCH_*
#elif defined(__NetBSD__)
#if V8_HOST_ARCH_IA32
sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
#elif V8_HOST_ARCH_X64
sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
#endif // V8_HOST_ARCH_*
#elif defined(__OpenBSD__)
USE(mcontext);
#if V8_HOST_ARCH_IA32
sample->pc = reinterpret_cast<Address>(ucontext->sc_eip);
sample->sp = reinterpret_cast<Address>(ucontext->sc_esp);
sample->fp = reinterpret_cast<Address>(ucontext->sc_ebp);
#elif V8_HOST_ARCH_X64
sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
#endif // V8_HOST_ARCH_*
#elif defined(__sun)
sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
#endif // __sun
#endif // USE_SIMULATOR
sampler->SampleStack(sample);
sampler->Tick(sample);
#endif // __native_client__
}
class SignalSender : public Thread {
public:
static const int kSignalSenderStackSize = 64 * KB;
explicit SignalSender(int interval)
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
vm_tgid_(getpid()),
interval_(interval) {}
static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
static void TearDown() { delete mutex_; }
static void InstallSignalHandler() {
struct sigaction sa;
sa.sa_sigaction = ProfilerSignalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART | SA_SIGINFO;
signal_handler_installed_ =
(sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
}
static void RestoreSignalHandler() {
if (signal_handler_installed_) {
sigaction(SIGPROF, &old_signal_handler_, 0);
signal_handler_installed_ = false;
}
}
static void AddActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
// Start a thread that will send the SIGPROF signal to VM threads
// when CPU profiling is enabled.
instance_ = new SignalSender(sampler->interval());
instance_->StartSynchronously();
} else {
ASSERT(instance_->interval_ == sampler->interval());
}
}
static void RemoveActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
instance_->Join();
delete instance_;
instance_ = NULL;
RestoreSignalHandler();
}
}
// Implement Thread::Run().
virtual void Run() {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
if (!signal_handler_installed_) InstallSignalHandler();
SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
} else {
if (signal_handler_installed_) RestoreSignalHandler();
}
Sleep(); // TODO(svenpanne) Figure out if OS::Sleep(interval_) is enough.
}
}
static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
if (!sampler->IsProfiling()) return;
SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
void SendProfilingSignal(int tid) {
if (!signal_handler_installed_) return;
// Glibc doesn't provide a wrapper for tgkill(2).
#if defined(ANDROID)
syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
#elif defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) \
|| defined(__sun)
pthread_kill(tid, SIGPROF);
#else
int result = syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
USE(result);
ASSERT(result == 0);
#endif
}
void Sleep() {
// Convert ms to us and subtract 100 us to compensate for delays
// occurring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
#if defined(ANDROID)
usleep(interval);
#else
int result = usleep(interval);
#ifdef DEBUG
if (result != 0 && errno != EINTR) {
fprintf(stderr,
"SignalSender usleep error; interval = %u, errno = %d\n",
interval,
errno);
ASSERT(result == 0 || errno == EINTR);
}
#endif // DEBUG
USE(result);
#endif // ANDROID
}
const int vm_tgid_;
const int interval_;
// Protects the process wide state below.
static Mutex* mutex_;
static SignalSender* instance_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
private:
DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
Mutex* SignalSender::mutex_ = NULL;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
void Sampler::SetUp() {
SignalSender::SetUp();
}
void Sampler::TearDown() {
SignalSender::TearDown();
}
Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
profiling_(false),
active_(false),
samples_taken_(0) {
data_ = new PlatformData;
}
Sampler::~Sampler() {
ASSERT(!IsActive());
delete data_;
}
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
SignalSender::AddActiveSampler(this);
}
void Sampler::Stop() {
ASSERT(IsActive());
SignalSender::RemoveActiveSampler(this);
SetActive(false);
}
#else
void Sampler::SetUp() {
}
void Sampler::TearDown() {
}
#endif // __linux__ || _*BSD__ || __sun
void Sampler::SampleStack(TickSample* sample) {
StackTracer::Trace(isolate_, sample);
if (++samples_taken_ < 0) samples_taken_ = 0;
}
} } // namespace v8::internal
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_SAMPLER_H_
#define V8_SAMPLER_H_
#include "atomicops.h"
#include "v8globals.h"
namespace v8 {
namespace internal {
class Isolate;
// ----------------------------------------------------------------------------
// Sampler
//
// A sampler periodically samples the state of the VM and optionally
// (if used for profiling) the program counter and stack pointer for
// the thread that created it.
// TickSample captures the information collected for each sample.
struct TickSample {
TickSample()
: state(OTHER),
pc(NULL),
sp(NULL),
fp(NULL),
external_callback(NULL),
frames_count(0) {}
StateTag state; // The state of the VM.
Address pc; // Instruction pointer.
Address sp; // Stack pointer.
Address fp; // Frame pointer.
Address external_callback;
static const int kMaxFramesCount = 64;
Address stack[kMaxFramesCount]; // Call stack.
int frames_count : 8; // Number of captured frames.
};
class Sampler {
public:
// Initializes the Sampler support. Called once at VM startup.
static void SetUp();
static void TearDown();
// Initialize sampler.
Sampler(Isolate* isolate, int interval);
virtual ~Sampler();
Isolate* isolate() const { return isolate_; }
int interval() const { return interval_; }
// Performs stack sampling.
void SampleStack(TickSample* sample);
// This method is called for each sampling period with the current
// program counter.
virtual void Tick(TickSample* sample) = 0;
// Start and stop sampler.
void Start();
void Stop();
// Is the sampler used for profiling?
bool IsProfiling() const { return NoBarrier_Load(&profiling_) > 0; }
void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }
// Whether the sampler is running (that is, consumes resources).
bool IsActive() const { return NoBarrier_Load(&active_); }
// Used in tests to make sure that stack sampling is performed.
int samples_taken() const { return samples_taken_; }
void ResetSamplesTaken() { samples_taken_ = 0; }
class PlatformData;
PlatformData* platform_data() const { return data_; }
private:
void SetActive(bool value) { NoBarrier_Store(&active_, value); }
Isolate* isolate_;
const int interval_;
Atomic32 profiling_;
Atomic32 active_;
PlatformData* data_; // Platform specific data.
int samples_taken_; // Counts stack samples taken.
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
};
} } // namespace v8::internal
#endif // V8_SAMPLER_H_
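For context, a hedged usage sketch of the Sampler interface declared above, modeled on the Ticker class from log.cc shown in the first hunk; the ExampleTicker name and the 1 ms interval are illustrative, not part of this commit:

// Hypothetical client of the consolidated sampler API.
class ExampleTicker : public Sampler {
 public:
  ExampleTicker(Isolate* isolate, int interval_ms)
      : Sampler(isolate, interval_ms) {}

  // Called by the SignalSender thread once per sampling period with the
  // registers and stack captured by the SIGPROF handler.
  virtual void Tick(TickSample* sample) {
    // Consume sample->pc, sample->sp, sample->fp and sample->stack here.
  }
};

// Typical lifecycle. Sampler::SetUp()/TearDown() are process-wide and, in
// this commit, are called from V8::InitializeOncePerProcessImpl() and
// V8::TearDown() (see the v8.cc hunk below).
//   Sampler::SetUp();
//   ExampleTicker ticker(isolate, 1);  // sample roughly every 1 ms
//   ticker.Start();
//   ...
//   ticker.Stop();
//   Sampler::TearDown();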
@@ -41,6 +41,7 @@
#include "objects.h"
#include "once.h"
#include "platform.h"
#include "sampler.h"
#include "runtime-profiler.h"
#include "serialize.h"
#include "store-buffer.h"
@@ -123,6 +124,7 @@ void V8::TearDown() {
delete call_completed_callbacks_;
call_completed_callbacks_ = NULL;
Sampler::TearDown();
OS::TearDown();
}
@@ -270,6 +272,7 @@ void V8::InitializeOncePerProcessImpl() {
}
if (FLAG_trace_hydrogen) FLAG_parallel_recompilation = false;
OS::SetUp();
Sampler::SetUp();
CPU::SetUp();
use_crankshaft_ = FLAG_crankshaft
&& !Serializer::enabled()
@@ -163,7 +163,6 @@ class RelocInfo;
class Deserializer;
class MessageLocation;
class ObjectGroup;
class TickSample;
class VirtualMemory;
class Mutex;
@@ -447,6 +447,8 @@
'../../src/runtime.h',
'../../src/safepoint-table.cc',
'../../src/safepoint-table.h',
'../../src/sampler.cc',
'../../src/sampler.h',
'../../src/scanner-character-streams.cc',
'../../src/scanner-character-streams.h',
'../../src/scanner.cc',