Commit a0198c0f authored by lpy's avatar lpy Committed by Commit bot

Reland: Create libsampler as V8 sampler library.

This patch does five things:

1. Extracts sampler as libsampler to provide sampling functionality support.
2. Makes SampleStack virtual so embedders can override the behaviour of sample collecting.
3. Removes sampler.[h|cc].
4. Moves sampling thread into log.cc as workaround to keep the --prof functionality.
5. Creates SamplerManager to manage the relationship between samplers and threads.

The reason we port hashmap.h is that in debug mode, STL containers use
mutexes from a mutex pool, which may lead to a deadlock when they are used
from an asynchronous signal handler.

For the time being, libsampler is only used inside V8.

BUG=v8:4789
LOG=n

Committed: https://crrev.com/06cc9b7c176a6223971deaa9fbcafe1a05058c7b
Cr-Commit-Position: refs/heads/master@{#36527}

Review-Url: https://codereview.chromium.org/1922303002
Cr-Commit-Position: refs/heads/master@{#36532}
parent 6a92d748
......@@ -105,6 +105,11 @@ config("libplatform_config") {
include_dirs = [ "include" ]
}
# This config should be applied to code using the libsampler.
config("libsampler_config") {
include_dirs = [ "include" ]
}
# This config should only be applied to code using V8 and not any V8 code
# itself.
config("external_config") {
......@@ -1329,8 +1334,6 @@ v8_source_set("v8_base") {
"src/profiler/profile-generator-inl.h",
"src/profiler/profile-generator.cc",
"src/profiler/profile-generator.h",
"src/profiler/sampler.cc",
"src/profiler/sampler.h",
"src/profiler/sampling-heap-profiler.cc",
"src/profiler/sampling-heap-profiler.h",
"src/profiler/strings-storage.cc",
......@@ -1816,6 +1819,7 @@ v8_source_set("v8_base") {
defines = []
deps = [
":v8_libbase",
":v8_libsampler",
]
if (is_win) {
......@@ -1970,6 +1974,23 @@ v8_source_set("v8_libplatform") {
]
}
v8_source_set("v8_libsampler") {
sources = [
"src/libsampler/hashmap.h",
"src/libsampler/utils.h",
"src/libsampler/v8-sampler.cc",
"src/libsampler/v8-sampler.h",
]
configs = [ ":internal_config_base" ]
public_configs = [ ":libsampler_config" ]
deps = [
":v8_libbase",
]
}
v8_source_set("fuzzer_support") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
......
......@@ -6331,6 +6331,12 @@ class V8_EXPORT Isolate {
*/
void VisitWeakHandles(PersistentHandleVisitor* visitor);
/**
* Check if this isolate is in use.
* True if at least one thread Enter'ed this isolate.
*/
bool IsInUse();
private:
template <class K, class V, class Traits>
friend class PersistentValueMapBase;
......
......@@ -7793,6 +7793,12 @@ void Isolate::VisitExternalResources(ExternalResourceVisitor* visitor) {
}
// API entry point: reports whether this isolate is in use, i.e. whether at
// least one thread has Enter'ed it (see the declaration in include/v8.h).
bool Isolate::IsInUse() {
  // The public v8::Isolate is a facade over the internal i::Isolate; this
  // cast is the standard API-layer conversion used throughout api.cc.
  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
  return isolate->IsInUse();
}
class VisitorAdapter : public i::ObjectVisitor {
public:
explicit VisitorAdapter(PersistentHandleVisitor* visitor)
......
......@@ -27,10 +27,10 @@
#include "src/ic/stub-cache.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/libsampler/v8-sampler.h"
#include "src/log.h"
#include "src/messages.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/sampler.h"
#include "src/prototype.h"
#include "src/regexp/regexp-stack.h"
#include "src/runtime-profiler.h"
......@@ -2006,7 +2006,7 @@ void Isolate::Deinit() {
}
// We must stop the logger before we tear down other components.
Sampler* sampler = logger_->sampler();
sampler::Sampler* sampler = logger_->sampler();
if (sampler && sampler->IsActive()) sampler->Stop();
delete deoptimizer_data_;
......
include_rules = [
"+include",
"-src",
"+src/base",
"+src/libsampler",
]
\ No newline at end of file
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is ported from src/hashmap.h
#ifndef V8_LIBSAMPLER_HASHMAP_H_
#define V8_LIBSAMPLER_HASHMAP_H_
#include "src/base/bits.h"
#include "src/base/logging.h"
#include "src/libsampler/utils.h"
namespace v8 {
namespace sampler {
// Open-addressing hash map ported from src/hashmap.h so that libsampler does
// not depend on STL containers (see the rationale in the commit message: in
// debug mode STL containers may take pool mutexes, which is unsafe from an
// asynchronous signal handler). Keys and values are untyped pointers owned
// by the caller; the map stores them but never frees them.
class HashMapImpl {
 public:
  // Key equality predicate; both arguments are entry keys.
  typedef bool (*MatchFun) (void* key1, void* key2);

  // The default capacity.
  static const uint32_t kDefaultHashMapCapacity = 8;

  // capacity is the size of the initial hash map;
  // it must be a power of 2 (and thus must not be 0).
  HashMapImpl(MatchFun match,
              uint32_t capacity = kDefaultHashMapCapacity);
  ~HashMapImpl();

  // HashMap entries are (key, value, hash) triplets.
  // Some clients may not need to use the value slot
  // (e.g. implementers of sets, where the key is the value).
  struct Entry {
    void* key;
    void* value;
    uint32_t hash;  // The full hash value for key.
    int order;  // If you never remove entries this is the insertion order.
  };

  // If an entry with matching key is found, returns that entry.
  // Otherwise, NULL is returned.
  Entry* Lookup(void* key, uint32_t hash) const;

  // If an entry with matching key is found, returns that entry.
  // If no matching entry is found, a new entry is inserted with
  // corresponding key, key hash, and NULL value.
  Entry* LookupOrInsert(void* key, uint32_t hash);

  // Removes the entry with matching key.
  // It returns the value of the deleted entry
  // or null if there is no value for such key.
  void* Remove(void* key, uint32_t hash);

  // Empties the hash map (occupancy() == 0).
  void Clear();

  // The number of (non-empty) entries in the table.
  uint32_t occupancy() const { return occupancy_; }

  // The capacity of the table. The implementation
  // makes sure that occupancy is at most 80% of
  // the table capacity.
  uint32_t capacity() const { return capacity_; }

  // Iteration
  //
  // for (Entry* p = map.Start(); p != NULL; p = map.Next(p)) {
  //   ...
  // }
  //
  // If entries are inserted during iteration, the effect of
  // calling Next() is undefined.
  Entry* Start() const;
  Entry* Next(Entry* p) const;

  // Some match functions defined for convenience.
  static bool PointersMatch(void* key1, void* key2) {
    return key1 == key2;
  }

 private:
  MatchFun match_;      // Key equality predicate.
  Entry* map_;          // Backing array of capacity_ entries.
  uint32_t capacity_;   // Always a power of two.
  uint32_t occupancy_;  // Number of occupied (key != NULL) slots.

  // One-past-the-end of the backing array.
  Entry* map_end() const { return map_ + capacity_; }
  Entry* Probe(void* key, uint32_t hash) const;
  void Initialize(uint32_t capacity);
  void Resize();
};

typedef HashMapImpl HashMap;
// Sets up an empty table with the requested capacity (must be a power of
// two, as documented on the declaration).
HashMapImpl::HashMapImpl(MatchFun match, uint32_t initial_capacity)
    : match_(match) {
  Initialize(initial_capacity);
}
// Releases only the backing Entry array; keys and values are owned by the
// caller and are not freed here.
HashMapImpl::~HashMapImpl() {
  Malloced::Delete(map_);
}
// Returns the entry holding |key|, or NULL when the probed slot is vacant
// (i.e. the key is absent). Never modifies the table.
HashMapImpl::Entry* HashMapImpl::Lookup(void* key, uint32_t hash) const {
  Entry* candidate = Probe(key, hash);
  if (candidate->key == NULL) return NULL;
  return candidate;
}
// Returns the existing entry for |key|, or inserts and returns a fresh entry
// with a NULL value. May grow the table; the returned pointer is valid until
// the next insertion.
HashMapImpl::Entry* HashMapImpl::LookupOrInsert(void* key, uint32_t hash) {
  Entry* entry = Probe(key, hash);

  // Fast path: the key is already present.
  if (entry->key != NULL) return entry;

  // The probe landed on a vacant slot: claim it for the new key.
  entry->key = key;
  entry->value = NULL;
  entry->hash = hash;
  entry->order = occupancy_;
  occupancy_++;

  // Grow the map if we reached >= 80% occupancy. Resizing invalidates entry
  // pointers, so re-probe to locate the freshly inserted entry.
  if (occupancy_ + occupancy_ / 4 >= capacity_) {
    Resize();
    entry = Probe(key, hash);
  }
  return entry;
}
// Removes the entry for |key| (if present) and returns its value, or NULL if
// the key was not found. Uses the standard open-addressing deletion scheme:
// later entries in the probe chain may be shifted back to fill the hole.
void* HashMapImpl::Remove(void* key, uint32_t hash) {
  // Lookup the entry for the key to remove.
  Entry* p = Probe(key, hash);
  if (p->key == NULL) {
    // Key not found; nothing to remove.
    return NULL;
  }
  void* value = p->value;
  // To remove an entry we need to ensure that it does not create an empty
  // entry that will cause the search for another entry to stop too soon. If all
  // the entries between the entry to remove and the next empty slot have their
  // initial position inside this interval, clearing the entry to remove will
  // not break the search. If, while searching for the next empty entry, an
  // entry is encountered which does not have its initial position between the
  // entry to remove and the position looked at, then this entry can be moved to
  // the place of the entry to remove without breaking the search for it. The
  // entry made vacant by this move is now the entry to remove and the process
  // starts over.
  // Algorithm from http://en.wikipedia.org/wiki/Open_addressing.

  // This guarantees loop termination as there is at least one empty entry so
  // eventually the removed entry will have an empty entry after it.
  DCHECK(occupancy_ < capacity_);

  // p is the candidate entry to clear. q is used to scan forwards.
  Entry* q = p;  // Start at the entry to remove.
  while (true) {
    // Move q to the next entry.
    q = q + 1;
    if (q == map_end()) {
      q = map_;  // Wrap around to the start of the table.
    }

    // All entries between p and q have their initial position between p and q
    // and the entry p can be cleared without breaking the search for these
    // entries.
    if (q->key == NULL) {
      break;
    }

    // Find the initial position for the entry at position q.
    Entry* r = map_ + (q->hash & (capacity_ - 1));

    // If the entry at position q has its initial position outside the range
    // between p and q it can be moved forward to position p and will still be
    // found. There is now a new candidate entry for clearing.
    if ((q > p && (r <= p || r > q)) ||
        (q < p && (r <= p && r > q))) {
      *p = *q;
      p = q;
    }
  }

  // Clear the entry which is allowed to be emptied.
  p->key = NULL;
  occupancy_--;
  return value;
}
void HashMapImpl::Clear() {
// Mark all entries as empty.
const Entry* end = map_end();
for (Entry* p = map_; p < end; p++) {
p->key = NULL;
}
occupancy_ = 0;
}
// Returns the first non-empty entry for iteration, or NULL if the map is
// empty. Implemented by scanning forward from just before the table start.
HashMapImpl::Entry* HashMapImpl::Start() const {
  return Next(map_ - 1);
}
// Returns the next non-empty entry after |p|, or NULL when the end of the
// table is reached. |p| must be within [map_ - 1, map_end()).
HashMapImpl::Entry* HashMapImpl::Next(Entry* p) const {
  const Entry* limit = map_end();
  DCHECK(map_ - 1 <= p && p < limit);
  for (Entry* slot = p + 1; slot < limit; slot++) {
    if (slot->key != NULL) {
      return slot;
    }
  }
  return NULL;
}
// Finds the slot for |key|: either the entry currently holding it, or the
// vacant slot where it would be inserted. Linear probing with wrap-around;
// termination is guaranteed because occupancy_ < capacity_ always holds.
HashMapImpl::Entry* HashMapImpl::Probe(void* key, uint32_t hash) const {
  DCHECK(key != NULL);
  DCHECK(base::bits::IsPowerOfTwo32(capacity_));
  const Entry* end = map_end();
  // capacity_ is a power of two, so masking maps the hash into the table.
  Entry* slot = map_ + (hash & (capacity_ - 1));
  DCHECK(map_ <= slot && slot < end);
  DCHECK(occupancy_ < capacity_);  // Guarantees loop termination.
  while (true) {
    if (slot->key == NULL) break;  // Vacant slot: key is absent.
    if (hash == slot->hash && match_(key, slot->key)) break;  // Found it.
    slot++;
    if (slot >= end) {
      slot = map_;  // Wrap around to the start of the table.
    }
  }
  return slot;
}
// (Re)allocates the backing store for |capacity| entries (must be a power of
// two) and marks every slot vacant. Aborts on allocation failure. Note that
// this overwrites map_ without freeing the previous array; Resize() is
// responsible for releasing the old storage.
void HashMapImpl::Initialize(uint32_t capacity) {
  DCHECK(base::bits::IsPowerOfTwo32(capacity));
  void* storage = Malloced::New(capacity * sizeof(Entry));
  map_ = reinterpret_cast<Entry*>(storage);
  CHECK(map_ != NULL);
  capacity_ = capacity;
  Clear();
}
// Doubles the capacity and rehashes every live entry into the new backing
// store. Called from LookupOrInsert() when occupancy reaches >= 80%.
void HashMapImpl::Resize() {
  Entry* map = map_;        // Keep the old table alive while rehashing.
  uint32_t n = occupancy_;  // Number of live entries left to migrate.

  // Allocate larger map. This replaces map_ and resets occupancy_.
  Initialize(capacity_ * 2);

  // Rehash all current entries. Counting |n| down lets the scan stop as soon
  // as the last live entry of the old table has been migrated.
  for (Entry* p = map; n > 0; p++) {
    if (p->key != NULL) {
      Entry* entry = LookupOrInsert(p->key, p->hash);
      entry->value = p->value;
      entry->order = p->order;  // Preserve the original insertion order.
      n--;
    }
  }

  // Delete old map.
  Malloced::Delete(map);
}
} // namespace sampler
} // namespace v8
#endif // V8_LIBSAMPLER_HASHMAP_H_
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_LIBSAMPLER_UTILS_H_
#define V8_LIBSAMPLER_UTILS_H_
#include "include/v8.h"
namespace v8 {
namespace sampler {
// Thin wrapper around the C allocator. libsampler allocates through this
// class instead of the C++ allocator/STL (see the porting rationale in the
// commit message about debug-mode STL mutexes and signal handlers).
class Malloced {
 public:
  // Returns |size| bytes of uninitialized storage (NULL on failure).
  static void* New(size_t size) { return malloc(size); }

  // Releases storage previously obtained from New(); NULL is a no-op.
  static void Delete(void* p) { free(p); }
};
} // namespace sampler
} // namespace v8
#endif // V8_LIBSAMPLER_UTILS_H_
// Copyright 2013 the V8 project authors. All rights reserved.
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/profiler/sampler.h"
#include "src/libsampler/v8-sampler.h"
#if V8_OS_POSIX && !V8_OS_CYGWIN
......@@ -42,12 +42,13 @@
#endif
#include <algorithm>
#include <vector>
#include <map>
#include "src/base/atomic-utils.h"
#include "src/base/platform/platform.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/profiler/tick-sample.h"
#include "src/simulator.h"
#include "src/v8threads.h"
#include "src/libsampler/hashmap.h"
#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
......@@ -155,26 +156,14 @@ enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
namespace v8 {
namespace internal {
namespace sampler {
namespace {
class PlatformDataCommon : public Malloced {
public:
PlatformDataCommon() : profiled_thread_id_(ThreadId::Current()) {}
ThreadId profiled_thread_id() { return profiled_thread_id_; }
protected:
~PlatformDataCommon() {}
private:
ThreadId profiled_thread_id_;
};
typedef List<Sampler*> SamplerList;
#if defined(USE_SIGNALS)
typedef std::vector<Sampler*> SamplerList;
typedef SamplerList::iterator SamplerListIterator;
class AtomicGuard {
public:
explicit AtomicGuard(base::AtomicValue<int>* atomic, bool is_block = true)
......@@ -216,13 +205,14 @@ uint32_t ThreadHash(pthread_t thread_id) {
return static_cast<uint32_t>(thread_id);
#endif
}
#endif // USE_SIGNALS
} // namespace
#if defined(USE_SIGNALS)
class Sampler::PlatformData : public PlatformDataCommon {
class Sampler::PlatformData {
public:
PlatformData() : vm_tid_(pthread_self()) {}
pthread_t vm_tid() const { return vm_tid_; }
......@@ -231,13 +221,85 @@ class Sampler::PlatformData : public PlatformDataCommon {
pthread_t vm_tid_;
};
// Process-wide registry mapping a thread id to the list of samplers attached
// to that thread, so the signal handler can find the samplers for whichever
// thread it interrupted. Access is serialized with an AtomicGuard over
// samplers_access_counter_ rather than a mutex — presumably because the map
// is also read from the asynchronous signal path (see SignalHandler, which
// is a friend).
class SamplerManager {
 public:
  // Registers |sampler| under its profiled thread's id. Idempotent: a
  // sampler already present in that thread's list is not added again.
  static void AddSampler(Sampler* sampler) {
    AtomicGuard atomic_guard(&samplers_access_counter_);
    DCHECK(sampler->IsActive() || !sampler->IsRegistered());
    // Add sampler into map if needed.
    pthread_t thread_id = sampler->platform_data()->vm_tid();
    HashMap::Entry* entry =
        sampler_map_.Pointer()->LookupOrInsert(ThreadKey(thread_id),
                                               ThreadHash(thread_id));
    DCHECK(entry != NULL);
    if (entry->value == NULL) {
      // First sampler for this thread: create its list.
      SamplerList* samplers = new SamplerList();
      samplers->push_back(sampler);
      entry->value = samplers;
    } else {
      // Append only if this sampler is not already in the list.
      SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
      bool exists = false;
      for (SamplerListIterator iter = samplers->begin();
           iter != samplers->end(); ++iter) {
        if (*iter == sampler) {
          exists = true;
          break;
        }
      }
      if (!exists) {
        samplers->push_back(sampler);
      }
    }
  }

  // Unregisters |sampler| from its thread's list; when the list becomes
  // empty, the thread's map entry is removed and the list is deleted.
  static void RemoveSampler(Sampler* sampler) {
    AtomicGuard atomic_guard(&samplers_access_counter_);
    DCHECK(sampler->IsActive() || sampler->IsRegistered());
    // Remove sampler from map.
    pthread_t thread_id = sampler->platform_data()->vm_tid();
    void* thread_key = ThreadKey(thread_id);
    uint32_t thread_hash = ThreadHash(thread_id);
    HashMap::Entry* entry = sampler_map_.Get().Lookup(thread_key, thread_hash);
    DCHECK(entry != NULL);
    SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
    for (SamplerListIterator iter = samplers->begin(); iter != samplers->end();
         ++iter) {
      if (*iter == sampler) {
        samplers->erase(iter);
        break;
      }
    }
    if (samplers->empty()) {
      sampler_map_.Pointer()->Remove(thread_key, thread_hash);
      delete samplers;
    }
  }

 private:
  // Placement-constructs the lazily created map; HashMap here is the
  // signal-safe libsampler hash map, not an STL container.
  struct HashMapCreateTrait {
    static void Construct(HashMap* allocated_ptr) {
      new (allocated_ptr) HashMap(HashMap::PointersMatch);
    }
  };
  friend class SignalHandler;
  static base::LazyInstance<HashMap, HashMapCreateTrait>::type
      sampler_map_;
  // Guard counter for the AtomicGuards above; also checked (non-blocking)
  // from the signal handler.
  static base::AtomicValue<int> samplers_access_counter_;
};

base::LazyInstance<HashMap, SamplerManager::HashMapCreateTrait>::type
    SamplerManager::sampler_map_ = LAZY_INSTANCE_INITIALIZER;
base::AtomicValue<int> SamplerManager::samplers_access_counter_(0);
#elif V8_OS_WIN || V8_OS_CYGWIN
// ----------------------------------------------------------------------------
// Win32 profiler support. On Cygwin we use the same sampler implementation as
// on Win32.
class Sampler::PlatformData : public PlatformDataCommon {
class Sampler::PlatformData {
public:
// Get a handle to the calling thread. This is the thread that we are
// going to profile. We need to make a copy of the handle because we are
......@@ -263,12 +325,11 @@ class Sampler::PlatformData : public PlatformDataCommon {
private:
HANDLE profiled_thread_;
};
#endif
#endif // USE_SIGNALS
#if defined(USE_SIGNALS)
class SignalHandler : public AllStatic {
class SignalHandler {
public:
static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
static void TearDown() { delete mutex_; mutex_ = NULL; }
......@@ -284,13 +345,10 @@ class SignalHandler : public AllStatic {
}
static bool Installed() {
base::LockGuard<base::Mutex> lock_guard(mutex_);
return signal_handler_installed_;
}
#if !V8_OS_NACL
static void CollectSample(void* context, Sampler* sampler);
#endif
private:
static void Install() {
#if !V8_OS_NACL
......@@ -304,7 +362,7 @@ class SignalHandler : public AllStatic {
#endif
signal_handler_installed_ =
(sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
#endif
#endif // !V8_OS_NACL
}
static void Restore() {
......@@ -317,6 +375,7 @@ class SignalHandler : public AllStatic {
}
#if !V8_OS_NACL
static void FillRegisterState(void* context, RegisterState* regs);
static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
#endif
// Protects the process wide state below.
......@@ -335,26 +394,36 @@ bool SignalHandler::signal_handler_installed_ = false;
// As Native Client does not support signal handling, profiling is disabled.
#if !V8_OS_NACL
void SignalHandler::CollectSample(void* context, Sampler* sampler) {
if (sampler == NULL || (!sampler->IsProfiling() &&
!sampler->IsRegistered())) {
return;
}
Isolate* isolate = sampler->isolate();
void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
void* context) {
USE(info);
if (signal != SIGPROF) return;
AtomicGuard atomic_guard(&SamplerManager::samplers_access_counter_, false);
if (!atomic_guard.is_success()) return;
pthread_t thread_id = pthread_self();
HashMap::Entry* entry =
SamplerManager::sampler_map_.Pointer()->Lookup(ThreadKey(thread_id),
ThreadHash(thread_id));
if (entry == NULL) return;
SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
// We require a fully initialized and entered isolate.
if (isolate == NULL || !isolate->IsInUse()) return;
v8::RegisterState state;
FillRegisterState(context, &state);
if (v8::Locker::IsActive() &&
!isolate->thread_manager()->IsLockedByCurrentThread()) {
return;
}
for (int i = 0; i < samplers->size(); ++i) {
Sampler* sampler = (*samplers)[i];
Isolate* isolate = sampler->isolate();
v8::RegisterState state;
// We require a fully initialized and entered isolate.
if (isolate == NULL || !isolate->IsInUse()) return;
#if defined(USE_SIMULATOR)
if (!SimulatorHelper::FillRegisters(isolate, &state)) return;
#else
if (v8::Locker::IsActive() && !Locker::IsLocked(isolate)) return;
sampler->SampleStack(state);
}
}
void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
#if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390)))
......@@ -362,381 +431,187 @@ void SignalHandler::CollectSample(void* context, Sampler* sampler) {
#endif
#if V8_OS_LINUX
#if V8_HOST_ARCH_IA32
state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_EIP]);
state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_ESP]);
state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_RIP]);
state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_RSP]);
state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
// Old GLibc ARM versions used a gregs[] array to access the register
// values from mcontext_t.
state.pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
state.sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
state->pc = reinterpret_cast<void*>(mcontext.gregs[R15]);
state->sp = reinterpret_cast<void*>(mcontext.gregs[R13]);
state->fp = reinterpret_cast<void*>(mcontext.gregs[R11]);
#else
state.pc = reinterpret_cast<Address>(mcontext.arm_pc);
state.sp = reinterpret_cast<Address>(mcontext.arm_sp);
state.fp = reinterpret_cast<Address>(mcontext.arm_fp);
state->pc = reinterpret_cast<void*>(mcontext.arm_pc);
state->sp = reinterpret_cast<void*>(mcontext.arm_sp);
state->fp = reinterpret_cast<void*>(mcontext.arm_fp);
#endif // V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
#elif V8_HOST_ARCH_ARM64
state.pc = reinterpret_cast<Address>(mcontext.pc);
state.sp = reinterpret_cast<Address>(mcontext.sp);
state->pc = reinterpret_cast<void*>(mcontext.pc);
state->sp = reinterpret_cast<void*>(mcontext.sp);
// FP is an alias for x29.
state.fp = reinterpret_cast<Address>(mcontext.regs[29]);
state->fp = reinterpret_cast<void*>(mcontext.regs[29]);
#elif V8_HOST_ARCH_MIPS
state.pc = reinterpret_cast<Address>(mcontext.pc);
state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
state->pc = reinterpret_cast<void*>(mcontext.pc);
state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
#elif V8_HOST_ARCH_MIPS64
state.pc = reinterpret_cast<Address>(mcontext.pc);
state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
state->pc = reinterpret_cast<void*>(mcontext.pc);
state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
#elif V8_HOST_ARCH_PPC
state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->nip);
state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->nip);
state->sp =
reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
state->fp =
reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
#elif V8_HOST_ARCH_S390
#if V8_TARGET_ARCH_32_BIT
// 31-bit target will have bit 0 (MSB) of the PSW set to denote addressing
// mode. This bit needs to be masked out to resolve actual address.
state.pc =
reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF);
state->pc =
reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF);
#else
state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr);
state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr);
#endif // V8_TARGET_ARCH_32_BIT
state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[15]);
state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[11]);
state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[15]);
state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[11]);
#endif // V8_HOST_ARCH_*
#elif V8_OS_MACOSX
#if V8_HOST_ARCH_X64
#if __DARWIN_UNIX03
state.pc = reinterpret_cast<Address>(mcontext->__ss.__rip);
state.sp = reinterpret_cast<Address>(mcontext->__ss.__rsp);
state.fp = reinterpret_cast<Address>(mcontext->__ss.__rbp);
state->pc = reinterpret_cast<void*>(mcontext->__ss.__rip);
state->sp = reinterpret_cast<void*>(mcontext->__ss.__rsp);
state->fp = reinterpret_cast<void*>(mcontext->__ss.__rbp);
#else // !__DARWIN_UNIX03
state.pc = reinterpret_cast<Address>(mcontext->ss.rip);
state.sp = reinterpret_cast<Address>(mcontext->ss.rsp);
state.fp = reinterpret_cast<Address>(mcontext->ss.rbp);
state->pc = reinterpret_cast<void*>(mcontext->ss.rip);
state->sp = reinterpret_cast<void*>(mcontext->ss.rsp);
state->fp = reinterpret_cast<void*>(mcontext->ss.rbp);
#endif // __DARWIN_UNIX03
#elif V8_HOST_ARCH_IA32
#if __DARWIN_UNIX03
state.pc = reinterpret_cast<Address>(mcontext->__ss.__eip);
state.sp = reinterpret_cast<Address>(mcontext->__ss.__esp);
state.fp = reinterpret_cast<Address>(mcontext->__ss.__ebp);
state->pc = reinterpret_cast<void*>(mcontext->__ss.__eip);
state->sp = reinterpret_cast<void*>(mcontext->__ss.__esp);
state->fp = reinterpret_cast<void*>(mcontext->__ss.__ebp);
#else // !__DARWIN_UNIX03
state.pc = reinterpret_cast<Address>(mcontext->ss.eip);
state.sp = reinterpret_cast<Address>(mcontext->ss.esp);
state.fp = reinterpret_cast<Address>(mcontext->ss.ebp);
state->pc = reinterpret_cast<void*>(mcontext->ss.eip);
state->sp = reinterpret_cast<void*>(mcontext->ss.esp);
state->fp = reinterpret_cast<void*>(mcontext->ss.ebp);
#endif // __DARWIN_UNIX03
#endif // V8_HOST_ARCH_IA32
#elif V8_OS_FREEBSD
#if V8_HOST_ARCH_IA32
state.pc = reinterpret_cast<Address>(mcontext.mc_eip);
state.sp = reinterpret_cast<Address>(mcontext.mc_esp);
state.fp = reinterpret_cast<Address>(mcontext.mc_ebp);
state->pc = reinterpret_cast<void*>(mcontext.mc_eip);
state->sp = reinterpret_cast<void*>(mcontext.mc_esp);
state->fp = reinterpret_cast<void*>(mcontext.mc_ebp);
#elif V8_HOST_ARCH_X64
state.pc = reinterpret_cast<Address>(mcontext.mc_rip);
state.sp = reinterpret_cast<Address>(mcontext.mc_rsp);
state.fp = reinterpret_cast<Address>(mcontext.mc_rbp);
state->pc = reinterpret_cast<void*>(mcontext.mc_rip);
state->sp = reinterpret_cast<void*>(mcontext.mc_rsp);
state->fp = reinterpret_cast<void*>(mcontext.mc_rbp);
#elif V8_HOST_ARCH_ARM
state.pc = reinterpret_cast<Address>(mcontext.mc_r15);
state.sp = reinterpret_cast<Address>(mcontext.mc_r13);
state.fp = reinterpret_cast<Address>(mcontext.mc_r11);
state->pc = reinterpret_cast<void*>(mcontext.mc_r15);
state->sp = reinterpret_cast<void*>(mcontext.mc_r13);
state->fp = reinterpret_cast<void*>(mcontext.mc_r11);
#endif // V8_HOST_ARCH_*
#elif V8_OS_NETBSD
#if V8_HOST_ARCH_IA32
state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_EIP]);
state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_ESP]);
state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_EBP]);
#elif V8_HOST_ARCH_X64
state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_RIP]);
state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RSP]);
state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RBP]);
#endif // V8_HOST_ARCH_*
#elif V8_OS_OPENBSD
#if V8_HOST_ARCH_IA32
state.pc = reinterpret_cast<Address>(ucontext->sc_eip);
state.sp = reinterpret_cast<Address>(ucontext->sc_esp);
state.fp = reinterpret_cast<Address>(ucontext->sc_ebp);
state->pc = reinterpret_cast<void*>(ucontext->sc_eip);
state->sp = reinterpret_cast<void*>(ucontext->sc_esp);
state->fp = reinterpret_cast<void*>(ucontext->sc_ebp);
#elif V8_HOST_ARCH_X64
state.pc = reinterpret_cast<Address>(ucontext->sc_rip);
state.sp = reinterpret_cast<Address>(ucontext->sc_rsp);
state.fp = reinterpret_cast<Address>(ucontext->sc_rbp);
state->pc = reinterpret_cast<void*>(ucontext->sc_rip);
state->sp = reinterpret_cast<void*>(ucontext->sc_rsp);
state->fp = reinterpret_cast<void*>(ucontext->sc_rbp);
#endif // V8_HOST_ARCH_*
#elif V8_OS_SOLARIS
state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_PC]);
state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_SP]);
state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_FP]);
#elif V8_OS_QNX
#if V8_HOST_ARCH_IA32
state.pc = reinterpret_cast<Address>(mcontext.cpu.eip);
state.sp = reinterpret_cast<Address>(mcontext.cpu.esp);
state.fp = reinterpret_cast<Address>(mcontext.cpu.ebp);
state->pc = reinterpret_cast<void*>(mcontext.cpu.eip);
state->sp = reinterpret_cast<void*>(mcontext.cpu.esp);
state->fp = reinterpret_cast<void*>(mcontext.cpu.ebp);
#elif V8_HOST_ARCH_ARM
state.pc = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_PC]);
state.sp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_SP]);
state.fp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_FP]);
state->pc = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_PC]);
state->sp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_SP]);
state->fp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_FP]);
#endif // V8_HOST_ARCH_*
#elif V8_OS_AIX
state.pc = reinterpret_cast<Address>(mcontext.jmp_context.iar);
state.sp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[1]);
state.fp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[31]);
state->pc = reinterpret_cast<void*>(mcontext.jmp_context.iar);
state->sp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[1]);
state->fp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[31]);
#endif // V8_OS_AIX
#endif // USE_SIMULATOR
sampler->SampleStack(state);
}
#endif // V8_OS_NACL
#endif // USE_SIGNALS
class SamplerThread : public base::Thread {
public:
static const int kSamplerThreadStackSize = 64 * KB;
explicit SamplerThread(int interval)
: Thread(base::Thread::Options("SamplerThread", kSamplerThreadStackSize)),
interval_(interval) {}
static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
static void TearDown() { delete mutex_; mutex_ = NULL; }
static void AddActiveSampler(Sampler* sampler) {
bool need_to_start = false;
base::LockGuard<base::Mutex> lock_guard(mutex_);
if (instance_ == NULL) {
// Start a thread that will send SIGPROF signal to VM threads,
// when CPU profiling will be enabled.
instance_ = new SamplerThread(sampler->interval());
need_to_start = true;
}
DCHECK(sampler->IsActive());
DCHECK(instance_->interval_ == sampler->interval());
#if defined(USE_SIGNALS)
AddSampler(sampler);
#else
DCHECK(!instance_->active_samplers_.Contains(sampler));
instance_->active_samplers_.Add(sampler);
#endif // USE_SIGNALS
if (need_to_start) instance_->StartSynchronously();
}
static void RemoveSampler(Sampler* sampler) {
SamplerThread* instance_to_remove = NULL;
{
base::LockGuard<base::Mutex> lock_guard(mutex_);
DCHECK(sampler->IsActive() || sampler->IsRegistered());
#if defined(USE_SIGNALS)
{
AtomicGuard atomic_guard(&sampler_list_access_counter_);
// Remove sampler from map.
pthread_t thread_id = sampler->platform_data()->vm_tid();
void* thread_key = ThreadKey(thread_id);
uint32_t thread_hash = ThreadHash(thread_id);
HashMap::Entry* entry =
thread_id_to_samplers_.Get().Lookup(thread_key, thread_hash);
DCHECK(entry != NULL);
SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
samplers->RemoveElement(sampler);
if (samplers->is_empty()) {
thread_id_to_samplers_.Pointer()->Remove(thread_key, thread_hash);
delete samplers;
}
if (thread_id_to_samplers_.Get().occupancy() == 0) {
instance_to_remove = instance_;
instance_ = NULL;
}
}
#else
bool removed = instance_->active_samplers_.RemoveElement(sampler);
DCHECK(removed);
USE(removed);
// We cannot delete the instance immediately as we need to Join() the
// thread but we are holding mutex_ and the thread may try to acquire it.
if (instance_->active_samplers_.is_empty()) {
instance_to_remove = instance_;
instance_ = NULL;
}
#endif // USE_SIGNALS
}
if (!instance_to_remove) return;
instance_to_remove->Join();
delete instance_to_remove;
}
// Unlike AddActiveSampler, this method only adds a sampler,
// but won't start the sampler thread.
static void RegisterSampler(Sampler* sampler) {
base::LockGuard<base::Mutex> lock_guard(mutex_);
#if defined(USE_SIGNALS)
AddSampler(sampler);
#endif // USE_SIGNALS
}
// Implement Thread::Run().
// Main loop of the sampler thread: once per |interval_| milliseconds it
// triggers a sample on every registered sampler, until no samplers remain.
virtual void Run() {
  while (true) {
    {
      // Hold the lock only while walking the sampler collection; sleep
      // outside of it so samplers can be added/removed meanwhile.
      base::LockGuard<base::Mutex> lock_guard(mutex_);
#if defined(USE_SIGNALS)
      if (thread_id_to_samplers_.Get().occupancy() == 0) break;
      if (SignalHandler::Installed()) {
        // Signal-based sampling: interrupt each registered thread with
        // SIGPROF; the actual sample is taken in the signal handler.
        for (HashMap::Entry *p = thread_id_to_samplers_.Get().Start();
             p != NULL; p = thread_id_to_samplers_.Get().Next(p)) {
#if V8_OS_AIX && V8_TARGET_ARCH_PPC64
          // on AIX64, cannot cast (void *) to pthread_t which is
          // of type unsigned int (4bytes)
          pthread_t thread_id = reinterpret_cast<intptr_t>(p->key);
#else
          pthread_t thread_id = reinterpret_cast<pthread_t>(p->key);
#endif
          pthread_kill(thread_id, SIGPROF);
        }
      }
#else
      if (active_samplers_.is_empty()) break;
      // When CPU profiling is enabled both JavaScript and C++ code is
      // profiled. We must not suspend.
      for (int i = 0; i < active_samplers_.length(); ++i) {
        Sampler* sampler = active_samplers_.at(i);
        if (!sampler->IsProfiling()) continue;
        sampler->DoSample();
      }
#endif  // USE_SIGNALS
    }
    base::OS::Sleep(base::TimeDelta::FromMilliseconds(interval_));
  }
}
private:
// Protects the process wide state below.
static base::Mutex* mutex_;
static SamplerThread* instance_;
const int interval_;
#endif // !V8_OS_NACL
#if defined(USE_SIGNALS)
// Trait for base::LazyInstance: placement-constructs the lazily created
// thread-id -> SamplerList map with pointer-identity key matching.
struct HashMapCreateTrait {
  static void Construct(HashMap* allocated_ptr) {
    new (allocated_ptr) HashMap(HashMap::PointersMatch);
  }
};
friend class SignalHandler;
static base::LazyInstance<HashMap, HashMapCreateTrait>::type
thread_id_to_samplers_;
static base::AtomicValue<int> sampler_list_access_counter_;
// Adds |sampler| to the list of samplers for its VM thread, creating the
// per-thread list on first use. Idempotent for an already-listed sampler.
static void AddSampler(Sampler* sampler) {
  // Keeps the SIGPROF handler from reading the map while it is mutated.
  AtomicGuard atomic_guard(&sampler_list_access_counter_);
  // Add sampler into map if needed.
  pthread_t thread_id = sampler->platform_data()->vm_tid();
  HashMap::Entry *entry =
      thread_id_to_samplers_.Pointer()->LookupOrInsert(ThreadKey(thread_id),
                                                       ThreadHash(thread_id));
  if (entry->value == NULL) {
    // First sampler registered for this thread: create its list.
    SamplerList* samplers = new SamplerList();
    samplers->Add(sampler);
    entry->value = samplers;
  } else {
    SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
    if (!samplers->Contains(sampler)) {
      samplers->Add(sampler);
    }
  }
}
#else
SamplerList active_samplers_;
#endif // USE_SIGNALS
DISALLOW_COPY_AND_ASSIGN(SamplerThread);
};
// Definitions of SamplerThread's process-wide static state.
base::Mutex* SamplerThread::mutex_ = NULL;
SamplerThread* SamplerThread::instance_ = NULL;
#if defined(USE_SIGNALS)
base::LazyInstance<HashMap, SamplerThread::HashMapCreateTrait>::type
    SamplerThread::thread_id_to_samplers_ = LAZY_INSTANCE_INITIALIZER;
base::AtomicValue<int> SamplerThread::sampler_list_access_counter_(0);

// As Native Client does not support signal handling, profiling is disabled.
#if !V8_OS_NACL
// SIGPROF handler, executed on the interrupted (sampled) thread. It must
// remain async-signal safe: the map uses the ported HashMap (no STL mutex
// pool), and the AtomicGuard below is created non-blocking.
void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
                                         void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  // If another thread currently mutates the sampler map, bail out rather
  // than block inside a signal handler.
  AtomicGuard atomic_guard(&SamplerThread::sampler_list_access_counter_, false);
  if (!atomic_guard.is_success()) return;
  pthread_t thread_id = pthread_self();
  HashMap::Entry* entry =
      SamplerThread::thread_id_to_samplers_.Pointer()->Lookup(
          ThreadKey(thread_id), ThreadHash(thread_id));
  if (entry == NULL)
    return;
  // Collect one sample for every sampler attached to this thread.
  SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
  for (int i = 0; i < samplers->length(); ++i) {
    Sampler* sampler = samplers->at(i);
    CollectSample(context, sampler);
  }
}
#endif  // !V8_OS_NACL
#endif  // USE_SIGNALS
// Initializes process-wide sampling support. Called once at VM startup.
void Sampler::SetUp() {
#if defined(USE_SIGNALS)
  // Prepare the SIGPROF handler machinery before the sampler thread runs.
  SignalHandler::SetUp();
#endif
  SamplerThread::SetUp();
}
// Releases process-wide sampling support; mirror image of SetUp() —
// the sampler thread is torn down before the signal handler goes away.
void Sampler::TearDown() {
  SamplerThread::TearDown();
#if defined(USE_SIGNALS)
  SignalHandler::TearDown();
#endif
}
Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
Sampler::Sampler(Isolate* isolate)
: is_counting_samples_(false),
js_sample_count_(0),
external_sample_count_(0),
isolate_(isolate),
profiling_(false),
has_processing_thread_(false),
active_(false),
registered_(false),
is_counting_samples_(false),
js_sample_count_(0),
external_sample_count_(0) {
registered_(false) {
data_ = new PlatformData;
}
Sampler::~Sampler() {
DCHECK(!IsActive());
#if defined(USE_SIGNALS)
if (IsRegistered()) {
SamplerThread::RemoveSampler(this);
SamplerManager::RemoveSampler(this);
}
#endif
delete data_;
}
void Sampler::Start() {
DCHECK(!IsActive());
SetActive(true);
SamplerThread::AddActiveSampler(this);
#if defined(USE_SIGNALS)
SamplerManager::AddSampler(this);
#endif
}
void Sampler::Stop() {
#if defined(USE_SIGNALS)
SamplerManager::RemoveSampler(this);
#endif
DCHECK(IsActive());
SamplerThread::RemoveSampler(this);
SetActive(false);
SetRegistered(false);
}
......@@ -758,28 +633,12 @@ void Sampler::DecreaseProfilingDepth() {
}
// Records one tick for this sampler's isolate from the captured register
// state, then hands the sample to Tick().
void Sampler::SampleStack(const v8::RegisterState& state) {
  // Try to reserve a slot in the CPU profiler's sample buffer; if the
  // profiler is not collecting, fall back to a stack-allocated sample.
  TickSample* sample = isolate_->cpu_profiler()->StartTickSample();
  TickSample sample_obj;
  if (sample == NULL) sample = &sample_obj;
  sample->Init(isolate_, state, TickSample::kIncludeCEntryFrame, true);
  if (is_counting_samples_ && !sample->timestamp.IsNull()) {
    // Optional bookkeeping used by tests: count samples per VM state.
    if (sample->state == JS) ++js_sample_count_;
    if (sample->state == EXTERNAL) ++external_sample_count_;
  }
  Tick(sample);
  if (sample != &sample_obj) {
    // The sample lives in the profiler's buffer; mark it as finished.
    isolate_->cpu_profiler()->FinishTickSample();
  }
}
#if defined(USE_SIGNALS)
void Sampler::DoSample() {
if (!SignalHandler::Installed()) return;
if (!IsActive() && !IsRegistered()) {
SamplerThread::RegisterSampler(this);
SamplerManager::AddSampler(this);
SetRegistered(true);
}
pthread_kill(platform_data()->vm_tid(), SIGPROF);
......@@ -800,22 +659,15 @@ void Sampler::DoSample() {
context.ContextFlags = CONTEXT_FULL;
if (GetThreadContext(profiled_thread, &context) != 0) {
v8::RegisterState state;
#if defined(USE_SIMULATOR)
if (!SimulatorHelper::FillRegisters(isolate(), &state)) {
ResumeThread(profiled_thread);
return;
}
#else
#if V8_HOST_ARCH_X64
state.pc = reinterpret_cast<Address>(context.Rip);
state.sp = reinterpret_cast<Address>(context.Rsp);
state.fp = reinterpret_cast<Address>(context.Rbp);
state.pc = reinterpret_cast<void*>(context.Rip);
state.sp = reinterpret_cast<void*>(context.Rsp);
state.fp = reinterpret_cast<void*>(context.Rbp);
#else
state.pc = reinterpret_cast<Address>(context.Eip);
state.sp = reinterpret_cast<Address>(context.Esp);
state.fp = reinterpret_cast<Address>(context.Ebp);
state.pc = reinterpret_cast<void*>(context.Eip);
state.sp = reinterpret_cast<void*>(context.Esp);
state.fp = reinterpret_cast<void*>(context.Ebp);
#endif
#endif // USE_SIMULATOR
SampleStack(state);
}
ResumeThread(profiled_thread);
......@@ -823,6 +675,5 @@ void Sampler::DoSample() {
#endif // USE_SIGNALS
} // namespace internal
} // namespace sampler
} // namespace v8
// Copyright 2013 the V8 project authors. All rights reserved.
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_PROFILER_SAMPLER_H_
#define V8_PROFILER_SAMPLER_H_
#ifndef V8_LIBSAMPLER_SAMPLER_H_
#define V8_LIBSAMPLER_SAMPLER_H_
#include "include/v8.h"
......@@ -11,10 +11,7 @@
#include "src/base/macros.h"
namespace v8 {
namespace internal {
class Isolate;
struct TickSample;
namespace sampler {
// ----------------------------------------------------------------------------
// Sampler
......@@ -25,19 +22,23 @@ struct TickSample;
class Sampler {
public:
static const int kMaxFramesCountLog2 = 8;
static const unsigned kMaxFramesCount = (1u << kMaxFramesCountLog2) - 1;
// Initializes the Sampler support. Called once at VM startup.
static void SetUp();
static void TearDown();
// Initialize sampler.
Sampler(Isolate* isolate, int interval);
explicit Sampler(Isolate* isolate);
virtual ~Sampler();
Isolate* isolate() const { return isolate_; }
int interval() const { return interval_; }
// Performs stack sampling.
void SampleStack(const v8::RegisterState& regs);
// Clients should override this method in order to do something on samples,
// for example buffer samples in a queue.
virtual void SampleStack(const v8::RegisterState& regs) = 0;
// Start and stop sampler.
void Start();
......@@ -60,8 +61,7 @@ class Sampler {
bool IsRegistered() const { return base::NoBarrier_Load(&registered_); }
void DoSample();
// If true next sample must be initiated on the profiler event processor
// thread right after latest sample is processed.
void SetHasProcessingThread(bool value) {
base::NoBarrier_Store(&has_processing_thread_, value);
}
......@@ -79,30 +79,25 @@ class Sampler {
PlatformData* platform_data() const { return data_; }
protected:
// This method is called for each sampling period with the current
// program counter.
virtual void Tick(TickSample* sample) = 0;
// Counts stack samples taken in various VM states.
bool is_counting_samples_;
unsigned js_sample_count_;
unsigned external_sample_count_;
private:
void SetActive(bool value) { base::NoBarrier_Store(&active_, value); }
void SetRegistered(bool value) { base::NoBarrier_Store(&registered_, value); }
Isolate* isolate_;
const int interval_;
base::Atomic32 profiling_;
base::Atomic32 has_processing_thread_;
base::Atomic32 active_;
base::Atomic32 registered_;
PlatformData* data_; // Platform specific data.
// Counts stack samples taken in various VM states.
bool is_counting_samples_;
unsigned js_sample_count_;
unsigned external_sample_count_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
};
} // namespace internal
} // namespace sampler
} // namespace v8
#endif // V8_PROFILER_SAMPLER_H_
#endif // V8_LIBSAMPLER_SAMPLER_H_
......@@ -15,11 +15,12 @@
#include "src/global-handles.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/libsampler/v8-sampler.h"
#include "src/log-inl.h"
#include "src/log-utils.h"
#include "src/macro-assembler.h"
#include "src/perf-jit.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/runtime-profiler.h"
#include "src/string-stream.h"
#include "src/vm-state-inl.h"
......@@ -539,6 +540,31 @@ void JitLogger::EndCodePosInfoEvent(AbstractCode* code,
}
// TODO(lpy): Keeping sampling thread inside V8 is a workaround currently,
// the reason is to reduce code duplication during migration to sampler library,
// sampling thread, as well as the sampler, will be moved to D8 eventually.
//
// Periodically triggers DoSample() on a sampler from a dedicated thread for
// as long as that sampler reports that it is profiling.
class SamplingThread : public base::Thread {
 public:
  static const int kSamplingThreadStackSize = 64 * KB;

  SamplingThread(sampler::Sampler* sampler, int interval)
      : base::Thread(base::Thread::Options("SamplingThread",
                                           kSamplingThreadStackSize)),
        sampler_(sampler),
        interval_(interval) {}

  void Run() override {
    for (;;) {
      if (!sampler_->IsProfiling()) return;
      sampler_->DoSample();
      base::OS::Sleep(base::TimeDelta::FromMilliseconds(interval_));
    }
  }

 private:
  sampler::Sampler* const sampler_;  // Not owned.
  const int interval_;               // Sampling period in milliseconds.
};
// The Profiler samples pc and sp values for the main thread.
// Each sample is appended to a circular buffer.
// An independent thread removes data and writes it to the log.
......@@ -611,16 +637,16 @@ class Profiler: public base::Thread {
// Ticker used to provide ticks to the profiler and the sliding state
// window.
//
class Ticker: public Sampler {
class Ticker: public sampler::Sampler {
public:
Ticker(Isolate* isolate, int interval):
Sampler(isolate, interval),
profiler_(NULL) {}
~Ticker() { if (IsActive()) Stop(); }
sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
profiler_(NULL),
sampling_thread_(new SamplingThread(this, interval)) {}
virtual void Tick(TickSample* sample) {
if (profiler_) profiler_->Insert(sample);
~Ticker() {
if (IsActive()) Stop();
delete sampling_thread_;
}
void SetProfiler(Profiler* profiler) {
......@@ -628,16 +654,40 @@ class Ticker: public Sampler {
profiler_ = profiler;
IncreaseProfilingDepth();
if (!IsActive()) Start();
sampling_thread_->StartSynchronously();
}
// Detaches the profiler, stops sampling, and waits for the sampling
// thread to observe the stop and finish.
void ClearProfiler() {
  profiler_ = NULL;
  if (IsActive()) Stop();
  DecreaseProfilingDepth();
  // Safe to join: after Stop(), IsProfiling() lets the thread exit its loop.
  sampling_thread_->Join();
}
// Sampler override: converts a captured register state into a TickSample
// and feeds it to the profiler (moved here from the old Sampler::SampleStack).
void SampleStack(const v8::RegisterState& state) override {
  v8::Isolate* v8_isolate = isolate();
  Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
#if defined(USE_SIMULATOR)
  // Under the simulator the real registers are meaningless; substitute the
  // simulated ones in place.
  SimulatorHelper::FillRegisters(isolate,
                                 const_cast<v8::RegisterState*>(&state));
#endif
  // Reserve a slot in the CPU profiler's buffer, falling back to a local
  // sample object if the profiler is not collecting.
  TickSample* sample = isolate->cpu_profiler()->StartTickSample();
  TickSample sample_obj;
  if (sample == NULL) sample = &sample_obj;
  sample->Init(isolate, state, TickSample::kIncludeCEntryFrame, true);
  if (is_counting_samples_ && !sample->timestamp.IsNull()) {
    // Per-VM-state counters used by tests.
    if (sample->state == JS) ++js_sample_count_;
    if (sample->state == EXTERNAL) ++external_sample_count_;
  }
  if (profiler_) profiler_->Insert(sample);
  if (sample != &sample_obj) {
    isolate->cpu_profiler()->FinishTickSample();
  }
}
private:
Profiler* profiler_;
SamplingThread* sampling_thread_;
};
......@@ -1810,7 +1860,7 @@ void Logger::SetCodeEventHandler(uint32_t options,
}
Sampler* Logger::sampler() {
sampler::Sampler* Logger::sampler() {
return ticker_;
}
......
......@@ -19,6 +19,10 @@ namespace base {
class Semaphore;
}
namespace sampler {
class Sampler;
}
namespace internal {
// Logger is used for collecting logging information from V8 during
......@@ -141,7 +145,6 @@ class JitLogger;
class PerfBasicLogger;
class LowLevelLogger;
class PerfJitLogger;
class Sampler;
class Logger {
public:
......@@ -161,7 +164,7 @@ class Logger {
void SetCodeEventHandler(uint32_t options,
JitCodeEventHandler event_handler);
Sampler* sampler();
sampler::Sampler* sampler();
// Frees resources acquired in SetUp.
// When a temporary file is used for the log, returns its stream descriptor,
......
......@@ -21,7 +21,7 @@ static const int kProfilerStackSize = 64 * KB;
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator,
Sampler* sampler,
sampler::Sampler* sampler,
base::TimeDelta period)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
......@@ -566,7 +566,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
saved_is_logging_ = logger->is_logging_;
logger->is_logging_ = false;
generator_ = new ProfileGenerator(profiles_);
Sampler* sampler = logger->sampler();
sampler::Sampler* sampler = logger->sampler();
processor_ = new ProfilerEventsProcessor(
generator_, sampler, sampling_interval_);
is_profiling_ = true;
......@@ -612,7 +612,8 @@ void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
void CpuProfiler::StopProcessor() {
Logger* logger = isolate_->logger();
Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
sampler::Sampler* sampler =
reinterpret_cast<sampler::Sampler*>(logger->ticker_);
is_profiling_ = false;
processor_->StopSynchronously();
delete processor_;
......
......@@ -10,9 +10,9 @@
#include "src/base/atomicops.h"
#include "src/base/platform/time.h"
#include "src/compiler.h"
#include "src/libsampler/v8-sampler.h"
#include "src/locked-queue.h"
#include "src/profiler/circular-queue.h"
#include "src/profiler/sampler.h"
#include "src/profiler/tick-sample.h"
namespace v8 {
......@@ -128,7 +128,7 @@ class CodeEventsContainer {
class ProfilerEventsProcessor : public base::Thread {
public:
ProfilerEventsProcessor(ProfileGenerator* generator,
Sampler* sampler,
sampler::Sampler* sampler,
base::TimeDelta period);
virtual ~ProfilerEventsProcessor();
......@@ -166,7 +166,7 @@ class ProfilerEventsProcessor : public base::Thread {
SampleProcessingResult ProcessOneSample();
ProfileGenerator* generator_;
Sampler* sampler_;
sampler::Sampler* sampler_;
base::Atomic32 running_;
const base::TimeDelta period_; // Samples & code events processing period.
LockedQueue<CodeEventsContainer> events_buffer_;
......
......@@ -14,9 +14,9 @@
#include "src/elements.h"
#include "src/frames.h"
#include "src/isolate.h"
#include "src/libsampler/v8-sampler.h"
#include "src/objects.h"
#include "src/profiler/heap-profiler.h"
#include "src/profiler/sampler.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
......@@ -48,7 +48,7 @@ void V8::TearDown() {
ExternalReference::TearDownMathExpData();
RegisteredExtension::UnregisterAll();
Isolate::GlobalTearDown();
Sampler::TearDown();
sampler::Sampler::TearDown();
FlagList::ResetAllFlags(); // Frees memory held by string arguments.
}
......@@ -76,7 +76,7 @@ void V8::InitializeOncePerProcessImpl() {
Isolate::InitializeOncePerProcess();
Sampler::SetUp();
sampler::Sampler::SetUp();
CpuFeatures::Probe(false);
ElementsAccessor::InitializeOncePerProcess();
LOperand::SetUpCaches();
......
......@@ -381,6 +381,7 @@
'type': 'static_library',
'dependencies': [
'v8_libbase',
'v8_libsampler',
],
'variables': {
'optimize': 'max',
......@@ -1001,8 +1002,6 @@
'profiler/profile-generator-inl.h',
'profiler/profile-generator.cc',
'profiler/profile-generator.h',
'profiler/sampler.cc',
'profiler/sampler.h',
'profiler/sampling-heap-profiler.cc',
'profiler/sampling-heap-profiler.h',
'profiler/strings-storage.cc',
......@@ -1939,6 +1938,38 @@
],
},
},
{
'target_name': 'v8_libsampler',
'type': 'static_library',
'variables': {
'optimize': 'max',
},
'dependencies': [
'v8_libbase',
],
'include_dirs+': [
'..',
'../include',
],
'sources': [
'libsampler/hashmap.h',
'libsampler/utils.h',
'libsampler/v8-sampler.cc',
'libsampler/v8-sampler.h'
],
'conditions': [
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
}, {
'toolsets': ['target'],
}],
],
'direct_dependent_settings': {
'include_dirs': [
'../include',
],
},
},
{
'target_name': 'natives_blob',
'type': 'none',
......
......@@ -108,6 +108,7 @@
'heap/test-lab.cc',
'heap/test-mark-compact.cc',
'heap/test-spaces.cc',
'libsampler/test-sampler.cc',
'print-extension.cc',
'profiler-extension.cc',
'test-accessors.cc',
......
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Tests of sampler functionalities.
#include "src/libsampler/v8-sampler.h"
#include "src/base/platform/platform.h"
#include "test/cctest/cctest.h"
namespace v8 {
namespace sampler {
namespace {
// Drives a sampler from a separate thread, triggering a sample roughly
// every millisecond while the sampler is profiling.
class TestSamplingThread : public base::Thread {
 public:
  static const int kSamplerThreadStackSize = 64 * 1024;

  explicit TestSamplingThread(Sampler* sampler)
      : Thread(base::Thread::Options("TestSamplingThread",
                                     kSamplerThreadStackSize)),
        sampler_(sampler) {}

  // Implement Thread::Run().
  void Run() override {
    for (;;) {
      if (!sampler_->IsProfiling()) break;
      sampler_->DoSample();
      base::OS::Sleep(base::TimeDelta::FromMilliseconds(1));
    }
  }

 private:
  Sampler* const sampler_;  // Not owned.
};
// Sampler whose SampleStack walks the isolate's stack and tallies samples
// per VM state (JS vs. EXTERNAL) when counting is enabled.
class TestSampler : public Sampler {
 public:
  explicit TestSampler(Isolate* isolate) : Sampler(isolate) {}

  void SampleStack(const v8::RegisterState& regs) override {
    SampleInfo info;
    void* stack_frames[Sampler::kMaxFramesCount];
    isolate()->GetStackSample(regs, reinterpret_cast<void**>(stack_frames),
                              Sampler::kMaxFramesCount, &info);
    if (!is_counting_samples_) return;
    if (info.vm_state == JS) {
      ++js_sample_count_;
    } else if (info.vm_state == EXTERNAL) {
      ++external_sample_count_;
    }
  }
};
// No-op accessor callbacks. Entering a native accessor puts the VM into
// the EXTERNAL state, which is what the sampler test counts.
class TestApiCallbacks {
 public:
  TestApiCallbacks() {}

  static void Getter(v8::Local<v8::String> name,
                     const v8::PropertyCallbackInfo<v8::Value>& info) {
  }
  static void Setter(v8::Local<v8::String> name,
                     v8::Local<v8::Value> value,
                     const v8::PropertyCallbackInfo<void>& info) {
  }
};
// Drives sampling end-to-end: sets up the sampler machinery, starts a
// sampling thread, and keeps calling |function| on this thread until at
// least |min_js_samples| JS-state samples and |min_external_samples|
// EXTERNAL-state samples have been counted.
// NOTE(review): the teardown order is load-bearing — the thread is joined
// and both objects deleted before Sampler::TearDown() is called.
static void RunSampler(v8::Local<v8::Context> env,
                       v8::Local<v8::Function> function,
                       v8::Local<v8::Value> argv[], int argc,
                       unsigned min_js_samples = 0,
                       unsigned min_external_samples = 0) {
  Sampler::SetUp();
  TestSampler* sampler = new TestSampler(env->GetIsolate());
  TestSamplingThread* thread = new TestSamplingThread(sampler);
  sampler->IncreaseProfilingDepth();
  sampler->Start();
  sampler->StartCountingSamples();
  thread->StartSynchronously();
  do {
    function->Call(env, env->Global(), argc, argv).ToLocalChecked();
  } while (sampler->js_sample_count() < min_js_samples ||
           sampler->external_sample_count() < min_external_samples);
  sampler->Stop();
  sampler->DecreaseProfilingDepth();
  thread->Join();
  delete thread;
  delete sampler;
  Sampler::TearDown();
}
} // namespace
// JS workload for the sampler test: repeatedly reads and writes an
// accessor-backed property, so execution alternates between JS code and
// the EXTERNAL (native accessor) VM state.
static const char* sampler_test_source = "function start(count) {\n"
"  for (var i = 0; i < count; i++) {\n"
"    var o = instance.foo;\n"
"    instance.foo = o + 1;\n"
"  }\n"
"}\n";
// Looks up a global JS function by name in |env|.
static v8::Local<v8::Function> GetFunction(v8::Local<v8::Context> env,
                                           const char* name) {
  v8::Local<v8::Value> value =
      env->Global()->Get(env, v8_str(name)).ToLocalChecked();
  return v8::Local<v8::Function>::Cast(value);
}
// End-to-end test: installs no-op native accessors on a global object,
// runs JS that keeps hitting them, and checks the sampler collects both
// JS-state and EXTERNAL-state samples.
TEST(LibSamplerCollectSample) {
  LocalContext env;
  v8::Isolate* isolate = env->GetIsolate();
  v8::HandleScope scope(isolate);

  v8::Local<v8::FunctionTemplate> func_template =
      v8::FunctionTemplate::New(isolate);
  v8::Local<v8::ObjectTemplate> instance_template =
      func_template->InstanceTemplate();

  // Attach the no-op Getter/Setter to property "foo".
  TestApiCallbacks accessors;
  v8::Local<v8::External> data =
      v8::External::New(isolate, &accessors);
  instance_template->SetAccessor(v8_str("foo"), &TestApiCallbacks::Getter,
                                 &TestApiCallbacks::Setter, data);
  v8::Local<v8::Function> func =
      func_template->GetFunction(env.local()).ToLocalChecked();
  v8::Local<v8::Object> instance =
      func->NewInstance(env.local()).ToLocalChecked();
  env->Global()->Set(env.local(), v8_str("instance"), instance).FromJust();

  CompileRun(sampler_test_source);
  v8::Local<v8::Function> function = GetFunction(env.local(), "start");

  int32_t repeat_count = 100;
  v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
  // Require at least 100 samples of each kind before returning.
  RunSampler(env.local(), function, args, arraysize(args), 100, 100);
}
} // namespace sampler
} // namespace v8
......@@ -422,7 +422,7 @@ static v8::CpuProfile* RunProfiler(v8::Local<v8::Context> env,
cpu_profiler->SetSamplingInterval(100);
cpu_profiler->StartProfiling(profile_name, collect_samples);
i::Sampler* sampler =
v8::sampler::Sampler* sampler =
reinterpret_cast<i::Isolate*>(env->GetIsolate())->logger()->sampler();
sampler->StartCountingSamples();
do {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment