Commit 06cc9b7c authored by lpy, committed by Commit bot

Create libsampler as V8 sampler library.

This patch does five things:

1. Extracts the sampler into libsampler to provide sampling support as a standalone library.
2. Makes SampleStack virtual so embedders can override the sample-collection behaviour (a sketch follows this list).
3. Removes sampler.[h|cc].
4. Moves the sampling thread into log.cc as a workaround to keep the --prof functionality.
5. Creates SamplerManager to manage the relationship between samplers and threads.
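
To illustrate point 2, here is a minimal sketch, not part of this patch and with hypothetical names, of an embedder-side sampler that overrides SampleStack to buffer register states:

  // Hypothetical embedder code; only the v8::sampler::Sampler base class is real.
  #include "src/libsampler/v8-sampler.h"

  class QueueingSampler : public v8::sampler::Sampler {
   public:
    explicit QueueingSampler(v8::Isolate* isolate)
        : v8::sampler::Sampler(isolate) {}

    // Invoked from DoSample() with the registers of the interrupted thread.
    // On POSIX this is typically called from a signal handler, so keep it
    // allocation-free.
    void SampleStack(const v8::RegisterState& regs) override {
      if (count_ < kCapacity) buffer_[count_++] = regs;  // buffer the sample
    }

   private:
    static const int kCapacity = 1024;
    v8::RegisterState buffer_[kCapacity];
    int count_ = 0;
  };

Sampling is then driven by a separate thread that periodically calls DoSample(), as the SamplingThread added to log.cc in this patch does.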

We port hashmap.h because, in debug mode, STL containers use mutexes from a shared
mutex pool, which can deadlock when a container is accessed from an asynchronous
signal handler.
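
As a hedged illustration of that hazard (mine, not part of the patch): a debug-mode STL container may take an internal lock on every access, so a lookup from a signal handler can self-deadlock:

  #include <map>

  std::map<void*, int> g_entries;  // debug-mode STL may lock a shared mutex pool internally

  void ProfilerSignalHandler(int) {
    g_entries.find(nullptr);       // the handler tries to take that same internal lock...
  }
  // ...and if the signal interrupted code that already holds the lock, the
  // process deadlocks. The hashmap ported below avoids STL containers for
  // exactly this reason.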

For now libsampler is consumed only by V8 itself; this is temporary.

BUG=v8:4789
LOG=n

Review-Url: https://codereview.chromium.org/1922303002
Cr-Commit-Position: refs/heads/master@{#36527}
parent d84dbc71
......@@ -105,6 +105,11 @@ config("libplatform_config") {
include_dirs = [ "include" ]
}
# This config should be applied to code using the libsampler.
config("libsampler_config") {
include_dirs = [ "include" ]
}
# This config should only be applied to code using V8 and not any V8 code
# itself.
config("external_config") {
......@@ -1329,8 +1334,6 @@ v8_source_set("v8_base") {
"src/profiler/profile-generator-inl.h",
"src/profiler/profile-generator.cc",
"src/profiler/profile-generator.h",
"src/profiler/sampler.cc",
"src/profiler/sampler.h",
"src/profiler/sampling-heap-profiler.cc",
"src/profiler/sampling-heap-profiler.h",
"src/profiler/strings-storage.cc",
......@@ -1816,6 +1819,7 @@ v8_source_set("v8_base") {
defines = []
deps = [
":v8_libbase",
":v8_libsampler",
]
if (is_win) {
......@@ -1970,6 +1974,23 @@ v8_source_set("v8_libplatform") {
]
}
v8_source_set("v8_libsampler") {
sources = [
"src/libsampler/hashmap.h",
"src/libsampler/utils.h",
"src/libsampler/v8-sampler.cc",
"src/libsampler/v8-sampler.h",
]
configs = [ ":internal_config_base" ]
public_configs = [ ":libsampler_config" ]
deps = [
":v8_libbase",
]
}
v8_source_set("fuzzer_support") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
......
......@@ -6331,6 +6331,12 @@ class V8_EXPORT Isolate {
*/
void VisitWeakHandles(PersistentHandleVisitor* visitor);
/**
* Check if this isolate is in use.
* True if at least one thread Enter'ed this isolate.
*/
bool IsInUse();
private:
template <class K, class V, class Traits>
friend class PersistentValueMapBase;
......
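
A brief sketch of the new Isolate::IsInUse() from an embedder's point of view; this is my own illustration (isolate creation parameters are assumed to be set up elsewhere), not part of the patch:

  v8::Isolate* isolate = v8::Isolate::New(create_params);  // create_params prepared elsewhere
  // No thread has Enter'ed the isolate yet, so isolate->IsInUse() is false.
  {
    v8::Isolate::Scope scope(isolate);  // Enter()s the isolate on this thread.
    // isolate->IsInUse() now returns true.
  }
  // Once the last thread has Exit'ed, IsInUse() returns false again.
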
......@@ -7793,6 +7793,12 @@ void Isolate::VisitExternalResources(ExternalResourceVisitor* visitor) {
}
bool Isolate::IsInUse() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
return isolate->IsInUse();
}
class VisitorAdapter : public i::ObjectVisitor {
public:
explicit VisitorAdapter(PersistentHandleVisitor* visitor)
......
......@@ -27,10 +27,10 @@
#include "src/ic/stub-cache.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/libsampler/v8-sampler.h"
#include "src/log.h"
#include "src/messages.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/sampler.h"
#include "src/prototype.h"
#include "src/regexp/regexp-stack.h"
#include "src/runtime-profiler.h"
......@@ -2006,7 +2006,7 @@ void Isolate::Deinit() {
}
// We must stop the logger before we tear down other components.
Sampler* sampler = logger_->sampler();
sampler::Sampler* sampler = logger_->sampler();
if (sampler && sampler->IsActive()) sampler->Stop();
delete deoptimizer_data_;
......
include_rules = [
"+include",
"-src",
"+src/base",
"+src/libsampler",
]
\ No newline at end of file
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is ported from src/hashmap.h
#ifndef V8_LIBSAMPLER_HASHMAP_H_
#define V8_LIBSAMPLER_HASHMAP_H_
#include "src/base/bits.h"
#include "src/base/logging.h"
#include "src/libsampler/utils.h"
namespace v8 {
namespace sampler {
class HashMapImpl {
public:
typedef bool (*MatchFun) (void* key1, void* key2);
// The default capacity.
static const uint32_t kDefaultHashMapCapacity = 8;
// initial_capacity is the size of the initial hash map;
// it must be a power of 2 (and thus must not be 0).
HashMapImpl(MatchFun match,
uint32_t initial_capacity = kDefaultHashMapCapacity);
~HashMapImpl();
// HashMap entries are (key, value, hash) triplets.
// Some clients may not need to use the value slot
// (e.g. implementers of sets, where the key is the value).
struct Entry {
void* key;
void* value;
uint32_t hash; // The full hash value for key
int order; // If you never remove entries this is the insertion order.
};
// If an entry with matching key is found, returns that entry.
// Otherwise, NULL is returned.
Entry* Lookup(void* key, uint32_t hash) const;
// If an entry with matching key is found, returns that entry.
// If no matching entry is found, a new entry is inserted with
// corresponding key, key hash, and NULL value.
Entry* LookupOrInsert(void* key, uint32_t hash);
// Removes the entry with matching key.
// It returns the value of the deleted entry
// or null if there is no value for such key.
void* Remove(void* key, uint32_t hash);
// Empties the hash map (occupancy() == 0).
void Clear();
// The number of (non-empty) entries in the table.
uint32_t occupancy() const { return occupancy_; }
// The capacity of the table. The implementation
// makes sure that occupancy is at most 80% of
// the table capacity.
uint32_t capacity() const { return capacity_; }
// Iteration
//
// for (Entry* p = map.Start(); p != NULL; p = map.Next(p)) {
// ...
// }
//
// If entries are inserted during iteration, the effect of
// calling Next() is undefined.
Entry* Start() const;
Entry* Next(Entry* p) const;
// Some match functions defined for convenience.
static bool PointersMatch(void* key1, void* key2) {
return key1 == key2;
}
private:
MatchFun match_;
Entry* map_;
uint32_t capacity_;
uint32_t occupancy_;
Entry* map_end() const { return map_ + capacity_; }
Entry* Probe(void* key, uint32_t hash) const;
void Initialize(uint32_t capacity);
void Resize();
};
typedef HashMapImpl HashMap;
HashMapImpl::HashMapImpl(MatchFun match, uint32_t initial_capacity) {
match_ = match;
Initialize(initial_capacity);
}
HashMapImpl::~HashMapImpl() {
Malloced::Delete(map_);
}
HashMapImpl::Entry* HashMapImpl::Lookup(void* key, uint32_t hash) const {
Entry* p = Probe(key, hash);
return p->key != NULL ? p : NULL;
}
HashMapImpl::Entry* HashMapImpl::LookupOrInsert(void* key, uint32_t hash) {
// Find a matching entry.
Entry* p = Probe(key, hash);
if (p->key != NULL) {
return p;
}
// No entry found; insert one.
p->key = key;
p->value = NULL;
p->hash = hash;
p->order = occupancy_;
occupancy_++;
// Grow the map if we reached >= 80% occupancy.
if (occupancy_ + occupancy_ / 4 >= capacity_) {
Resize();
p = Probe(key, hash);
}
return p;
}
void* HashMapImpl::Remove(void* key, uint32_t hash) {
// Lookup the entry for the key to remove.
Entry* p = Probe(key, hash);
if (p->key == NULL) {
// Key not found; nothing to remove.
return NULL;
}
void* value = p->value;
// To remove an entry we need to ensure that it does not create an empty
// entry that will cause the search for another entry to stop too soon. If all
// the entries between the entry to remove and the next empty slot have their
// initial position inside this interval, clearing the entry to remove will
// not break the search. If, while searching for the next empty entry, an
// entry is encountered which does not have its initial position between the
// entry to remove and the position looked at, then this entry can be moved to
// the place of the entry to remove without breaking the search for it. The
// entry made vacant by this move is now the entry to remove and the process
// starts over.
// Algorithm from http://en.wikipedia.org/wiki/Open_addressing.
// This guarantees loop termination as there is at least one empty entry so
// eventually the removed entry will have an empty entry after it.
DCHECK(occupancy_ < capacity_);
// p is the candidate entry to clear. q is used to scan forwards.
Entry* q = p; // Start at the entry to remove.
while (true) {
// Move q to the next entry.
q = q + 1;
if (q == map_end()) {
q = map_;
}
// All entries between p and q have their initial position between p and q
// and the entry p can be cleared without breaking the search for these
// entries.
if (q->key == NULL) {
break;
}
// Find the initial position for the entry at position q.
Entry* r = map_ + (q->hash & (capacity_ - 1));
// If the entry at position q has its initial position outside the range
// between p and q it can be moved forward to position p and will still be
// found. There is now a new candidate entry for clearing.
if ((q > p && (r <= p || r > q)) ||
(q < p && (r <= p && r > q))) {
*p = *q;
p = q;
}
}
// Clear the entry which is allowed to be emptied.
p->key = NULL;
occupancy_--;
return value;
}
void HashMapImpl::Clear() {
// Mark all entries as empty.
const Entry* end = map_end();
for (Entry* p = map_; p < end; p++) {
p->key = NULL;
}
occupancy_ = 0;
}
HashMapImpl::Entry* HashMapImpl::Start() const {
return Next(map_ - 1);
}
HashMapImpl::Entry* HashMapImpl::Next(Entry* p) const {
const Entry* end = map_end();
DCHECK(map_ - 1 <= p && p < end);
for (p++; p < end; p++) {
if (p->key != NULL) {
return p;
}
}
return NULL;
}
HashMapImpl::Entry* HashMapImpl::Probe(void* key, uint32_t hash) const {
DCHECK(key != NULL);
DCHECK(base::bits::IsPowerOfTwo32(capacity_));
Entry* p = map_ + (hash & (capacity_ - 1));
const Entry* end = map_end();
DCHECK(map_ <= p && p < end);
DCHECK(occupancy_ < capacity_); // Guarantees loop termination.
while (p->key != NULL && (hash != p->hash || !match_(key, p->key))) {
p++;
if (p >= end) {
p = map_;
}
}
return p;
}
void HashMapImpl::Initialize(uint32_t capacity) {
DCHECK(base::bits::IsPowerOfTwo32(capacity));
map_ = reinterpret_cast<Entry*>(Malloced::New(capacity * sizeof(Entry)));
CHECK(map_ != NULL);
capacity_ = capacity;
Clear();
}
void HashMapImpl::Resize() {
Entry* map = map_;
uint32_t n = occupancy_;
// Allocate larger map.
Initialize(capacity_ * 2);
// Rehash all current entries.
for (Entry* p = map; n > 0; p++) {
if (p->key != NULL) {
Entry* entry = LookupOrInsert(p->key, p->hash);
entry->value = p->value;
entry->order = p->order;
n--;
}
}
// Delete old map.
Malloced::Delete(map);
}
} // namespace sampler
} // namespace v8
#endif // V8_LIBSAMPLER_HASHMAP_H_
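
For orientation, a minimal usage sketch of the HashMapImpl API declared above; the key, hash and payload choices are illustrative only and not part of the patch:

  #include <stdint.h>
  #include "src/libsampler/hashmap.h"

  void HashMapExample(void* key, void* payload) {
    v8::sampler::HashMapImpl map(v8::sampler::HashMapImpl::PointersMatch);
    uint32_t hash = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key));

    // LookupOrInsert() inserts a new entry with a NULL value if the key is absent.
    v8::sampler::HashMapImpl::Entry* entry = map.LookupOrInsert(key, hash);
    entry->value = payload;

    // Iterate over all live entries; inserting during iteration is undefined.
    for (v8::sampler::HashMapImpl::Entry* p = map.Start(); p != NULL;
         p = map.Next(p)) {
      // p->key, p->value, p->hash and p->order are all available here.
    }

    map.Remove(key, hash);  // returns the removed value, or NULL if absent
  }
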
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_LIBSAMPLER_UTILS_H_
#define V8_LIBSAMPLER_UTILS_H_
#include "include/v8.h"
namespace v8 {
namespace sampler {
class Malloced {
public:
static void* New(size_t size) {
return malloc(size);
}
static void Delete(void* p) {
free(p);
}
};
} // namespace sampler
} // namespace v8
#endif // V8_LIBSAMPLER_UTILS_H_
// Copyright 2013 the V8 project authors. All rights reserved.
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_PROFILER_SAMPLER_H_
#define V8_PROFILER_SAMPLER_H_
#ifndef V8_LIBSAMPLER_SAMPLER_H_
#define V8_LIBSAMPLER_SAMPLER_H_
#include "include/v8.h"
......@@ -11,10 +11,7 @@
#include "src/base/macros.h"
namespace v8 {
namespace internal {
class Isolate;
struct TickSample;
namespace sampler {
// ----------------------------------------------------------------------------
// Sampler
......@@ -25,19 +22,23 @@ struct TickSample;
class Sampler {
public:
static const int kMaxFramesCountLog2 = 8;
static const unsigned kMaxFramesCount = (1u << kMaxFramesCountLog2) - 1;
// Initializes the Sampler support. Called once at VM startup.
static void SetUp();
static void TearDown();
// Initialize sampler.
Sampler(Isolate* isolate, int interval);
explicit Sampler(Isolate* isolate);
virtual ~Sampler();
Isolate* isolate() const { return isolate_; }
int interval() const { return interval_; }
// Performs stack sampling.
void SampleStack(const v8::RegisterState& regs);
// Clients should override this method in order to do something on samples,
// for example buffer samples in a queue.
virtual void SampleStack(const v8::RegisterState& regs) = 0;
// Start and stop sampler.
void Start();
......@@ -60,8 +61,7 @@ class Sampler {
bool IsRegistered() const { return base::NoBarrier_Load(&registered_); }
void DoSample();
// If true, the next sample must be initiated on the profiler event processor
// thread right after the latest sample is processed.
void SetHasProcessingThread(bool value) {
base::NoBarrier_Store(&has_processing_thread_, value);
}
......@@ -79,30 +79,25 @@ class Sampler {
PlatformData* platform_data() const { return data_; }
protected:
// This method is called for each sampling period with the current
// program counter.
virtual void Tick(TickSample* sample) = 0;
// Counts stack samples taken in various VM states.
bool is_counting_samples_;
unsigned js_sample_count_;
unsigned external_sample_count_;
private:
void SetActive(bool value) { base::NoBarrier_Store(&active_, value); }
void SetRegistered(bool value) { base::NoBarrier_Store(&registered_, value); }
Isolate* isolate_;
const int interval_;
base::Atomic32 profiling_;
base::Atomic32 has_processing_thread_;
base::Atomic32 active_;
base::Atomic32 registered_;
PlatformData* data_; // Platform specific data.
// Counts stack samples taken in various VM states.
bool is_counting_samples_;
unsigned js_sample_count_;
unsigned external_sample_count_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
};
} // namespace internal
} // namespace sampler
} // namespace v8
#endif // V8_PROFILER_SAMPLER_H_
#endif // V8_LIBSAMPLER_SAMPLER_H_
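
To tie the declarations above together, a hedged sketch of the intended lifecycle; MySampler stands for any Sampler subclass that overrides SampleStack, and the loop mirrors the cctest added later in this patch:

  v8::sampler::Sampler::SetUp();       // once per process, at startup

  MySampler sampler(isolate);          // hypothetical subclass of v8::sampler::Sampler
  sampler.IncreaseProfilingDepth();
  sampler.Start();

  // From a dedicated sampling thread, interrupt the isolate's thread periodically.
  while (sampler.IsProfiling()) {
    sampler.DoSample();                // ends up invoking SampleStack(regs)
    v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(1));
  }

  sampler.Stop();
  sampler.DecreaseProfilingDepth();
  v8::sampler::Sampler::TearDown();    // once per process, at shutdown
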
......@@ -15,11 +15,12 @@
#include "src/global-handles.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/libsampler/v8-sampler.h"
#include "src/log-inl.h"
#include "src/log-utils.h"
#include "src/macro-assembler.h"
#include "src/perf-jit.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/runtime-profiler.h"
#include "src/string-stream.h"
#include "src/vm-state-inl.h"
......@@ -539,6 +540,31 @@ void JitLogger::EndCodePosInfoEvent(AbstractCode* code,
}
// TODO(lpy): Keeping the sampling thread inside V8 is currently a workaround
// to reduce code duplication during the migration to the sampler library. The
// sampling thread, as well as the sampler, will eventually be moved to D8.
class SamplingThread : public base::Thread {
public:
static const int kSamplingThreadStackSize = 64 * KB;
SamplingThread(sampler::Sampler* sampler, int interval)
: base::Thread(base::Thread::Options("SamplingThread",
kSamplingThreadStackSize)),
sampler_(sampler),
interval_(interval) {}
void Run() override {
while (sampler_->IsProfiling()) {
sampler_->DoSample();
base::OS::Sleep(base::TimeDelta::FromMilliseconds(interval_));
}
}
private:
sampler::Sampler* sampler_;
const int interval_;
};
// The Profiler samples pc and sp values for the main thread.
// Each sample is appended to a circular buffer.
// An independent thread removes data and writes it to the log.
......@@ -611,16 +637,16 @@ class Profiler: public base::Thread {
// Ticker used to provide ticks to the profiler and the sliding state
// window.
//
class Ticker: public Sampler {
class Ticker: public sampler::Sampler {
public:
Ticker(Isolate* isolate, int interval):
Sampler(isolate, interval),
profiler_(NULL) {}
~Ticker() { if (IsActive()) Stop(); }
sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
profiler_(NULL),
sampling_thread_(new SamplingThread(this, interval)) {}
virtual void Tick(TickSample* sample) {
if (profiler_) profiler_->Insert(sample);
~Ticker() {
if (IsActive()) Stop();
delete sampling_thread_;
}
void SetProfiler(Profiler* profiler) {
......@@ -628,16 +654,40 @@ class Ticker: public Sampler {
profiler_ = profiler;
IncreaseProfilingDepth();
if (!IsActive()) Start();
sampling_thread_->StartSynchronously();
}
void ClearProfiler() {
profiler_ = NULL;
if (IsActive()) Stop();
DecreaseProfilingDepth();
sampling_thread_->Join();
}
void SampleStack(const v8::RegisterState& state) override {
v8::Isolate* v8_isolate = isolate();
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
#if defined(USE_SIMULATOR)
SimulatorHelper::FillRegisters(isolate,
const_cast<v8::RegisterState*>(&state));
#endif
TickSample* sample = isolate->cpu_profiler()->StartTickSample();
TickSample sample_obj;
if (sample == NULL) sample = &sample_obj;
sample->Init(isolate, state, TickSample::kIncludeCEntryFrame, true);
if (is_counting_samples_ && !sample->timestamp.IsNull()) {
if (sample->state == JS) ++js_sample_count_;
if (sample->state == EXTERNAL) ++external_sample_count_;
}
if (profiler_) profiler_->Insert(sample);
if (sample != &sample_obj) {
isolate->cpu_profiler()->FinishTickSample();
}
}
private:
Profiler* profiler_;
SamplingThread* sampling_thread_;
};
......@@ -1810,7 +1860,7 @@ void Logger::SetCodeEventHandler(uint32_t options,
}
Sampler* Logger::sampler() {
sampler::Sampler* Logger::sampler() {
return ticker_;
}
......
......@@ -19,6 +19,10 @@ namespace base {
class Semaphore;
}
namespace sampler {
class Sampler;
}
namespace internal {
// Logger is used for collecting logging information from V8 during
......@@ -141,7 +145,6 @@ class JitLogger;
class PerfBasicLogger;
class LowLevelLogger;
class PerfJitLogger;
class Sampler;
class Logger {
public:
......@@ -161,7 +164,7 @@ class Logger {
void SetCodeEventHandler(uint32_t options,
JitCodeEventHandler event_handler);
Sampler* sampler();
sampler::Sampler* sampler();
// Frees resources acquired in SetUp.
// When a temporary file is used for the log, returns its stream descriptor,
......
......@@ -21,7 +21,7 @@ static const int kProfilerStackSize = 64 * KB;
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator,
Sampler* sampler,
sampler::Sampler* sampler,
base::TimeDelta period)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
......@@ -566,7 +566,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
saved_is_logging_ = logger->is_logging_;
logger->is_logging_ = false;
generator_ = new ProfileGenerator(profiles_);
Sampler* sampler = logger->sampler();
sampler::Sampler* sampler = logger->sampler();
processor_ = new ProfilerEventsProcessor(
generator_, sampler, sampling_interval_);
is_profiling_ = true;
......@@ -612,7 +612,8 @@ void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
void CpuProfiler::StopProcessor() {
Logger* logger = isolate_->logger();
Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
sampler::Sampler* sampler =
reinterpret_cast<sampler::Sampler*>(logger->ticker_);
is_profiling_ = false;
processor_->StopSynchronously();
delete processor_;
......
......@@ -10,9 +10,9 @@
#include "src/base/atomicops.h"
#include "src/base/platform/time.h"
#include "src/compiler.h"
#include "src/libsampler/v8-sampler.h"
#include "src/locked-queue.h"
#include "src/profiler/circular-queue.h"
#include "src/profiler/sampler.h"
#include "src/profiler/tick-sample.h"
namespace v8 {
......@@ -128,7 +128,7 @@ class CodeEventsContainer {
class ProfilerEventsProcessor : public base::Thread {
public:
ProfilerEventsProcessor(ProfileGenerator* generator,
Sampler* sampler,
sampler::Sampler* sampler,
base::TimeDelta period);
virtual ~ProfilerEventsProcessor();
......@@ -166,7 +166,7 @@ class ProfilerEventsProcessor : public base::Thread {
SampleProcessingResult ProcessOneSample();
ProfileGenerator* generator_;
Sampler* sampler_;
sampler::Sampler* sampler_;
base::Atomic32 running_;
const base::TimeDelta period_; // Samples & code events processing period.
LockedQueue<CodeEventsContainer> events_buffer_;
......
......@@ -14,9 +14,9 @@
#include "src/elements.h"
#include "src/frames.h"
#include "src/isolate.h"
#include "src/libsampler/v8-sampler.h"
#include "src/objects.h"
#include "src/profiler/heap-profiler.h"
#include "src/profiler/sampler.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
......@@ -48,7 +48,7 @@ void V8::TearDown() {
ExternalReference::TearDownMathExpData();
RegisteredExtension::UnregisterAll();
Isolate::GlobalTearDown();
Sampler::TearDown();
sampler::Sampler::TearDown();
FlagList::ResetAllFlags(); // Frees memory held by string arguments.
}
......@@ -76,7 +76,7 @@ void V8::InitializeOncePerProcessImpl() {
Isolate::InitializeOncePerProcess();
Sampler::SetUp();
sampler::Sampler::SetUp();
CpuFeatures::Probe(false);
ElementsAccessor::InitializeOncePerProcess();
LOperand::SetUpCaches();
......
......@@ -381,6 +381,7 @@
'type': 'static_library',
'dependencies': [
'v8_libbase',
'v8_libsampler',
],
'variables': {
'optimize': 'max',
......@@ -1001,8 +1002,6 @@
'profiler/profile-generator-inl.h',
'profiler/profile-generator.cc',
'profiler/profile-generator.h',
'profiler/sampler.cc',
'profiler/sampler.h',
'profiler/sampling-heap-profiler.cc',
'profiler/sampling-heap-profiler.h',
'profiler/strings-storage.cc',
......@@ -1939,6 +1938,38 @@
],
},
},
{
'target_name': 'v8_libsampler',
'type': 'static_library',
'variables': {
'optimize': 'max',
},
'dependencies': [
'v8_libbase',
],
'include_dirs+': [
'..',
'../include',
],
'sources': [
'libsampler/hashmap.h',
'libsampler/utils.h',
'libsampler/v8-sampler.cc',
'libsampler/v8-sampler.h'
],
'conditions': [
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
}, {
'toolsets': ['target'],
}],
],
'direct_dependent_settings': {
'include_dirs': [
'../include',
],
},
},
{
'target_name': 'natives_blob',
'type': 'none',
......
......@@ -108,6 +108,7 @@
'heap/test-lab.cc',
'heap/test-mark-compact.cc',
'heap/test-spaces.cc',
'libsampler/test-sampler.cc',
'print-extension.cc',
'profiler-extension.cc',
'test-accessors.cc',
......
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Tests of sampler functionality.
#include "src/libsampler/v8-sampler.h"
#include "src/base/platform/platform.h"
#include "test/cctest/cctest.h"
namespace v8 {
namespace sampler {
namespace {
class TestSamplingThread : public base::Thread {
public:
static const int kSamplerThreadStackSize = 64 * 1024;
explicit TestSamplingThread(Sampler* sampler)
: Thread(base::Thread::Options("TestSamplingThread",
kSamplerThreadStackSize)),
sampler_(sampler) {}
// Implement Thread::Run().
void Run() override {
while (sampler_->IsProfiling()) {
sampler_->DoSample();
base::OS::Sleep(base::TimeDelta::FromMilliseconds(1));
}
}
private:
Sampler* sampler_;
};
class TestSampler : public Sampler {
public:
explicit TestSampler(Isolate* isolate) : Sampler(isolate) {}
void SampleStack(const v8::RegisterState& regs) override {
void* frames[Sampler::kMaxFramesCount];
SampleInfo sample_info;
isolate()->GetStackSample(regs, reinterpret_cast<void**>(frames),
Sampler::kMaxFramesCount, &sample_info);
if (is_counting_samples_) {
if (sample_info.vm_state == JS) ++js_sample_count_;
if (sample_info.vm_state == EXTERNAL) ++external_sample_count_;
}
}
};
class TestApiCallbacks {
public:
TestApiCallbacks() {}
static void Getter(v8::Local<v8::String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
}
static void Setter(v8::Local<v8::String> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
}
};
static void RunSampler(v8::Local<v8::Context> env,
v8::Local<v8::Function> function,
v8::Local<v8::Value> argv[], int argc,
unsigned min_js_samples = 0,
unsigned min_external_samples = 0) {
Sampler::SetUp();
TestSampler* sampler = new TestSampler(env->GetIsolate());
TestSamplingThread* thread = new TestSamplingThread(sampler);
sampler->IncreaseProfilingDepth();
sampler->Start();
sampler->StartCountingSamples();
thread->StartSynchronously();
do {
function->Call(env, env->Global(), argc, argv).ToLocalChecked();
} while (sampler->js_sample_count() < min_js_samples ||
sampler->external_sample_count() < min_external_samples);
sampler->Stop();
sampler->DecreaseProfilingDepth();
thread->Join();
delete thread;
delete sampler;
Sampler::TearDown();
}
} // namespace
static const char* sampler_test_source = "function start(count) {\n"
" for (var i = 0; i < count; i++) {\n"
" var o = instance.foo;\n"
" instance.foo = o + 1;\n"
" }\n"
"}\n";
static v8::Local<v8::Function> GetFunction(v8::Local<v8::Context> env,
const char* name) {
return v8::Local<v8::Function>::Cast(
env->Global()->Get(env, v8_str(name)).ToLocalChecked());
}
TEST(LibSamplerCollectSample) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
v8::Local<v8::FunctionTemplate> func_template =
v8::FunctionTemplate::New(isolate);
v8::Local<v8::ObjectTemplate> instance_template =
func_template->InstanceTemplate();
TestApiCallbacks accessors;
v8::Local<v8::External> data =
v8::External::New(isolate, &accessors);
instance_template->SetAccessor(v8_str("foo"), &TestApiCallbacks::Getter,
&TestApiCallbacks::Setter, data);
v8::Local<v8::Function> func =
func_template->GetFunction(env.local()).ToLocalChecked();
v8::Local<v8::Object> instance =
func->NewInstance(env.local()).ToLocalChecked();
env->Global()->Set(env.local(), v8_str("instance"), instance).FromJust();
CompileRun(sampler_test_source);
v8::Local<v8::Function> function = GetFunction(env.local(), "start");
int32_t repeat_count = 100;
v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
RunSampler(env.local(), function, args, arraysize(args), 100, 100);
}
} // namespace sampler
} // namespace v8
......@@ -422,7 +422,7 @@ static v8::CpuProfile* RunProfiler(v8::Local<v8::Context> env,
cpu_profiler->SetSamplingInterval(100);
cpu_profiler->StartProfiling(profile_name, collect_samples);
i::Sampler* sampler =
v8::sampler::Sampler* sampler =
reinterpret_cast<i::Isolate*>(env->GetIsolate())->logger()->sampler();
sampler->StartCountingSamples();
do {
......