Commit 0aacfb2a authored by Andrew Comminos, committed by Commit Bot

[cpu-profiler] Reintroduce support for context filtering

As we still intend to run the web-exposed profiler outside of an
origin-isolated environment, add support back for filtering by
v8::Context.

This reverts commit 05af3681.

Bug: chromium:956688
Change-Id: Idd98bea3213b5963f689a04de6c3743073efc587
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2785806
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Commit-Queue: Andrew Comminos <acomminos@fb.com>
Cr-Commit-Position: refs/heads/master@{#74112}
parent 32d3c92d
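This change restores the fourth parameter of CpuProfilingOptions, previously a deprecated no-op, so embedders can scope a profile to a single v8::Context. For illustration, a minimal usage sketch (not part of this change), assuming an initialized v8::Isolate* named isolate with an entered handle scope:

  v8::CpuProfiler* profiler = v8::CpuProfiler::New(isolate);
  v8::Local<v8::Context> target = v8::Context::New(isolate);
  v8::Local<v8::String> name =
      v8::String::NewFromUtf8Literal(isolate, "filtered");
  // Passing a context as the filter_context option elides frames from all
  // other contexts; 0 keeps the profiler's own sampling interval.
  profiler->StartProfiling(
      name, v8::CpuProfilingOptions(v8::kLeafNodeLineNumbers,
                                    v8::CpuProfilingOptions::kNoSampleLimit,
                                    0, target));
  // ... run code inside `target` ...
  v8::CpuProfile* profile = profiler->StopProfiling(name);
  profile->Delete();
  profiler->Dispose();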
......@@ -289,8 +289,8 @@ class V8_EXPORT CpuProfilingOptions {
* interval, set via SetSamplingInterval(). If
* zero, the sampling interval will be equal to
* the profiler's sampling interval.
* \param filter_context Deprecated option to filter by context, currently a
* no-op.
* \param filter_context If specified, profiles will only contain frames
* using this context. Other frames will be elided.
*/
CpuProfilingOptions(
CpuProfilingMode mode = kLeafNodeLineNumbers,
......@@ -304,9 +304,13 @@ class V8_EXPORT CpuProfilingOptions {
private:
friend class internal::CpuProfile;
bool has_filter_context() const { return !filter_context_.IsEmpty(); }
void* raw_filter_context() const;
CpuProfilingMode mode_;
unsigned max_samples_;
int sampling_interval_us_;
CopyablePersistentTraits<Context>::CopyablePersistent filter_context_;
};
/**
......
......@@ -2424,7 +2424,7 @@ struct SampleInfo {
StateTag vm_state; // Current VM state.
void* external_callback_entry; // External callback address if VM is
// executing an external callback.
void* top_context; // Incumbent native context address.
void* context; // Incumbent native context address.
};
struct MemoryRange {
......
......@@ -9478,7 +9478,21 @@ CpuProfilingOptions::CpuProfilingOptions(CpuProfilingMode mode,
MaybeLocal<Context> filter_context)
: mode_(mode),
max_samples_(max_samples),
sampling_interval_us_(sampling_interval_us) {}
sampling_interval_us_(sampling_interval_us) {
if (!filter_context.IsEmpty()) {
Local<Context> local_filter_context = filter_context.ToLocalChecked();
filter_context_.Reset(local_filter_context->GetIsolate(),
local_filter_context);
filter_context_.SetWeak();
}
}
void* CpuProfilingOptions::raw_filter_context() const {
return reinterpret_cast<void*>(
i::Context::cast(*Utils::OpenPersistent(filter_context_))
.native_context()
.address());
}
void CpuProfiler::Dispose() { delete reinterpret_cast<i::CpuProfiler*>(this); }
......
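The option stores the context in a copyable persistent handle that is made weak, so holding profiling options never keeps a context alive; once the context is collected, the handle empties and has_filter_context() returns false, which is why raw_filter_context() above can assume a live object. A minimal sketch of that handle pattern, assuming an existing isolate and context:

  v8::Persistent<v8::Context, v8::CopyablePersistentTraits<v8::Context>>
      handle;
  handle.Reset(isolate, context);
  handle.SetWeak();  // phantom-weak: the GC clears the handle when the
                     // context becomes unreachable, rather than keep it alive
  if (!handle.IsEmpty()) {
    // The context is still live; its address may be resolved safely.
  }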
......@@ -3195,6 +3195,9 @@ void Heap::OnMoveEvent(HeapObject target, HeapObject source,
if (target.IsSharedFunctionInfo()) {
LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source.address(),
target.address()));
} else if (target.IsNativeContext()) {
PROFILE(isolate_,
NativeContextMoveEvent(source.address(), target.address()));
}
if (FLAG_verify_predictable) {
......
......@@ -98,6 +98,7 @@ class CodeEventListener {
// Not handlified as this happens during GC. No allocation allowed.
virtual void CodeMoveEvent(AbstractCode from, AbstractCode to) = 0;
virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0;
virtual void NativeContextMoveEvent(Address from, Address to) = 0;
virtual void CodeMovingGCEvent() = 0;
virtual void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) = 0;
......@@ -217,6 +218,11 @@ class CodeEventDispatcher : public CodeEventListener {
listener->SharedFunctionInfoMoveEvent(from, to);
});
}
void NativeContextMoveEvent(Address from, Address to) override {
DispatchEventToListeners([=](CodeEventListener* listener) {
listener->NativeContextMoveEvent(from, to);
});
}
void CodeMovingGCEvent() override {
DispatchEventToListeners(
[](CodeEventListener* listener) { listener->CodeMovingGCEvent(); });
......
......@@ -208,6 +208,7 @@ class Logger : public CodeEventListener {
Handle<String> source) override;
void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
void SharedFunctionInfoMoveEvent(Address from, Address to) override;
void NativeContextMoveEvent(Address from, Address to) override {}
void CodeMovingGCEvent() override;
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) override;
......@@ -410,6 +411,7 @@ class V8_EXPORT_PRIVATE CodeEventLogger : public CodeEventListener {
void GetterCallbackEvent(Handle<Name> name, Address entry_point) override {}
void SetterCallbackEvent(Handle<Name> name, Address entry_point) override {}
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
void NativeContextMoveEvent(Address from, Address to) override {}
void CodeMovingGCEvent() override {}
void CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta, bool reuse_code) override {}
......@@ -475,6 +477,7 @@ class ExternalCodeEventListener : public CodeEventListener {
void GetterCallbackEvent(Handle<Name> name, Address entry_point) override {}
void SetterCallbackEvent(Handle<Name> name, Address entry_point) override {}
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
void NativeContextMoveEvent(Address from, Address to) override {}
void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) override {}
......
......@@ -104,10 +104,11 @@ ProfilingScope::~ProfilingScope() {
ProfilerEventsProcessor::ProfilerEventsProcessor(
Isolate* isolate, Symbolizer* symbolizer,
ProfilerCodeObserver* code_observer)
ProfilerCodeObserver* code_observer, CpuProfilesCollection* profiles)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
symbolizer_(symbolizer),
code_observer_(code_observer),
profiles_(profiles),
last_code_event_id_(0),
last_processed_code_event_id_(0),
isolate_(isolate) {
......@@ -119,9 +120,8 @@ SamplingEventsProcessor::SamplingEventsProcessor(
Isolate* isolate, Symbolizer* symbolizer,
ProfilerCodeObserver* code_observer, CpuProfilesCollection* profiles,
base::TimeDelta period, bool use_precise_sampling)
: ProfilerEventsProcessor(isolate, symbolizer, code_observer),
: ProfilerEventsProcessor(isolate, symbolizer, code_observer, profiles),
sampler_(new CpuSampler(isolate, this)),
profiles_(profiles),
period_(period),
use_precise_sampling_(use_precise_sampling) {
sampler_->Start();
......@@ -188,7 +188,14 @@ void ProfilerEventsProcessor::StopSynchronously() {
bool ProfilerEventsProcessor::ProcessCodeEvent() {
CodeEventsContainer record;
if (events_buffer_.Dequeue(&record)) {
code_observer_->CodeEventHandlerInternal(record);
if (record.generic.type == CodeEventRecord::NATIVE_CONTEXT_MOVE) {
NativeContextMoveEventRecord& nc_record =
record.NativeContextMoveEventRecord_;
profiles_->UpdateNativeContextAddressForCurrentProfiles(
nc_record.from_address, nc_record.to_address);
} else {
code_observer_->CodeEventHandlerInternal(record);
}
last_processed_code_event_id_ = record.generic.order;
return true;
}
......@@ -202,6 +209,7 @@ void ProfilerEventsProcessor::CodeEventHandler(
case CodeEventRecord::CODE_MOVE:
case CodeEventRecord::CODE_DISABLE_OPT:
case CodeEventRecord::CODE_DELETE:
case CodeEventRecord::NATIVE_CONTEXT_MOVE:
Enqueue(evt_rec);
break;
case CodeEventRecord::CODE_DEOPT: {
......@@ -224,7 +232,8 @@ void SamplingEventsProcessor::SymbolizeAndAddToProfiles(
symbolizer_->SymbolizeTickSample(record->sample);
profiles_->AddPathToCurrentProfiles(
record->sample.timestamp, symbolized.stack_trace, symbolized.src_line,
record->sample.update_stats, record->sample.sampling_interval);
record->sample.update_stats, record->sample.sampling_interval,
reinterpret_cast<Address>(record->sample.context));
}
ProfilerEventsProcessor::SampleProcessingResult
......
......@@ -37,10 +37,14 @@ class Symbolizer;
V(REPORT_BUILTIN, ReportBuiltinEventRecord) \
V(CODE_DELETE, CodeDeleteEventRecord)
#define VM_EVENTS_TYPE_LIST(V) \
CODE_EVENTS_TYPE_LIST(V) \
V(NATIVE_CONTEXT_MOVE, NativeContextMoveEventRecord)
class CodeEventRecord {
public:
#define DECLARE_TYPE(type, ignore) type,
enum Type { NONE = 0, CODE_EVENTS_TYPE_LIST(DECLARE_TYPE) };
enum Type { NONE = 0, VM_EVENTS_TYPE_LIST(DECLARE_TYPE) };
#undef DECLARE_TYPE
Type type;
......@@ -99,6 +103,13 @@ class ReportBuiltinEventRecord : public CodeEventRecord {
V8_INLINE void UpdateCodeMap(CodeMap* code_map);
};
// Signals that a native context's address has changed.
class NativeContextMoveEventRecord : public CodeEventRecord {
public:
Address from_address;
Address to_address;
};
// A record type for sending samples from the main thread/signal handler to the
// profiling thread.
class TickSampleEventRecord {
......@@ -130,7 +141,7 @@ class CodeEventsContainer {
union {
CodeEventRecord generic;
#define DECLARE_CLASS(ignore, type) type type##_;
CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
VM_EVENTS_TYPE_LIST(DECLARE_CLASS)
#undef DECLARE_CLASS
};
};
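The new record type is threaded through the existing X-macro lists: CODE_EVENTS_TYPE_LIST still drives the code map, while VM_EVENTS_TYPE_LIST wraps it and appends records the code observer never consumes, keeping the Type enum and the CodeEventsContainer union in sync from a single definition. A stripped-down sketch of the pattern (the CODE_CREATION entry is illustrative, not the full V8 list):

  #define CODE_EVENTS_TYPE_LIST(V) V(CODE_CREATION, CodeCreateEventRecord)
  #define VM_EVENTS_TYPE_LIST(V) \
    CODE_EVENTS_TYPE_LIST(V)     \
    V(NATIVE_CONTEXT_MOVE, NativeContextMoveEventRecord)

  #define DECLARE_TYPE(type, ignore) type,
  enum Type { NONE = 0, VM_EVENTS_TYPE_LIST(DECLARE_TYPE) };
  // Expands to: enum Type { NONE = 0, CODE_CREATION, NATIVE_CONTEXT_MOVE, };
  #undef DECLARE_TYPE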
......@@ -174,7 +185,8 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
protected:
ProfilerEventsProcessor(Isolate* isolate, Symbolizer* symbolizer,
ProfilerCodeObserver* code_observer);
ProfilerCodeObserver* code_observer,
CpuProfilesCollection* profiles);
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent();
......@@ -188,6 +200,7 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
Symbolizer* symbolizer_;
ProfilerCodeObserver* code_observer_;
CpuProfilesCollection* profiles_;
std::atomic_bool running_{true};
base::ConditionVariable running_cond_;
base::Mutex running_mutex_;
......@@ -238,7 +251,6 @@ class V8_EXPORT_PRIVATE SamplingEventsProcessor
SamplingCircularQueue<TickSampleEventRecord,
kTickSampleQueueLength> ticks_buffer_;
std::unique_ptr<sampler::Sampler> sampler_;
CpuProfilesCollection* profiles_;
base::TimeDelta period_; // Samples & code events processing period.
const bool use_precise_sampling_; // Whether or not busy-waiting is used for
// low sampling intervals on Windows.
......
......@@ -533,6 +533,12 @@ void ProfileTree::TraverseDepthFirst(Callback* callback) {
}
}
void ContextFilter::OnMoveEvent(Address from_address, Address to_address) {
if (native_context_address() != from_address) return;
set_native_context_address(to_address);
}
using v8::tracing::TracedValue;
std::atomic<uint32_t> CpuProfile::last_id_;
......@@ -557,6 +563,13 @@ CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
value->SetDouble("startTime", start_time_.since_origin().InMicroseconds());
TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
"Profile", id_, "data", std::move(value));
DisallowHeapAllocation no_gc;
if (options_.has_filter_context()) {
i::Address raw_filter_context =
reinterpret_cast<i::Address>(options_.raw_filter_context());
context_filter_.set_native_context_address(raw_filter_context);
}
}
bool CpuProfile::CheckSubsample(base::TimeDelta source_sampling_interval) {
......@@ -706,6 +719,8 @@ void CpuProfile::StreamPendingTraceEvents() {
void CpuProfile::FinishProfile() {
end_time_ = base::TimeTicks::HighResolutionNow();
// Stop tracking context movements after profiling stops.
context_filter_.set_native_context_address(kNullAddress);
StreamPendingTraceEvents();
auto value = TracedValue::Create();
// The endTime timestamp is not converted to Perfetto's clock domain and will
......@@ -942,14 +957,26 @@ base::TimeDelta CpuProfilesCollection::GetCommonSamplingInterval() const {
void CpuProfilesCollection::AddPathToCurrentProfiles(
base::TimeTicks timestamp, const ProfileStackTrace& path, int src_line,
bool update_stats, base::TimeDelta sampling_interval) {
bool update_stats, base::TimeDelta sampling_interval,
Address native_context_address) {
// As starting / stopping profiles is rare relative to this
// method, we don't bother minimizing the duration of lock holding,
// e.g. copying contents of the list to a local vector.
current_profiles_semaphore_.Wait();
for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
profile->AddPath(timestamp, path, src_line, update_stats,
sampling_interval);
if (profile->context_filter().Accept(native_context_address)) {
profile->AddPath(timestamp, path, src_line, update_stats,
sampling_interval);
}
}
current_profiles_semaphore_.Signal();
}
void CpuProfilesCollection::UpdateNativeContextAddressForCurrentProfiles(
Address from, Address to) {
current_profiles_semaphore_.Wait();
for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
profile->context_filter().OnMoveEvent(from, to);
}
current_profiles_semaphore_.Signal();
}
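Moving profiles_ from SamplingEventsProcessor into the ProfilerEventsProcessor base class lets ProcessCodeEvent() forward NATIVE_CONTEXT_MOVE records here without touching the code map. The indirection exists for ordering: the GC relocates a native context on the VM thread, so the move is enqueued as a record and applied on the profiler thread in sequence with the tick samples. A simplified, single-threaded model of that handoff (types here are illustrative, not V8's):

  #include <cstdint>
  #include <queue>
  using Address = uintptr_t;

  struct MoveRecord { Address from, to; };

  struct ProfileModel {
    Address tracked = 0x1000;  // address the profile's filter is tracking
    void OnMove(Address from, Address to) {
      if (tracked == from) tracked = to;
    }
  };

  int main() {
    std::queue<MoveRecord> events;  // stands in for the processor's queue
    events.push({0x1000, 0x9000});  // VM thread: GC relocated the context
    ProfileModel profile;
    while (!events.empty()) {       // profiler thread: drain in order
      profile.OnMove(events.front().from, events.front().to);
      events.pop();
    }
    // profile.tracked is now 0x9000; later samples keep matching the filter.
    return profile.tracked == 0x9000 ? 0 : 1;
  }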
......
......@@ -237,6 +237,31 @@ struct CodeEntryAndLineNumber {
using ProfileStackTrace = std::vector<CodeEntryAndLineNumber>;
// Filters stack frames from sources other than a target native context.
class ContextFilter {
public:
explicit ContextFilter(Address native_context_address = kNullAddress)
: native_context_address_(native_context_address) {}
// Invoked when a native context has changed address.
void OnMoveEvent(Address from_address, Address to_address);
bool Accept(Address native_context_address) const {
if (native_context_address_ == kNullAddress) return true;
return (native_context_address & ~kHeapObjectTag) ==
native_context_address_;
}
// Update the context's tracked address based on VM-thread events.
void set_native_context_address(Address address) {
native_context_address_ = address;
}
Address native_context_address() const { return native_context_address_; }
private:
Address native_context_address_;
};
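Accept() compares the sampled pointer, which is tagged, against the stored untagged native context address, and a default-constructed filter accepts every sample. A self-contained demo of these semantics, assuming V8's kHeapObjectTag value of 1:

  #include <cstdint>
  using Address = uintptr_t;
  constexpr Address kNullAddress = 0;
  constexpr Address kHeapObjectTag = 1;  // assumption: V8 tags heap pointers

  class ContextFilter {
   public:
    explicit ContextFilter(Address address = kNullAddress)
        : native_context_address_(address) {}
    void OnMoveEvent(Address from, Address to) {
      if (native_context_address_ == from) native_context_address_ = to;
    }
    bool Accept(Address address) const {
      if (native_context_address_ == kNullAddress) return true;  // no filter
      return (address & ~kHeapObjectTag) == native_context_address_;
    }
   private:
    Address native_context_address_;
  };

  int main() {
    ContextFilter filter(0x2000);        // track the context at untagged 0x2000
    bool same = filter.Accept(0x2001);   // true: tag stripped before comparing
    bool other = filter.Accept(0x3001);  // false: a different native context
    filter.OnMoveEvent(0x2000, 0x5000);  // the GC relocated the tracked context
    bool moved = filter.Accept(0x5001);  // true again at the new address
    return (same && !other && moved) ? 0 : 1;
  }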
class ProfileTree;
class V8_EXPORT_PRIVATE ProfileNode {
......@@ -386,6 +411,7 @@ class CpuProfile {
base::TimeTicks start_time() const { return start_time_; }
base::TimeTicks end_time() const { return end_time_; }
CpuProfiler* cpu_profiler() const { return profiler_; }
ContextFilter& context_filter() { return context_filter_; }
void UpdateTicksScale();
......@@ -397,6 +423,7 @@ class CpuProfile {
const char* title_;
const CpuProfilingOptions options_;
std::unique_ptr<DiscardedSamplesDelegate> delegate_;
ContextFilter context_filter_;
base::TimeTicks start_time_;
base::TimeTicks end_time_;
std::deque<SampleInfo> samples_;
......@@ -486,7 +513,11 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
void AddPathToCurrentProfiles(base::TimeTicks timestamp,
const ProfileStackTrace& path, int src_line,
bool update_stats,
base::TimeDelta sampling_interval);
base::TimeDelta sampling_interval,
Address native_context_address = kNullAddress);
// Called from profile generator thread.
void UpdateNativeContextAddressForCurrentProfiles(Address from, Address to);
// Limits the number of profiles that can be simultaneously collected.
static const int kMaxSimultaneousProfiles = 100;
......
......@@ -302,6 +302,13 @@ void ProfilerListener::CodeMoveEvent(AbstractCode from, AbstractCode to) {
DispatchCodeEvent(evt_rec);
}
void ProfilerListener::NativeContextMoveEvent(Address from, Address to) {
CodeEventsContainer evt_rec(CodeEventRecord::NATIVE_CONTEXT_MOVE);
evt_rec.NativeContextMoveEventRecord_.from_address = from;
evt_rec.NativeContextMoveEventRecord_.to_address = to;
DispatchCodeEvent(evt_rec);
}
void ProfilerListener::CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT);
......
......@@ -59,6 +59,7 @@ class V8_EXPORT_PRIVATE ProfilerListener : public CodeEventListener,
Handle<String> source) override;
void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
void NativeContextMoveEvent(Address from, Address to) override;
void CodeMovingGCEvent() override {}
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) override;
......
......@@ -177,6 +177,7 @@ DISABLE_ASAN void TickSample::Init(Isolate* v8_isolate,
pc = regs.pc;
frames_count = static_cast<unsigned>(info.frames_count);
has_external_callback = info.external_callback_entry != nullptr;
context = info.context;
if (has_external_callback) {
external_callback_entry = info.external_callback_entry;
} else if (frames_count) {
......@@ -209,6 +210,7 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
sample_info->frames_count = 0;
sample_info->vm_state = isolate->current_vm_state();
sample_info->external_callback_entry = nullptr;
sample_info->context = nullptr;
if (sample_info->vm_state == GC) return true;
i::Address js_entry_sp = isolate->js_entry_sp();
......@@ -278,6 +280,13 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
reinterpret_cast<i::Address>(regs->lr),
js_entry_sp);
Context top_context = isolate->context();
if (top_context.ptr() != i::Context::kNoContext &&
top_context.ptr() != i::Context::kInvalidContext) {
NativeContext top_native_context = top_context.native_context();
sample_info->context = reinterpret_cast<void*>(top_native_context.ptr());
}
if (it.done()) return true;
size_t i = 0;
......
......@@ -90,6 +90,7 @@ struct V8_EXPORT TickSample {
static const unsigned kMaxFramesCountLog2 = 8;
static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
void* stack[kMaxFramesCount]; // Call stack.
void* context = nullptr; // Address of the incumbent native context.
unsigned frames_count : kMaxFramesCountLog2; // Number of captured frames.
bool has_external_callback : 1;
bool update_stats : 1; // Whether the sample should update aggregated stats.
......
......@@ -1310,6 +1310,7 @@ RUNTIME_FUNCTION(Runtime_EnableCodeLoggingForTesting) {
Handle<String> source) final {}
void CodeMoveEvent(AbstractCode from, AbstractCode to) final {}
void SharedFunctionInfoMoveEvent(Address from, Address to) final {}
void NativeContextMoveEvent(Address from, Address to) final {}
void CodeMovingGCEvent() final {}
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) final {}
......
......@@ -546,7 +546,8 @@ class ProfilerHelper {
v8::Local<v8::Function> function, v8::Local<v8::Value> argv[], int argc,
unsigned min_js_samples = 0, unsigned min_external_samples = 0,
ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers,
unsigned max_samples = v8::CpuProfilingOptions::kNoSampleLimit);
unsigned max_samples = v8::CpuProfilingOptions::kNoSampleLimit,
v8::Local<v8::Context> context = v8::Local<v8::Context>());
v8::CpuProfiler* profiler() { return profiler_; }
......@@ -559,11 +560,12 @@ v8::CpuProfile* ProfilerHelper::Run(v8::Local<v8::Function> function,
v8::Local<v8::Value> argv[], int argc,
unsigned min_js_samples,
unsigned min_external_samples,
ProfilingMode mode, unsigned max_samples) {
ProfilingMode mode, unsigned max_samples,
v8::Local<v8::Context> context) {
v8::Local<v8::String> profile_name = v8_str("my_profile");
profiler_->SetSamplingInterval(50);
profiler_->StartProfiling(profile_name, {mode, max_samples, 0});
profiler_->StartProfiling(profile_name, {mode, max_samples, 0, context});
v8::internal::CpuProfiler* iprofiler =
reinterpret_cast<v8::internal::CpuProfiler*>(profiler_);
......@@ -3803,6 +3805,140 @@ TEST(Bug9151StaleCodeEntries) {
CHECK(callback);
}
// Tests that functions from other contexts aren't recorded when filtering for
// another context.
TEST(ContextIsolation) {
i::FLAG_allow_natives_syntax = true;
LocalContext execution_env;
i::HandleScope scope(CcTest::i_isolate());
// Install CollectSample callback for more deterministic sampling.
v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
execution_env.local()->GetIsolate(), CallCollectSample);
v8::Local<v8::Function> func =
func_template->GetFunction(execution_env.local()).ToLocalChecked();
func->SetName(v8_str("CallCollectSample"));
execution_env->Global()
->Set(execution_env.local(), v8_str("CallCollectSample"), func)
.FromJust();
ProfilerHelper helper(execution_env.local());
CompileRun(R"(
function optimized() {
CallCollectSample();
}
function unoptimized() {
CallCollectSample();
}
function start() {
// Test optimized functions
%PrepareFunctionForOptimization(optimized);
optimized();
optimized();
%OptimizeFunctionOnNextCall(optimized);
optimized();
// Test unoptimized functions
%NeverOptimizeFunction(unoptimized);
unoptimized();
// Test callback
CallCollectSample();
}
)");
v8::Local<v8::Function> function =
GetFunction(execution_env.local(), "start");
v8::CpuProfile* same_context_profile = helper.Run(
function, nullptr, 0, 0, 0, v8::CpuProfilingMode::kLeafNodeLineNumbers,
v8::CpuProfilingOptions::kNoSampleLimit, execution_env.local());
const v8::CpuProfileNode* root = same_context_profile->GetTopDownRoot();
const v8::CpuProfileNode* start_node = FindChild(root, "start");
CHECK(start_node);
const v8::CpuProfileNode* optimized_node = FindChild(start_node, "optimized");
CHECK(optimized_node);
const v8::CpuProfileNode* unoptimized_node =
FindChild(start_node, "unoptimized");
CHECK(unoptimized_node);
const v8::CpuProfileNode* callback_node =
FindChild(start_node, "CallCollectSample");
CHECK(callback_node);
{
LocalContext filter_env;
v8::CpuProfile* diff_context_profile = helper.Run(
function, nullptr, 0, 0, 0, v8::CpuProfilingMode::kLeafNodeLineNumbers,
v8::CpuProfilingOptions::kNoSampleLimit, filter_env.local());
const v8::CpuProfileNode* diff_root =
diff_context_profile->GetTopDownRoot();
// Ensure that no children were recorded (including callbacks, builtins).
CHECK(!FindChild(diff_root, "start"));
}
}
// Tests that when a native context that's being filtered is moved, we continue
// to track its execution.
TEST(ContextFilterMovedNativeContext) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_manual_evacuation_candidates_selection = true;
LocalContext env;
i::HandleScope scope(CcTest::i_isolate());
{
// Install CollectSample callback for more deterministic sampling.
v8::Local<v8::FunctionTemplate> sample_func_template =
v8::FunctionTemplate::New(env.local()->GetIsolate(), CallCollectSample);
v8::Local<v8::Function> sample_func =
sample_func_template->GetFunction(env.local()).ToLocalChecked();
sample_func->SetName(v8_str("CallCollectSample"));
env->Global()
->Set(env.local(), v8_str("CallCollectSample"), sample_func)
.FromJust();
// Install a function that triggers the native context to be moved.
v8::Local<v8::FunctionTemplate> move_func_template =
v8::FunctionTemplate::New(
env.local()->GetIsolate(),
[](const v8::FunctionCallbackInfo<v8::Value>& info) {
i::Isolate* isolate =
reinterpret_cast<i::Isolate*>(info.GetIsolate());
i::heap::ForceEvacuationCandidate(
i::Page::FromHeapObject(isolate->raw_native_context()));
CcTest::CollectAllGarbage();
});
v8::Local<v8::Function> move_func =
move_func_template->GetFunction(env.local()).ToLocalChecked();
move_func->SetName(v8_str("ForceNativeContextMove"));
env->Global()
->Set(env.local(), v8_str("ForceNativeContextMove"), move_func)
.FromJust();
ProfilerHelper helper(env.local());
CompileRun(R"(
function start() {
ForceNativeContextMove();
CallCollectSample();
}
)");
v8::Local<v8::Function> function = GetFunction(env.local(), "start");
v8::CpuProfile* profile = helper.Run(
function, nullptr, 0, 0, 0, v8::CpuProfilingMode::kLeafNodeLineNumbers,
v8::CpuProfilingOptions::kNoSampleLimit, env.local());
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* start_node = FindChild(root, "start");
CHECK(start_node);
// Verify that after moving the native context, CallCollectSample is still
// recorded.
const v8::CpuProfileNode* callback_node =
FindChild(start_node, "CallCollectSample");
CHECK(callback_node);
}
}
enum class EntryCountMode { kAll, kOnlyInlined };
// Count the number of unique source positions.
......