Commit 05af3681 authored by Andrew Comminos, committed by Commit Bot

[cpu-profiler] Remove support for context filtering

Since the web-exposed profiler will require COOP/COEP, it is no longer
necessary to perform isolation at the V8 level. Strip the unnecessary
complexity and unreliability of context filtering accordingly.

Bug: chromium:956688, v8:9881, v8:9860
Change-Id: I21a30d51f8daf7565ec95de8c265e9d3b9d10fad
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2386144
Commit-Queue: Andrew Comminos <acomminos@fb.com>
Reviewed-by: Peter Marshall <petermarshall@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69894}
parent d24457fa
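
For context, a minimal sketch of how an embedder starts a CPU profile through the public API after this change; the now-deprecated filter_context argument of CpuProfilingOptions is simply omitted. The isolate setup and the RunProfile helper name are assumptions for illustration, not part of this CL.

#include "v8-profiler.h"

// Sketch: start and stop a profile without a filter context. The
// CpuProfilingOptions constructor still accepts a MaybeLocal<Context>
// filter_context for source compatibility, but it is now a no-op.
void RunProfile(v8::Isolate* isolate) {
  v8::CpuProfiler* profiler = v8::CpuProfiler::New(isolate);
  v8::Local<v8::String> name =
      v8::String::NewFromUtf8Literal(isolate, "my_profile");
  profiler->StartProfiling(
      name, v8::CpuProfilingOptions(v8::kLeafNodeLineNumbers,
                                    v8::CpuProfilingOptions::kNoSampleLimit,
                                    /*sampling_interval_us=*/0));
  // ... run the JavaScript to be profiled ...
  v8::CpuProfile* profile = profiler->StopProfiling(name);
  if (profile) profile->Delete();
  profiler->Dispose();
}
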
......@@ -269,6 +269,8 @@ class V8_EXPORT CpuProfilingOptions {
* interval, set via SetSamplingInterval(). If
* zero, the sampling interval will be equal to
* the profiler's sampling interval.
* \param filter_context Deprecated option to filter by context, currently a
* no-op.
*/
CpuProfilingOptions(
CpuProfilingMode mode = kLeafNodeLineNumbers,
......@@ -282,13 +284,9 @@ class V8_EXPORT CpuProfilingOptions {
private:
friend class internal::CpuProfile;
bool has_filter_context() const { return !filter_context_.IsEmpty(); }
void* raw_filter_context() const;
CpuProfilingMode mode_;
unsigned max_samples_;
int sampling_interval_us_;
CopyablePersistentTraits<Context>::CopyablePersistent filter_context_;
};
/**
......
......@@ -10669,20 +10669,7 @@ CpuProfilingOptions::CpuProfilingOptions(CpuProfilingMode mode,
MaybeLocal<Context> filter_context)
: mode_(mode),
max_samples_(max_samples),
sampling_interval_us_(sampling_interval_us) {
if (!filter_context.IsEmpty()) {
Local<Context> local_filter_context = filter_context.ToLocalChecked();
filter_context_.Reset(local_filter_context->GetIsolate(),
local_filter_context);
}
}
void* CpuProfilingOptions::raw_filter_context() const {
return reinterpret_cast<void*>(
i::Context::cast(*Utils::OpenPersistent(filter_context_))
.native_context()
.address());
}
sampling_interval_us_(sampling_interval_us) {}
void CpuProfiler::Dispose() { delete reinterpret_cast<i::CpuProfiler*>(this); }
......
......@@ -3100,9 +3100,6 @@ void Heap::OnMoveEvent(HeapObject target, HeapObject source,
if (target.IsSharedFunctionInfo()) {
LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source.address(),
target.address()));
} else if (target.IsNativeContext()) {
PROFILE(isolate_,
NativeContextMoveEvent(source.address(), target.address()));
}
if (FLAG_verify_predictable) {
......
......@@ -94,7 +94,6 @@ class CodeEventListener {
// Not handlified as this happens during GC. No allocation allowed.
virtual void CodeMoveEvent(AbstractCode from, AbstractCode to) = 0;
virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0;
virtual void NativeContextMoveEvent(Address from, Address to) = 0;
virtual void CodeMovingGCEvent() = 0;
virtual void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) = 0;
......@@ -205,11 +204,6 @@ class CodeEventDispatcher : public CodeEventListener {
listener->SharedFunctionInfoMoveEvent(from, to);
});
}
void NativeContextMoveEvent(Address from, Address to) override {
DispatchEventToListeners([=](CodeEventListener* listener) {
listener->NativeContextMoveEvent(from, to);
});
}
void CodeMovingGCEvent() override {
DispatchEventToListeners(
[](CodeEventListener* listener) { listener->CodeMovingGCEvent(); });
......
......@@ -209,7 +209,6 @@ class Logger : public CodeEventListener {
Handle<String> source) override;
void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
void SharedFunctionInfoMoveEvent(Address from, Address to) override;
void NativeContextMoveEvent(Address from, Address to) override {}
void CodeMovingGCEvent() override;
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) override;
......@@ -406,7 +405,6 @@ class V8_EXPORT_PRIVATE CodeEventLogger : public CodeEventListener {
void GetterCallbackEvent(Handle<Name> name, Address entry_point) override {}
void SetterCallbackEvent(Handle<Name> name, Address entry_point) override {}
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
void NativeContextMoveEvent(Address from, Address to) override {}
void CodeMovingGCEvent() override {}
void CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta, bool reuse_code) override {}
......@@ -466,7 +464,6 @@ class ExternalCodeEventListener : public CodeEventListener {
void GetterCallbackEvent(Handle<Name> name, Address entry_point) override {}
void SetterCallbackEvent(Handle<Name> name, Address entry_point) override {}
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
void NativeContextMoveEvent(Address from, Address to) override {}
void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) override {}
......
......@@ -171,14 +171,7 @@ void ProfilerEventsProcessor::StopSynchronously() {
bool ProfilerEventsProcessor::ProcessCodeEvent() {
CodeEventsContainer record;
if (events_buffer_.Dequeue(&record)) {
if (record.generic.type == CodeEventRecord::NATIVE_CONTEXT_MOVE) {
NativeContextMoveEventRecord& nc_record =
record.NativeContextMoveEventRecord_;
generator_->UpdateNativeContextAddress(nc_record.from_address,
nc_record.to_address);
} else {
code_observer_->CodeEventHandlerInternal(record);
}
code_observer_->CodeEventHandlerInternal(record);
last_processed_code_event_id_ = record.generic.order;
return true;
}
......@@ -191,7 +184,6 @@ void ProfilerEventsProcessor::CodeEventHandler(
case CodeEventRecord::CODE_CREATION:
case CodeEventRecord::CODE_MOVE:
case CodeEventRecord::CODE_DISABLE_OPT:
case CodeEventRecord::NATIVE_CONTEXT_MOVE:
Enqueue(evt_rec);
break;
case CodeEventRecord::CODE_DEOPT: {
......
......@@ -36,14 +36,10 @@ class ProfileGenerator;
V(CODE_DEOPT, CodeDeoptEventRecord) \
V(REPORT_BUILTIN, ReportBuiltinEventRecord)
#define VM_EVENTS_TYPE_LIST(V) \
CODE_EVENTS_TYPE_LIST(V) \
V(NATIVE_CONTEXT_MOVE, NativeContextMoveEventRecord)
class CodeEventRecord {
public:
#define DECLARE_TYPE(type, ignore) type,
enum Type { NONE = 0, VM_EVENTS_TYPE_LIST(DECLARE_TYPE) };
enum Type { NONE = 0, CODE_EVENTS_TYPE_LIST(DECLARE_TYPE) };
#undef DECLARE_TYPE
Type type;
......@@ -101,13 +97,6 @@ class ReportBuiltinEventRecord : public CodeEventRecord {
V8_INLINE void UpdateCodeMap(CodeMap* code_map);
};
// Signals that a native context's address has changed.
class NativeContextMoveEventRecord : public CodeEventRecord {
public:
Address from_address;
Address to_address;
};
// A record type for sending samples from the main thread/signal handler to the
// profiling thread.
class TickSampleEventRecord {
......@@ -132,7 +121,7 @@ class CodeEventsContainer {
union {
CodeEventRecord generic;
#define DECLARE_CLASS(ignore, type) type type##_;
VM_EVENTS_TYPE_LIST(DECLARE_CLASS)
CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
#undef DECLARE_CLASS
};
};
......
......@@ -398,14 +398,12 @@ ProfileNode* ProfileTree::AddPathFromEnd(const std::vector<CodeEntry*>& path,
ProfileNode* ProfileTree::AddPathFromEnd(const ProfileStackTrace& path,
int src_line, bool update_stats,
ProfilingMode mode,
ContextFilter* context_filter) {
ProfilingMode mode) {
ProfileNode* node = root_;
CodeEntry* last_entry = nullptr;
int parent_line_number = v8::CpuProfileNode::kNoLineNumberInfo;
for (auto it = path.rbegin(); it != path.rend(); ++it) {
if (it->entry.code_entry == nullptr) continue;
if (context_filter && !context_filter->Accept(*it)) continue;
last_entry = (*it).entry.code_entry;
node = node->FindOrAddChild((*it).entry.code_entry, parent_line_number);
parent_line_number = mode == ProfilingMode::kCallerLineNumbers
......@@ -465,21 +463,6 @@ void ProfileTree::TraverseDepthFirst(Callback* callback) {
}
}
bool ContextFilter::Accept(const ProfileStackFrame& frame) {
// If a frame should always be included in profiles (e.g. metadata frames),
// skip the context check.
if (!frame.filterable) return true;
// Strip heap object tag from frame.
return (frame.native_context & ~kHeapObjectTag) == native_context_address_;
}
void ContextFilter::OnMoveEvent(Address from_address, Address to_address) {
if (native_context_address() != from_address) return;
set_native_context_address(to_address);
}
using v8::tracing::TracedValue;
std::atomic<uint32_t> CpuProfile::last_id_;
......@@ -502,13 +485,6 @@ CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
value->SetDouble("startTime", start_time_.since_origin().InMicroseconds());
TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
"Profile", id_, "data", std::move(value));
if (options_.has_filter_context()) {
DisallowHeapAllocation no_gc;
i::Address raw_filter_context =
reinterpret_cast<i::Address>(options_.raw_filter_context());
context_filter_ = std::make_unique<ContextFilter>(raw_filter_context);
}
}
bool CpuProfile::CheckSubsample(base::TimeDelta source_sampling_interval) {
......@@ -533,8 +509,8 @@ void CpuProfile::AddPath(base::TimeTicks timestamp,
bool update_stats, base::TimeDelta sampling_interval) {
if (!CheckSubsample(sampling_interval)) return;
ProfileNode* top_frame_node = top_down_.AddPathFromEnd(
path, src_line, update_stats, options_.mode(), context_filter_.get());
ProfileNode* top_frame_node =
top_down_.AddPathFromEnd(path, src_line, update_stats, options_.mode());
bool should_record_sample =
!timestamp.IsNull() && timestamp >= start_time_ &&
......@@ -646,8 +622,6 @@ void CpuProfile::StreamPendingTraceEvents() {
void CpuProfile::FinishProfile() {
end_time_ = base::TimeTicks::HighResolutionNow();
// Stop tracking context movements after profiling stops.
context_filter_ = nullptr;
StreamPendingTraceEvents();
auto value = TracedValue::Create();
// The endTime timestamp is not converted to Perfetto's clock domain and will
......@@ -859,17 +833,6 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
current_profiles_semaphore_.Signal();
}
void CpuProfilesCollection::UpdateNativeContextAddressForCurrentProfiles(
Address from, Address to) {
current_profiles_semaphore_.Wait();
for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
if (auto* context_filter = profile->context_filter()) {
context_filter->OnMoveEvent(from, to);
}
}
current_profiles_semaphore_.Signal();
}
ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles,
CodeMap* code_map)
: profiles_(profiles), code_map_(code_map) {}
......@@ -896,9 +859,7 @@ void ProfileGenerator::SymbolizeTickSample(const TickSample& sample) {
// that a callback calls itself.
stack_trace.push_back({{FindEntry(reinterpret_cast<Address>(
sample.external_callback_entry)),
no_line_info},
reinterpret_cast<Address>(sample.top_context),
true});
no_line_info}});
} else {
Address attributed_pc = reinterpret_cast<Address>(sample.pc);
Address pc_entry_instruction_start = kNullAddress;
......@@ -924,9 +885,7 @@ void ProfileGenerator::SymbolizeTickSample(const TickSample& sample) {
src_line = pc_entry->line_number();
}
src_line_not_found = false;
stack_trace.push_back({{pc_entry, src_line},
reinterpret_cast<Address>(sample.top_context),
true});
stack_trace.push_back({{pc_entry, src_line}});
if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
......@@ -940,9 +899,7 @@ void ProfileGenerator::SymbolizeTickSample(const TickSample& sample) {
ProfilerStats::Instance()->AddReason(
ProfilerStats::Reason::kInCallOrApply);
stack_trace.push_back(
{{CodeEntry::unresolved_entry(), no_line_info},
kNullAddress,
true});
{{CodeEntry::unresolved_entry(), no_line_info}});
}
}
}
......@@ -950,7 +907,6 @@ void ProfileGenerator::SymbolizeTickSample(const TickSample& sample) {
for (unsigned i = 0; i < sample.frames_count; ++i) {
Address stack_pos = reinterpret_cast<Address>(sample.stack[i]);
Address native_context = reinterpret_cast<Address>(sample.contexts[i]);
Address instruction_start = kNullAddress;
CodeEntry* entry = FindEntry(stack_pos, &instruction_start);
int line_number = no_line_info;
......@@ -963,10 +919,7 @@ void ProfileGenerator::SymbolizeTickSample(const TickSample& sample) {
if (inline_stack) {
int most_inlined_frame_line_number = entry->GetSourceLine(pc_offset);
for (auto entry : *inline_stack) {
// Set the native context of inlined frames to be equal to that of
// their parent. This is safe, as functions cannot inline themselves
// into a parent from another native context.
stack_trace.push_back({entry, native_context, true});
stack_trace.push_back({entry});
}
// This is a bit of a messy hack. The line number for the most-inlined
......@@ -997,7 +950,7 @@ void ProfileGenerator::SymbolizeTickSample(const TickSample& sample) {
// so we use it instead of pushing entry to stack_trace.
if (inline_stack) continue;
}
stack_trace.push_back({{entry, line_number}, native_context, true});
stack_trace.push_back({{entry, line_number}});
}
}
......@@ -1017,8 +970,7 @@ void ProfileGenerator::SymbolizeTickSample(const TickSample& sample) {
ProfilerStats::Instance()->AddReason(
ProfilerStats::Reason::kNoSymbolizedFrames);
}
stack_trace.push_back(
{{EntryForVMState(sample.state), no_line_info}, kNullAddress, false});
stack_trace.push_back({{EntryForVMState(sample.state), no_line_info}});
}
}
......@@ -1027,10 +979,6 @@ void ProfileGenerator::SymbolizeTickSample(const TickSample& sample) {
sample.sampling_interval);
}
void ProfileGenerator::UpdateNativeContextAddress(Address from, Address to) {
profiles_->UpdateNativeContextAddressForCurrentProfiles(from, to);
}
CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
switch (tag) {
case GC:
......
......@@ -230,35 +230,10 @@ struct CodeEntryAndLineNumber {
struct ProfileStackFrame {
CodeEntryAndLineNumber entry;
Address native_context;
bool filterable; // If true, the frame should be filtered by context (if a
// filter is present).
};
typedef std::vector<ProfileStackFrame> ProfileStackTrace;
// Filters stack frames from sources other than a target native context.
class ContextFilter {
public:
explicit ContextFilter(Address native_context_address)
: native_context_address_(native_context_address) {}
// Returns true if the stack frame passes a context check.
bool Accept(const ProfileStackFrame&);
// Invoked when a native context has changed address.
void OnMoveEvent(Address from_address, Address to_address);
// Update the context's tracked address based on VM-thread events.
void set_native_context_address(Address address) {
native_context_address_ = address;
}
Address native_context_address() const { return native_context_address_; }
private:
Address native_context_address_;
};
class ProfileTree;
class V8_EXPORT_PRIVATE ProfileNode {
......@@ -343,8 +318,7 @@ class V8_EXPORT_PRIVATE ProfileTree {
const ProfileStackTrace& path,
int src_line = v8::CpuProfileNode::kNoLineNumberInfo,
bool update_stats = true,
ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers,
ContextFilter* context_filter = nullptr);
ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers);
ProfileNode* root() const { return root_; }
unsigned next_node_id() { return next_node_id_++; }
......@@ -406,7 +380,6 @@ class CpuProfile {
base::TimeTicks start_time() const { return start_time_; }
base::TimeTicks end_time() const { return end_time_; }
CpuProfiler* cpu_profiler() const { return profiler_; }
ContextFilter* context_filter() const { return context_filter_.get(); }
void UpdateTicksScale();
......@@ -417,7 +390,6 @@ class CpuProfile {
const char* title_;
const CpuProfilingOptions options_;
std::unique_ptr<ContextFilter> context_filter_;
base::TimeTicks start_time_;
base::TimeTicks end_time_;
std::deque<SampleInfo> samples_;
......@@ -496,9 +468,6 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
bool update_stats,
base::TimeDelta sampling_interval);
// Called from profile generator thread.
void UpdateNativeContextAddressForCurrentProfiles(Address from, Address to);
// Limits the number of profiles that can be simultaneously collected.
static const int kMaxSimultaneousProfiles = 100;
......@@ -523,8 +492,6 @@ class V8_EXPORT_PRIVATE ProfileGenerator {
// profiles in the CpuProfilesCollection.
void SymbolizeTickSample(const TickSample& sample);
void UpdateNativeContextAddress(Address from, Address to);
CodeMap* code_map() { return code_map_; }
private:
......
......@@ -258,13 +258,6 @@ void ProfilerListener::CodeMoveEvent(AbstractCode from, AbstractCode to) {
DispatchCodeEvent(evt_rec);
}
void ProfilerListener::NativeContextMoveEvent(Address from, Address to) {
CodeEventsContainer evt_rec(CodeEventRecord::NATIVE_CONTEXT_MOVE);
evt_rec.NativeContextMoveEventRecord_.from_address = from;
evt_rec.NativeContextMoveEventRecord_.to_address = to;
DispatchCodeEvent(evt_rec);
}
void ProfilerListener::CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT);
......
......@@ -50,7 +50,6 @@ class V8_EXPORT_PRIVATE ProfilerListener : public CodeEventListener {
Handle<String> source) override;
void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
void NativeContextMoveEvent(Address from, Address to) override;
void CodeMovingGCEvent() override {}
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) override;
......
......@@ -144,22 +144,6 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate,
}
#endif // USE_SIMULATOR
// Returns the native context for a JavaScript frame. If the frame wasn't a
// JavaScript frame, it'll return kNullAddress.
Address ScrapeNativeContextAddress(Heap* heap, Address context_address) {
#if !defined(V8_TARGET_ARCH_IA32) && !defined(V8_TARGET_ARCH_X64)
return kNullAddress;
#else
DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
// If the value is tagged, we're looking at a JavaScript frame.
if (!HAS_STRONG_HEAP_OBJECT_TAG(context_address)) return kNullAddress;
i::Object object(context_address);
return i::Context::cast(object).map().native_context().ptr();
#endif
}
} // namespace
DISABLE_ASAN void TickSample::Init(Isolate* v8_isolate,
......@@ -172,8 +156,7 @@ DISABLE_ASAN void TickSample::Init(Isolate* v8_isolate,
SampleInfo info;
RegisterState regs = reg_state;
if (!GetStackSample(v8_isolate, &regs, record_c_entry_frame, stack,
kMaxFramesCount, &info, use_simulator_reg_state,
contexts)) {
kMaxFramesCount, &info, use_simulator_reg_state)) {
// It is executing JS but failed to collect a stack trace.
// Mark the sample as spoiled.
pc = nullptr;
......@@ -184,7 +167,6 @@ DISABLE_ASAN void TickSample::Init(Isolate* v8_isolate,
pc = regs.pc;
frames_count = static_cast<unsigned>(info.frames_count);
has_external_callback = info.external_callback_entry != nullptr;
top_context = info.top_context;
if (has_external_callback) {
external_callback_entry = info.external_callback_entry;
} else if (frames_count) {
......@@ -211,12 +193,11 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
RecordCEntryFrame record_c_entry_frame,
void** frames, size_t frames_limit,
v8::SampleInfo* sample_info,
bool use_simulator_reg_state, void** contexts) {
bool use_simulator_reg_state) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
sample_info->frames_count = 0;
sample_info->vm_state = isolate->current_vm_state();
sample_info->external_callback_entry = nullptr;
sample_info->top_context = nullptr;
if (sample_info->vm_state == GC) return true;
i::Address js_entry_sp = isolate->js_entry_sp();
......@@ -269,14 +250,6 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
reinterpret_cast<i::Address>(regs->lr),
js_entry_sp);
i::Address top_context_address = it.top_context_address();
if (top_context_address != i::kNullAddress) {
sample_info->top_context = reinterpret_cast<void*>(
i::ScrapeNativeContextAddress(isolate->heap(), top_context_address));
} else {
sample_info->top_context = nullptr;
}
if (it.done()) return true;
size_t i = 0;
......@@ -284,46 +257,19 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
(it.top_frame_type() == internal::StackFrame::EXIT ||
it.top_frame_type() == internal::StackFrame::BUILTIN_EXIT)) {
frames[i] = reinterpret_cast<void*>(isolate->c_function());
if (contexts) contexts[i] = sample_info->top_context;
i++;
}
// If we couldn't get a context address from the top frame due to execution
// being in a callback, borrow it from the next context on the stack.
bool borrows_top_context = it.top_frame_type() == i::StackFrame::EXIT ||
it.top_frame_type() == i::StackFrame::BUILTIN_EXIT;
i::RuntimeCallTimer* timer =
isolate->counters()->runtime_call_stats()->current_timer();
for (; !it.done() && i < frames_limit; it.Advance()) {
while (timer && reinterpret_cast<i::Address>(timer) < it.frame()->fp() &&
i < frames_limit) {
if (contexts) contexts[i] = nullptr;
frames[i++] = reinterpret_cast<void*>(timer->counter());
timer = timer->parent();
}
if (i == frames_limit) break;
// Attempt to read the native context associated with the frame from the
// heap for standard frames.
if (it.frame()->is_standard() && (contexts || borrows_top_context)) {
i::Address context_address = base::Memory<i::Address>(
it.frame()->fp() + i::StandardFrameConstants::kContextOffset);
i::Address native_context_address =
i::ScrapeNativeContextAddress(isolate->heap(), context_address);
if (contexts)
contexts[i] = reinterpret_cast<void*>(native_context_address);
if (borrows_top_context) {
DCHECK(!sample_info->top_context);
sample_info->top_context =
reinterpret_cast<void*>(native_context_address);
}
} else if (contexts) {
contexts[i] = nullptr;
}
borrows_top_context = false;
if (it.frame()->is_interpreted()) {
// For interpreted frames use the bytecode array pointer as the pc.
i::InterpretedFrame* frame =
......
......@@ -69,8 +69,7 @@ struct V8_EXPORT TickSample {
RecordCEntryFrame record_c_entry_frame,
void** frames, size_t frames_limit,
v8::SampleInfo* sample_info,
bool use_simulator_reg_state = true,
void** contexts = nullptr);
bool use_simulator_reg_state = true);
void print() const;
......@@ -83,8 +82,6 @@ struct V8_EXPORT TickSample {
static const unsigned kMaxFramesCountLog2 = 8;
static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
void* stack[kMaxFramesCount]; // Call stack.
void* contexts[kMaxFramesCount]; // Stack of associated native contexts.
void* top_context = nullptr; // Address of the incumbent native context.
unsigned frames_count : kMaxFramesCountLog2; // Number of captured frames.
bool has_external_callback : 1;
bool update_stats : 1; // Whether the sample should update aggregated stats.
......
......@@ -1598,7 +1598,6 @@ RUNTIME_FUNCTION(Runtime_EnableCodeLoggingForTesting) {
Handle<String> source) final {}
void CodeMoveEvent(AbstractCode from, AbstractCode to) final {}
void SharedFunctionInfoMoveEvent(Address from, Address to) final {}
void NativeContextMoveEvent(Address from, Address to) final {}
void CodeMovingGCEvent() final {}
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) final {}
......
......@@ -129,14 +129,6 @@
'test-persistent-handles/NewPersistentHandleFailsWhenParkedExplicit': [FAIL],
}],
##############################################################################
['(arch != ia32 and arch != x64)', {
# BUG(v8:9860). We can only safely read the native context for a frame on
# ia32 and x64.
'test-cpu-profiler/ContextFilterMovedNativeContext': [FAIL],
'test-cpu-profiler/ContextIsolation': [FAIL],
}],
##############################################################################
['tsan == True', {
# BUG(v8:9869) TSAN considers SIGPROF an asynchronous signal, and will call
......
......@@ -450,8 +450,7 @@ class ProfilerHelper {
v8::Local<v8::Function> function, v8::Local<v8::Value> argv[], int argc,
unsigned min_js_samples = 0, unsigned min_external_samples = 0,
ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers,
unsigned max_samples = v8::CpuProfilingOptions::kNoSampleLimit,
v8::Local<v8::Context> context = v8::Local<v8::Context>());
unsigned max_samples = v8::CpuProfilingOptions::kNoSampleLimit);
v8::CpuProfiler* profiler() { return profiler_; }
......@@ -464,12 +463,11 @@ v8::CpuProfile* ProfilerHelper::Run(v8::Local<v8::Function> function,
v8::Local<v8::Value> argv[], int argc,
unsigned min_js_samples,
unsigned min_external_samples,
ProfilingMode mode, unsigned max_samples,
v8::Local<v8::Context> context) {
ProfilingMode mode, unsigned max_samples) {
v8::Local<v8::String> profile_name = v8_str("my_profile");
profiler_->SetSamplingInterval(100);
profiler_->StartProfiling(profile_name, {mode, max_samples, 0, context});
profiler_->StartProfiling(profile_name, {mode, max_samples, 0});
v8::internal::CpuProfiler* iprofiler =
reinterpret_cast<v8::internal::CpuProfiler*>(profiler_);
......@@ -3446,140 +3444,6 @@ TEST(Bug9151StaleCodeEntries) {
CHECK(callback);
}
// Tests that functions from other contexts aren't recorded when filtering for
// another context.
TEST(ContextIsolation) {
i::FLAG_allow_natives_syntax = true;
LocalContext execution_env;
i::HandleScope scope(CcTest::i_isolate());
// Install CollectSample callback for more deterministic sampling.
v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
execution_env.local()->GetIsolate(), CallCollectSample);
v8::Local<v8::Function> func =
func_template->GetFunction(execution_env.local()).ToLocalChecked();
func->SetName(v8_str("CallCollectSample"));
execution_env->Global()
->Set(execution_env.local(), v8_str("CallCollectSample"), func)
.FromJust();
ProfilerHelper helper(execution_env.local());
CompileRun(R"(
function optimized() {
CallCollectSample();
}
function unoptimized() {
CallCollectSample();
}
function start() {
// Test optimized functions
%PrepareFunctionForOptimization(optimized);
optimized();
optimized();
%OptimizeFunctionOnNextCall(optimized);
optimized();
// Test unoptimized functions
%NeverOptimizeFunction(unoptimized);
unoptimized();
// Test callback
CallCollectSample();
}
)");
v8::Local<v8::Function> function =
GetFunction(execution_env.local(), "start");
v8::CpuProfile* same_context_profile = helper.Run(
function, nullptr, 0, 0, 0, v8::CpuProfilingMode::kLeafNodeLineNumbers,
v8::CpuProfilingOptions::kNoSampleLimit, execution_env.local());
const v8::CpuProfileNode* root = same_context_profile->GetTopDownRoot();
const v8::CpuProfileNode* start_node = FindChild(root, "start");
CHECK(start_node);
const v8::CpuProfileNode* optimized_node = FindChild(start_node, "optimized");
CHECK(optimized_node);
const v8::CpuProfileNode* unoptimized_node =
FindChild(start_node, "unoptimized");
CHECK(unoptimized_node);
const v8::CpuProfileNode* callback_node =
FindChild(start_node, "CallCollectSample");
CHECK(callback_node);
{
LocalContext filter_env;
v8::CpuProfile* diff_context_profile = helper.Run(
function, nullptr, 0, 0, 0, v8::CpuProfilingMode::kLeafNodeLineNumbers,
v8::CpuProfilingOptions::kNoSampleLimit, filter_env.local());
const v8::CpuProfileNode* diff_root =
diff_context_profile->GetTopDownRoot();
// Ensure that no children were recorded (including callbacks, builtins).
CHECK(!FindChild(diff_root, "start"));
}
}
// Tests that when a native context that's being filtered is moved, we continue
// to track its execution.
TEST(ContextFilterMovedNativeContext) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_manual_evacuation_candidates_selection = true;
LocalContext env;
i::HandleScope scope(CcTest::i_isolate());
{
// Install CollectSample callback for more deterministic sampling.
v8::Local<v8::FunctionTemplate> sample_func_template =
v8::FunctionTemplate::New(env.local()->GetIsolate(), CallCollectSample);
v8::Local<v8::Function> sample_func =
sample_func_template->GetFunction(env.local()).ToLocalChecked();
sample_func->SetName(v8_str("CallCollectSample"));
env->Global()
->Set(env.local(), v8_str("CallCollectSample"), sample_func)
.FromJust();
// Install a function that triggers the native context to be moved.
v8::Local<v8::FunctionTemplate> move_func_template =
v8::FunctionTemplate::New(
env.local()->GetIsolate(),
[](const v8::FunctionCallbackInfo<v8::Value>& info) {
i::Isolate* isolate =
reinterpret_cast<i::Isolate*>(info.GetIsolate());
i::heap::ForceEvacuationCandidate(
i::Page::FromHeapObject(isolate->raw_native_context()));
CcTest::CollectAllGarbage();
});
v8::Local<v8::Function> move_func =
move_func_template->GetFunction(env.local()).ToLocalChecked();
move_func->SetName(v8_str("ForceNativeContextMove"));
env->Global()
->Set(env.local(), v8_str("ForceNativeContextMove"), move_func)
.FromJust();
ProfilerHelper helper(env.local());
CompileRun(R"(
function start() {
ForceNativeContextMove();
CallCollectSample();
}
)");
v8::Local<v8::Function> function = GetFunction(env.local(), "start");
v8::CpuProfile* profile = helper.Run(
function, nullptr, 0, 0, 0, v8::CpuProfilingMode::kLeafNodeLineNumbers,
v8::CpuProfilingOptions::kNoSampleLimit, env.local());
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* start_node = FindChild(root, "start");
CHECK(start_node);
// Verify that after moving the native context, CallCollectSample is still
// recorded.
const v8::CpuProfileNode* callback_node =
FindChild(start_node, "CallCollectSample");
CHECK(callback_node);
}
}
enum class EntryCountMode { kAll, kOnlyInlined };
// Count the number of unique source positions.
......
......@@ -200,9 +200,7 @@ TEST(ProfileTreeAddPathFromEndWithLineNumbers) {
ProfileTree tree(CcTest::i_isolate());
ProfileTreeTestHelper helper(&tree);
ProfileStackTrace path = {{{&c, 5}, kNullAddress, false},
{{&b, 3}, kNullAddress, false},
{{&a, 1}, kNullAddress, false}};
ProfileStackTrace path = {{{&c, 5}}, {{&b, 3}}, {{&a, 1}}};
tree.AddPathFromEnd(path, v8::CpuProfileNode::kNoLineNumberInfo, true,
v8::CpuProfilingMode::kCallerLineNumbers);
......