Commit 82efa4bd authored by Peter Marshall; committed by Commit Bot

[cpu-profiler] Refactor ProfileGenerator

Rename it to Symbolizer because it does exactly that.

Change the SymbolizeTickSample method to return the symbolized state
rather than pass it on to the ProfilesCollection. This makes it easier
to test as now it only relies on the CodeMap provided to it.

Make EntryForVMState a free-floating function as it doesn't rely on
state and then we can avoid importing the StateTag definition in the
header.

Remove the UNREACHABLE from EntryForVMState as the compiler got smarter
and doesn't need it anymore.

Pass the CpuProfilesCollection to SamplingEventsProcessor instead,
as it is now responsible for putting the symbolized samples into the
collection to be sorted into the appropriate profiles.

Change-Id: I104290eff22b7d94a1bd34ba904036badccf4e13
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2440522
Commit-Queue: Peter Marshall <petermarshall@chromium.org>
Reviewed-by: Simon Zünd <szuend@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70248}
parent a769ea7a
......@@ -3090,6 +3090,8 @@ v8_source_set("v8_base_without_compiler") {
"src/profiler/sampling-heap-profiler.h",
"src/profiler/strings-storage.cc",
"src/profiler/strings-storage.h",
"src/profiler/symbolizer.cc",
"src/profiler/symbolizer.h",
"src/profiler/tick-sample.cc",
"src/profiler/tick-sample.h",
"src/profiler/tracing-cpu-profiler.cc",
......
......@@ -18,6 +18,7 @@
#include "src/logging/log.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/profiler/profiler-stats.h"
#include "src/profiler/symbolizer.h"
#include "src/utils/locked-queue-inl.h"
#include "src/wasm/wasm-engine.h"
......@@ -96,10 +97,10 @@ ProfilingScope::~ProfilingScope() {
}
ProfilerEventsProcessor::ProfilerEventsProcessor(
Isolate* isolate, ProfileGenerator* generator,
Isolate* isolate, Symbolizer* symbolizer,
ProfilerCodeObserver* code_observer)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
symbolizer_(symbolizer),
code_observer_(code_observer),
last_code_event_id_(0),
last_processed_code_event_id_(0),
......@@ -109,11 +110,12 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(
}
SamplingEventsProcessor::SamplingEventsProcessor(
Isolate* isolate, ProfileGenerator* generator,
ProfilerCodeObserver* code_observer, base::TimeDelta period,
bool use_precise_sampling)
: ProfilerEventsProcessor(isolate, generator, code_observer),
Isolate* isolate, Symbolizer* symbolizer,
ProfilerCodeObserver* code_observer, CpuProfilesCollection* profiles,
base::TimeDelta period, bool use_precise_sampling)
: ProfilerEventsProcessor(isolate, symbolizer, code_observer),
sampler_(new CpuSampler(isolate, this)),
profiles_(profiles),
period_(period),
use_precise_sampling_(use_precise_sampling) {
sampler_->Start();
......@@ -209,6 +211,15 @@ void ProfilerEventsProcessor::CodeEventHandler(
}
}
void SamplingEventsProcessor::SymbolizeAndAddToProfiles(
const TickSampleEventRecord* record) {
Symbolizer::SymbolizedSample symbolized =
symbolizer_->SymbolizeTickSample(record->sample);
profiles_->AddPathToCurrentProfiles(
record->sample.timestamp, symbolized.stack_trace, symbolized.src_line,
record->sample.update_stats, record->sample.sampling_interval);
}
ProfilerEventsProcessor::SampleProcessingResult
SamplingEventsProcessor::ProcessOneSample() {
TickSampleEventRecord record1;
......@@ -216,7 +227,7 @@ SamplingEventsProcessor::ProcessOneSample() {
(record1.order == last_processed_code_event_id_)) {
TickSampleEventRecord record;
ticks_from_vm_buffer_.Dequeue(&record);
generator_->SymbolizeTickSample(record.sample);
SymbolizeAndAddToProfiles(&record);
return OneSampleProcessed;
}
......@@ -228,7 +239,7 @@ SamplingEventsProcessor::ProcessOneSample() {
if (record->order != last_processed_code_event_id_) {
return FoundSampleForNextCodeEvent;
}
generator_->SymbolizeTickSample(record->sample);
SymbolizeAndAddToProfiles(record);
ticks_buffer_.Remove();
return OneSampleProcessed;
}
......@@ -437,7 +448,7 @@ CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
CpuProfilingLoggingMode logging_mode,
CpuProfilesCollection* test_profiles,
ProfileGenerator* test_generator,
Symbolizer* test_symbolizer,
ProfilerEventsProcessor* test_processor)
: isolate_(isolate),
naming_mode_(naming_mode),
......@@ -445,7 +456,7 @@ CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
base_sampling_interval_(base::TimeDelta::FromMicroseconds(
FLAG_cpu_profiler_sampling_interval)),
profiles_(test_profiles),
generator_(test_generator),
symbolizer_(test_symbolizer),
processor_(test_processor),
code_observer_(isolate),
is_profiling_(false) {
......@@ -475,7 +486,7 @@ void CpuProfiler::set_use_precise_sampling(bool value) {
void CpuProfiler::ResetProfiles() {
profiles_.reset(new CpuProfilesCollection(isolate_));
profiles_->set_cpu_profiler(this);
generator_.reset();
symbolizer_.reset();
if (!profiling_scope_) profiler_listener_.reset();
}
......@@ -543,14 +554,13 @@ void CpuProfiler::StartProcessorIfNotStarted() {
EnableLogging();
}
if (!generator_) {
generator_.reset(
new ProfileGenerator(profiles_.get(), code_observer_.code_map()));
if (!symbolizer_) {
symbolizer_ = std::make_unique<Symbolizer>(code_observer_.code_map());
}
base::TimeDelta sampling_interval = ComputeSamplingInterval();
processor_.reset(
new SamplingEventsProcessor(isolate_, generator_.get(), &code_observer_,
processor_.reset(new SamplingEventsProcessor(
isolate_, symbolizer_.get(), &code_observer_, profiles_.get(),
sampling_interval, use_precise_sampling_));
is_profiling_ = true;
......
......@@ -27,7 +27,7 @@ class CodeEntry;
class CodeMap;
class CpuProfilesCollection;
class Isolate;
class ProfileGenerator;
class Symbolizer;
#define CODE_EVENTS_TYPE_LIST(V) \
V(CODE_CREATION, CodeCreateEventRecord) \
......@@ -165,7 +165,7 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
virtual void SetSamplingInterval(base::TimeDelta) {}
protected:
ProfilerEventsProcessor(Isolate* isolate, ProfileGenerator* generator,
ProfilerEventsProcessor(Isolate* isolate, Symbolizer* symbolizer,
ProfilerCodeObserver* code_observer);
// Called from events processing thread (Run() method.)
......@@ -178,7 +178,7 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
};
virtual SampleProcessingResult ProcessOneSample() = 0;
ProfileGenerator* generator_;
Symbolizer* symbolizer_;
ProfilerCodeObserver* code_observer_;
std::atomic_bool running_{true};
base::ConditionVariable running_cond_;
......@@ -193,8 +193,9 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
class V8_EXPORT_PRIVATE SamplingEventsProcessor
: public ProfilerEventsProcessor {
public:
SamplingEventsProcessor(Isolate* isolate, ProfileGenerator* generator,
SamplingEventsProcessor(Isolate* isolate, Symbolizer* symbolizer,
ProfilerCodeObserver* code_observer,
CpuProfilesCollection* profiles,
base::TimeDelta period, bool use_precise_sampling);
~SamplingEventsProcessor() override;
......@@ -221,6 +222,7 @@ class V8_EXPORT_PRIVATE SamplingEventsProcessor
private:
SampleProcessingResult ProcessOneSample() override;
void SymbolizeAndAddToProfiles(const TickSampleEventRecord* record);
static const size_t kTickSampleBufferSize = 512 * KB;
static const size_t kTickSampleQueueLength =
......@@ -228,6 +230,7 @@ class V8_EXPORT_PRIVATE SamplingEventsProcessor
SamplingCircularQueue<TickSampleEventRecord,
kTickSampleQueueLength> ticks_buffer_;
std::unique_ptr<sampler::Sampler> sampler_;
CpuProfilesCollection* profiles_;
base::TimeDelta period_; // Samples & code events processing period.
const bool use_precise_sampling_; // Whether or not busy-waiting is used for
// low sampling intervals on Windows.
......@@ -294,7 +297,7 @@ class V8_EXPORT_PRIVATE CpuProfiler {
CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
CpuProfilingLoggingMode logging_mode,
CpuProfilesCollection* profiles, ProfileGenerator* test_generator,
CpuProfilesCollection* profiles, Symbolizer* test_symbolizer,
ProfilerEventsProcessor* test_processor);
~CpuProfiler();
......@@ -321,7 +324,7 @@ class V8_EXPORT_PRIVATE CpuProfiler {
bool is_profiling() const { return is_profiling_; }
ProfileGenerator* generator() const { return generator_.get(); }
Symbolizer* symbolizer() const { return symbolizer_.get(); }
ProfilerEventsProcessor* processor() const { return processor_.get(); }
Isolate* isolate() const { return isolate_; }
......@@ -352,7 +355,7 @@ class V8_EXPORT_PRIVATE CpuProfiler {
// to a multiple of, or used as the default if unspecified.
base::TimeDelta base_sampling_interval_;
std::unique_ptr<CpuProfilesCollection> profiles_;
std::unique_ptr<ProfileGenerator> generator_;
std::unique_ptr<Symbolizer> symbolizer_;
std::unique_ptr<ProfilerEventsProcessor> processor_;
std::unique_ptr<ProfilerListener> profiler_listener_;
std::unique_ptr<ProfilingScope> profiling_scope_;
......
......@@ -28,13 +28,6 @@ CodeEntry::CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
position_(0),
line_info_(std::move(line_info)) {}
inline CodeEntry* ProfileGenerator::FindEntry(Address address,
Address* out_instruction_start) {
CodeEntry* entry = code_map_->FindEntry(address, out_instruction_start);
if (entry) entry->mark_used();
return entry;
}
ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry,
ProfileNode* parent, int line_number)
: tree_(tree),
......
......@@ -833,172 +833,5 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
current_profiles_semaphore_.Signal();
}
ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles,
CodeMap* code_map)
: profiles_(profiles), code_map_(code_map) {}
void ProfileGenerator::SymbolizeTickSample(const TickSample& sample) {
ProfileStackTrace stack_trace;
// Conservatively reserve space for stack frames + pc + function + vm-state.
// There could in fact be more of them because of inlined entries.
stack_trace.reserve(sample.frames_count + 3);
// The ProfileNode knows nothing about all versions of generated code for
// the same JS function. The line number information associated with
// the latest version of generated code is used to find a source line number
// for a JS function. Then, the detected source line is passed to
// ProfileNode to increase the tick count for this source line.
const int no_line_info = v8::CpuProfileNode::kNoLineNumberInfo;
int src_line = no_line_info;
bool src_line_not_found = true;
if (sample.pc != nullptr) {
if (sample.has_external_callback && sample.state == EXTERNAL) {
// Don't use PC when in external callback code, as it can point
// inside a callback's code, and we will erroneously report
// that a callback calls itself.
stack_trace.push_back({{FindEntry(reinterpret_cast<Address>(
sample.external_callback_entry)),
no_line_info}});
} else {
Address attributed_pc = reinterpret_cast<Address>(sample.pc);
Address pc_entry_instruction_start = kNullAddress;
CodeEntry* pc_entry =
FindEntry(attributed_pc, &pc_entry_instruction_start);
// If there is no pc_entry, we're likely in native code. Find out if the
// top of the stack (the return address) was pointing inside a JS
// function, meaning that we have encountered a frameless invocation.
if (!pc_entry && !sample.has_external_callback) {
attributed_pc = reinterpret_cast<Address>(sample.tos);
pc_entry = FindEntry(attributed_pc, &pc_entry_instruction_start);
}
// If pc is in the function code before it set up stack frame or after the
// frame was destroyed, SafeStackFrameIterator incorrectly thinks that
// ebp contains the return address of the current function and skips the
// caller's frame. Check for this case and just skip such samples.
if (pc_entry) {
int pc_offset =
static_cast<int>(attributed_pc - pc_entry_instruction_start);
// TODO(petermarshall): pc_offset can still be negative in some cases.
src_line = pc_entry->GetSourceLine(pc_offset);
if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
src_line = pc_entry->line_number();
}
src_line_not_found = false;
stack_trace.push_back({{pc_entry, src_line}});
if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
// When current function is either the Function.prototype.apply or the
// Function.prototype.call builtin the top frame is either frame of
// the calling JS function or internal frame.
// In the latter case we know the caller for sure but in the
// former case we don't so we simply replace the frame with
// 'unresolved' entry.
if (!sample.has_external_callback) {
ProfilerStats::Instance()->AddReason(
ProfilerStats::Reason::kInCallOrApply);
stack_trace.push_back(
{{CodeEntry::unresolved_entry(), no_line_info}});
}
}
}
}
for (unsigned i = 0; i < sample.frames_count; ++i) {
Address stack_pos = reinterpret_cast<Address>(sample.stack[i]);
Address instruction_start = kNullAddress;
CodeEntry* entry = FindEntry(stack_pos, &instruction_start);
int line_number = no_line_info;
if (entry) {
// Find out if the entry has an inlining stack associated.
int pc_offset = static_cast<int>(stack_pos - instruction_start);
// TODO(petermarshall): pc_offset can still be negative in some cases.
const std::vector<CodeEntryAndLineNumber>* inline_stack =
entry->GetInlineStack(pc_offset);
if (inline_stack) {
int most_inlined_frame_line_number = entry->GetSourceLine(pc_offset);
for (auto entry : *inline_stack) {
stack_trace.push_back({entry});
}
// This is a bit of a messy hack. The line number for the most-inlined
// frame (the function at the end of the chain of function calls) has
// the wrong line number in inline_stack. The actual line number in
// this function is stored in the SourcePositionTable in entry. We fix
// up the line number for the most-inlined frame here.
// TODO(petermarshall): Remove this and use a tree with a node per
// inlining_id.
DCHECK(!inline_stack->empty());
size_t index = stack_trace.size() - inline_stack->size();
stack_trace[index].entry.line_number = most_inlined_frame_line_number;
}
// Skip unresolved frames (e.g. internal frame) and get source line of
// the first JS caller.
if (src_line_not_found) {
src_line = entry->GetSourceLine(pc_offset);
if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
src_line = entry->line_number();
}
src_line_not_found = false;
}
line_number = entry->GetSourceLine(pc_offset);
// The inline stack contains the top-level function i.e. the same
// function as entry. We don't want to add it twice. The one from the
// inline stack has the correct line number for this particular inlining
// so we use it instead of pushing entry to stack_trace.
if (inline_stack) continue;
}
stack_trace.push_back({{entry, line_number}});
}
}
if (FLAG_prof_browser_mode) {
bool no_symbolized_entries = true;
for (auto e : stack_trace) {
if (e.entry.code_entry != nullptr) {
no_symbolized_entries = false;
break;
}
}
// If no frames were symbolized, put the VM state entry in.
if (no_symbolized_entries) {
if (sample.pc == nullptr) {
ProfilerStats::Instance()->AddReason(ProfilerStats::Reason::kNullPC);
} else {
ProfilerStats::Instance()->AddReason(
ProfilerStats::Reason::kNoSymbolizedFrames);
}
stack_trace.push_back({{EntryForVMState(sample.state), no_line_info}});
}
}
profiles_->AddPathToCurrentProfiles(sample.timestamp, stack_trace, src_line,
sample.update_stats,
sample.sampling_interval);
}
CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
switch (tag) {
case GC:
return CodeEntry::gc_entry();
case JS:
case PARSER:
case COMPILER:
case BYTECODE_COMPILER:
case ATOMICS_WAIT:
// DOM events handlers are reported as OTHER / EXTERNAL entries.
// To avoid confusing people, let's put all these entries into
// one bucket.
case OTHER:
case EXTERNAL:
return CodeEntry::program_entry();
case IDLE:
return CodeEntry::idle_entry();
}
UNREACHABLE();
}
} // namespace internal
} // namespace v8
......@@ -483,28 +483,6 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
};
class V8_EXPORT_PRIVATE ProfileGenerator {
public:
explicit ProfileGenerator(CpuProfilesCollection* profiles, CodeMap* code_map);
// Use the CodeMap to turn the raw addresses recorded in the sample into
// code/function names. The symbolized stack is added to the relevant
// profiles in the CpuProfilesCollection.
void SymbolizeTickSample(const TickSample& sample);
CodeMap* code_map() { return code_map_; }
private:
CodeEntry* FindEntry(Address address,
Address* out_instruction_start = nullptr);
CodeEntry* EntryForVMState(StateTag tag);
CpuProfilesCollection* profiles_;
CodeMap* const code_map_;
DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
};
} // namespace internal
} // namespace v8
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/profiler/symbolizer.h"
#include "src/execution/vm-state.h"
#include "src/profiler/profile-generator.h"
#include "src/profiler/profiler-stats.h"
#include "src/profiler/tick-sample.h"
namespace v8 {
namespace internal {
// NOTE(review): |code_map| is held as a raw pointer, so it is presumably
// owned elsewhere and expected to outlive this Symbolizer — confirm with
// callers (CpuProfiler owns the code observer's CodeMap).
Symbolizer::Symbolizer(CodeMap* code_map) : code_map_(code_map) {}
// Looks up the CodeEntry covering |address| in the code map. On a hit the
// entry is flagged as used (CodeEntry::mark_used) before being returned;
// on a miss, nullptr is returned and nothing is marked.
CodeEntry* Symbolizer::FindEntry(Address address,
                                 Address* out_instruction_start) {
  CodeEntry* result = code_map_->FindEntry(address, out_instruction_start);
  if (result == nullptr) return nullptr;
  result->mark_used();
  return result;
}
namespace {

// Maps a VM state tag to the synthetic CodeEntry used when a sample could
// not be symbolized to real code. The switch covers every StateTag value,
// so no trailing UNREACHABLE() is needed (the compiler checks
// exhaustiveness).
CodeEntry* EntryForVMState(StateTag tag) {
  switch (tag) {
    case GC:
      return CodeEntry::gc_entry();
    case IDLE:
      return CodeEntry::idle_entry();
    case JS:
    case PARSER:
    case COMPILER:
    case BYTECODE_COMPILER:
    case ATOMICS_WAIT:
    // DOM events handlers are reported as OTHER / EXTERNAL entries.
    // To avoid confusing people, let's put all these entries into
    // one bucket.
    case OTHER:
    case EXTERNAL:
      return CodeEntry::program_entry();
  }
}

}  // namespace
// Translates the raw addresses captured in |sample| (pc, tos, stack frames)
// into (CodeEntry*, line number) pairs using the CodeMap, and determines the
// source line to attribute the tick to. The symbolized result is returned to
// the caller rather than written into a profiles collection, which keeps this
// function testable against a CodeMap alone.
Symbolizer::SymbolizedSample Symbolizer::SymbolizeTickSample(
    const TickSample& sample) {
  ProfileStackTrace stack_trace;
  // Conservatively reserve space for stack frames + pc + function + vm-state.
  // There could in fact be more of them because of inlined entries.
  stack_trace.reserve(sample.frames_count + 3);

  // The ProfileNode knows nothing about all versions of generated code for
  // the same JS function. The line number information associated with
  // the latest version of generated code is used to find a source line number
  // for a JS function. Then, the detected source line is passed to
  // ProfileNode to increase the tick count for this source line.
  const int no_line_info = v8::CpuProfileNode::kNoLineNumberInfo;
  int src_line = no_line_info;
  bool src_line_not_found = true;

  if (sample.pc != nullptr) {
    if (sample.has_external_callback && sample.state == EXTERNAL) {
      // Don't use PC when in external callback code, as it can point
      // inside a callback's code, and we will erroneously report
      // that a callback calls itself.
      stack_trace.push_back({{FindEntry(reinterpret_cast<Address>(
                                  sample.external_callback_entry)),
                              no_line_info}});
    } else {
      Address attributed_pc = reinterpret_cast<Address>(sample.pc);
      Address pc_entry_instruction_start = kNullAddress;
      CodeEntry* pc_entry =
          FindEntry(attributed_pc, &pc_entry_instruction_start);
      // If there is no pc_entry, we're likely in native code. Find out if the
      // top of the stack (the return address) was pointing inside a JS
      // function, meaning that we have encountered a frameless invocation.
      if (!pc_entry && !sample.has_external_callback) {
        attributed_pc = reinterpret_cast<Address>(sample.tos);
        pc_entry = FindEntry(attributed_pc, &pc_entry_instruction_start);
      }
      // If pc is in the function code before it set up stack frame or after the
      // frame was destroyed, SafeStackFrameIterator incorrectly thinks that
      // ebp contains the return address of the current function and skips the
      // caller's frame. Check for this case and just skip such samples.
      if (pc_entry) {
        int pc_offset =
            static_cast<int>(attributed_pc - pc_entry_instruction_start);
        // TODO(petermarshall): pc_offset can still be negative in some cases.
        src_line = pc_entry->GetSourceLine(pc_offset);
        if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
          src_line = pc_entry->line_number();
        }
        src_line_not_found = false;
        stack_trace.push_back({{pc_entry, src_line}});

        if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
            pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
          // When current function is either the Function.prototype.apply or the
          // Function.prototype.call builtin the top frame is either frame of
          // the calling JS function or internal frame.
          // In the latter case we know the caller for sure but in the
          // former case we don't so we simply replace the frame with
          // 'unresolved' entry.
          if (!sample.has_external_callback) {
            ProfilerStats::Instance()->AddReason(
                ProfilerStats::Reason::kInCallOrApply);
            stack_trace.push_back(
                {{CodeEntry::unresolved_entry(), no_line_info}});
          }
        }
      }
    }

    for (unsigned i = 0; i < sample.frames_count; ++i) {
      Address stack_pos = reinterpret_cast<Address>(sample.stack[i]);
      Address instruction_start = kNullAddress;
      CodeEntry* entry = FindEntry(stack_pos, &instruction_start);
      int line_number = no_line_info;
      if (entry) {
        // Find out if the entry has an inlining stack associated.
        int pc_offset = static_cast<int>(stack_pos - instruction_start);
        // TODO(petermarshall): pc_offset can still be negative in some cases.
        const std::vector<CodeEntryAndLineNumber>* inline_stack =
            entry->GetInlineStack(pc_offset);
        if (inline_stack) {
          int most_inlined_frame_line_number = entry->GetSourceLine(pc_offset);
          // Note: the loop variable deliberately shadows |entry|; each element
          // of the inline stack is pushed as its own frame.
          for (auto entry : *inline_stack) {
            stack_trace.push_back({entry});
          }

          // This is a bit of a messy hack. The line number for the most-inlined
          // frame (the function at the end of the chain of function calls) has
          // the wrong line number in inline_stack. The actual line number in
          // this function is stored in the SourcePositionTable in entry. We fix
          // up the line number for the most-inlined frame here.
          // TODO(petermarshall): Remove this and use a tree with a node per
          // inlining_id.
          DCHECK(!inline_stack->empty());
          size_t index = stack_trace.size() - inline_stack->size();
          stack_trace[index].entry.line_number = most_inlined_frame_line_number;
        }
        // Skip unresolved frames (e.g. internal frame) and get source line of
        // the first JS caller.
        if (src_line_not_found) {
          src_line = entry->GetSourceLine(pc_offset);
          if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
            src_line = entry->line_number();
          }
          src_line_not_found = false;
        }
        line_number = entry->GetSourceLine(pc_offset);

        // The inline stack contains the top-level function i.e. the same
        // function as entry. We don't want to add it twice. The one from the
        // inline stack has the correct line number for this particular inlining
        // so we use it instead of pushing entry to stack_trace.
        if (inline_stack) continue;
      }
      stack_trace.push_back({{entry, line_number}});
    }
  }

  if (FLAG_prof_browser_mode) {
    bool no_symbolized_entries = true;
    for (auto e : stack_trace) {
      if (e.entry.code_entry != nullptr) {
        no_symbolized_entries = false;
        break;
      }
    }
    // If no frames were symbolized, put the VM state entry in.
    if (no_symbolized_entries) {
      if (sample.pc == nullptr) {
        ProfilerStats::Instance()->AddReason(ProfilerStats::Reason::kNullPC);
      } else {
        ProfilerStats::Instance()->AddReason(
            ProfilerStats::Reason::kNoSymbolizedFrames);
      }
      stack_trace.push_back({{EntryForVMState(sample.state), no_line_info}});
    }
  }

  return SymbolizedSample{stack_trace, src_line};
}
} // namespace internal
} // namespace v8
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_PROFILER_SYMBOLIZER_H_
#define V8_PROFILER_SYMBOLIZER_H_
#include "src/base/macros.h"
#include "src/profiler/profile-generator.h"
namespace v8 {
namespace internal {
class CodeEntry;
class CodeMap;
// Symbolizer turns the raw addresses recorded in a TickSample into symbolic
// (CodeEntry, line number) frames via a CodeMap. It holds no profiling state
// of its own, so it can be unit-tested with just a CodeMap.
class V8_EXPORT_PRIVATE Symbolizer {
 public:
  explicit Symbolizer(CodeMap* code_map);

  // The result of symbolizing one tick: the symbolized call stack plus the
  // source line to attribute the tick to (kNoLineNumberInfo if none found).
  struct SymbolizedSample {
    ProfileStackTrace stack_trace;
    int src_line;
  };

  // Use the CodeMap to turn the raw addresses recorded in the sample into
  // code/function names.
  SymbolizedSample SymbolizeTickSample(const TickSample& sample);

  CodeMap* code_map() { return code_map_; }

 private:
  // Looks up |address| in the code map; marks the entry as used when found.
  CodeEntry* FindEntry(Address address,
                       Address* out_instruction_start = nullptr);

  CodeMap* const code_map_;  // Not owned.

  DISALLOW_COPY_AND_ASSIGN(Symbolizer);
};
} // namespace internal
} // namespace v8
#endif // V8_PROFILER_SYMBOLIZER_H_
This diff is collapsed.
......@@ -34,6 +34,7 @@
#include "src/objects/objects-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
#include "src/profiler/symbolizer.h"
#include "test/cctest/cctest.h"
#include "test/cctest/profiler-extension.h"
......@@ -376,19 +377,14 @@ class TestSetup {
TEST(SymbolizeTickSample) {
TestSetup test_setup;
i::Isolate* isolate = CcTest::i_isolate();
CpuProfilesCollection profiles(isolate);
CpuProfiler profiler(isolate);
profiles.set_cpu_profiler(&profiler);
profiles.StartProfiling("");
CodeMap code_map;
ProfileGenerator generator(&profiles, &code_map);
Symbolizer symbolizer(&code_map);
CodeEntry* entry1 = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
CodeEntry* entry2 = new CodeEntry(i::Logger::FUNCTION_TAG, "bbb");
CodeEntry* entry3 = new CodeEntry(i::Logger::FUNCTION_TAG, "ccc");
generator.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
generator.code_map()->AddCode(ToAddress(0x1700), entry2, 0x100);
generator.code_map()->AddCode(ToAddress(0x1900), entry3, 0x50);
symbolizer.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
symbolizer.code_map()->AddCode(ToAddress(0x1700), entry2, 0x100);
symbolizer.code_map()->AddCode(ToAddress(0x1900), entry3, 0x50);
// We are building the following calls tree:
// -> aaa - sample1
......@@ -399,7 +395,13 @@ TEST(SymbolizeTickSample) {
sample1.tos = ToPointer(0x1500);
sample1.stack[0] = ToPointer(0x1510);
sample1.frames_count = 1;
generator.SymbolizeTickSample(sample1);
Symbolizer::SymbolizedSample symbolized =
symbolizer.SymbolizeTickSample(sample1);
ProfileStackTrace& stack_trace = symbolized.stack_trace;
CHECK_EQ(2, stack_trace.size());
CHECK_EQ(entry1, stack_trace[0].entry.code_entry);
CHECK_EQ(entry1, stack_trace[1].entry.code_entry);
TickSample sample2;
sample2.pc = ToPointer(0x1925);
sample2.tos = ToPointer(0x1900);
......@@ -407,32 +409,26 @@ TEST(SymbolizeTickSample) {
sample2.stack[1] = ToPointer(0x10000); // non-existent.
sample2.stack[2] = ToPointer(0x1620);
sample2.frames_count = 3;
generator.SymbolizeTickSample(sample2);
symbolized = symbolizer.SymbolizeTickSample(sample2);
stack_trace = symbolized.stack_trace;
CHECK_EQ(4, stack_trace.size());
CHECK_EQ(entry3, stack_trace[0].entry.code_entry);
CHECK_EQ(entry2, stack_trace[1].entry.code_entry);
CHECK_EQ(nullptr, stack_trace[2].entry.code_entry);
CHECK_EQ(entry1, stack_trace[3].entry.code_entry);
TickSample sample3;
sample3.pc = ToPointer(0x1510);
sample3.tos = ToPointer(0x1500);
sample3.stack[0] = ToPointer(0x1910);
sample3.stack[1] = ToPointer(0x1610);
sample3.frames_count = 2;
generator.SymbolizeTickSample(sample3);
CpuProfile* profile = profiles.StopProfiling("");
CHECK(profile);
ProfileTreeTestHelper top_down_test_helper(profile->top_down());
CHECK(!top_down_test_helper.Walk(entry2));
CHECK(!top_down_test_helper.Walk(entry3));
ProfileNode* node1 = top_down_test_helper.Walk(entry1);
CHECK(node1);
CHECK_EQ(entry1, node1->entry());
ProfileNode* node2 = top_down_test_helper.Walk(entry1, entry1);
CHECK(node2);
CHECK_EQ(entry1, node2->entry());
ProfileNode* node3 = top_down_test_helper.Walk(entry1, entry2, entry3);
CHECK(node3);
CHECK_EQ(entry3, node3->entry());
ProfileNode* node4 = top_down_test_helper.Walk(entry1, entry3, entry1);
CHECK(node4);
CHECK_EQ(entry1, node4->entry());
symbolized = symbolizer.SymbolizeTickSample(sample3);
stack_trace = symbolized.stack_trace;
CHECK_EQ(3, stack_trace.size());
CHECK_EQ(entry1, stack_trace[0].entry.code_entry);
CHECK_EQ(entry3, stack_trace[1].entry.code_entry);
CHECK_EQ(entry1, stack_trace[2].entry.code_entry);
}
static void CheckNodeIds(const ProfileNode* node, unsigned* expectedId) {
......@@ -442,7 +438,6 @@ static void CheckNodeIds(const ProfileNode* node, unsigned* expectedId) {
}
}
TEST(SampleIds) {
TestSetup test_setup;
i::Isolate* isolate = CcTest::i_isolate();
......@@ -451,13 +446,13 @@ TEST(SampleIds) {
profiles.set_cpu_profiler(&profiler);
profiles.StartProfiling("", {CpuProfilingMode::kLeafNodeLineNumbers});
CodeMap code_map;
ProfileGenerator generator(&profiles, &code_map);
Symbolizer symbolizer(&code_map);
CodeEntry* entry1 = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
CodeEntry* entry2 = new CodeEntry(i::Logger::FUNCTION_TAG, "bbb");
CodeEntry* entry3 = new CodeEntry(i::Logger::FUNCTION_TAG, "ccc");
generator.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
generator.code_map()->AddCode(ToAddress(0x1700), entry2, 0x100);
generator.code_map()->AddCode(ToAddress(0x1900), entry3, 0x50);
symbolizer.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
symbolizer.code_map()->AddCode(ToAddress(0x1700), entry2, 0x100);
symbolizer.code_map()->AddCode(ToAddress(0x1900), entry3, 0x50);
// We are building the following calls tree:
// -> aaa #3 - sample1
......@@ -468,7 +463,11 @@ TEST(SampleIds) {
sample1.pc = ToPointer(0x1600);
sample1.stack[0] = ToPointer(0x1510);
sample1.frames_count = 1;
generator.SymbolizeTickSample(sample1);
auto symbolized = symbolizer.SymbolizeTickSample(sample1);
profiles.AddPathToCurrentProfiles(sample1.timestamp, symbolized.stack_trace,
symbolized.src_line, true,
base::TimeDelta());
TickSample sample2;
sample2.timestamp = v8::base::TimeTicks::HighResolutionNow();
sample2.pc = ToPointer(0x1925);
......@@ -476,14 +475,21 @@ TEST(SampleIds) {
sample2.stack[1] = ToPointer(0x10000); // non-existent.
sample2.stack[2] = ToPointer(0x1620);
sample2.frames_count = 3;
generator.SymbolizeTickSample(sample2);
symbolized = symbolizer.SymbolizeTickSample(sample2);
profiles.AddPathToCurrentProfiles(sample2.timestamp, symbolized.stack_trace,
symbolized.src_line, true,
base::TimeDelta());
TickSample sample3;
sample3.timestamp = v8::base::TimeTicks::HighResolutionNow();
sample3.pc = ToPointer(0x1510);
sample3.stack[0] = ToPointer(0x1910);
sample3.stack[1] = ToPointer(0x1610);
sample3.frames_count = 2;
generator.SymbolizeTickSample(sample3);
symbolized = symbolizer.SymbolizeTickSample(sample3);
profiles.AddPathToCurrentProfiles(sample3.timestamp, symbolized.stack_trace,
symbolized.src_line, true,
base::TimeDelta());
CpuProfile* profile = profiles.StopProfiling("");
unsigned nodeId = 1;
......@@ -497,7 +503,6 @@ TEST(SampleIds) {
}
}
TEST(NoSamples) {
TestSetup test_setup;
i::Isolate* isolate = CcTest::i_isolate();
......@@ -506,9 +511,9 @@ TEST(NoSamples) {
profiles.set_cpu_profiler(&profiler);
profiles.StartProfiling("");
CodeMap code_map;
ProfileGenerator generator(&profiles, &code_map);
Symbolizer symbolizer(&code_map);
CodeEntry* entry1 = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
generator.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
symbolizer.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
// We are building the following calls tree:
// (root)#1 -> aaa #2 -> aaa #3 - sample1
......@@ -516,17 +521,19 @@ TEST(NoSamples) {
sample1.pc = ToPointer(0x1600);
sample1.stack[0] = ToPointer(0x1510);
sample1.frames_count = 1;
generator.SymbolizeTickSample(sample1);
auto symbolized = symbolizer.SymbolizeTickSample(sample1);
profiles.AddPathToCurrentProfiles(v8::base::TimeTicks::HighResolutionNow(),
symbolized.stack_trace, symbolized.src_line,
true, base::TimeDelta());
CpuProfile* profile = profiles.StopProfiling("");
unsigned nodeId = 1;
CheckNodeIds(profile->top_down()->root(), &nodeId);
CHECK_EQ(3u, nodeId - 1);
CHECK_EQ(0, profile->samples_count());
CHECK_EQ(1, profile->samples_count());
}
static const ProfileNode* PickChild(const ProfileNode* parent,
const char* name) {
for (const ProfileNode* child : *parent->children()) {
......@@ -674,7 +681,7 @@ static const char* line_number_test_source_profile_time_functions =
int GetFunctionLineNumber(CpuProfiler* profiler, LocalContext* env,
const char* name) {
CodeMap* code_map = profiler->generator()->code_map();
CodeMap* code_map = profiler->symbolizer()->code_map();
i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
(*env)->Global()->Get(env->local(), v8_str(name)).ToLocalChecked())));
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment