Commit 0363eb4d authored by Clemens Backes, committed by Commit Bot

[wasm][debug] Generate sidetables for stepping lazily

We often step multiple times without inspecting the state in between, so the
generated debug side table is often never used. Instead of always generating
it eagerly, generate it lazily on demand, which often avoids generating it
at all.

R=thibaudm@chromium.org

TEST=inspector/debugger/wasm-stepping

Bug: chromium:1172299
Change-Id: I9b9ff4485d65d720d23585856b3d672925460667
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2664446
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72484}
parent be3a6690
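
The policy this commit introduces (see the DebugInfoImpl hunks below): when a function is recompiled because the user set real breakpoints, the debug side table is still built eagerly; when it is recompiled only to flood it with breakpoints for stepping, the table is skipped and built lazily on first inspection. A minimal sketch of that decision, using simplified stand-in names rather than the real V8 declarations:

// Editorial sketch, not part of the commit; simplified stand-ins for the real enum.
enum ForDebugging { kForDebugging, kWithBreakpoints, kForStepping };

// Should a recompile request a debug side table up front?
bool WantsEagerDebugSideTable(ForDebugging mode) {
  // Stepping usually executes several steps before (if ever) pausing to
  // inspect locals, so building the table eagerly is often wasted work.
  return mode == kWithBreakpoints;
}
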
@@ -417,7 +417,7 @@ class LiftoffCompiler {
std::unique_ptr<AssemblerBuffer> buffer,
DebugSideTableBuilder* debug_sidetable_builder,
ForDebugging for_debugging, int func_index,
- Vector<int> breakpoints = {}, int dead_breakpoint = 0)
+ Vector<const int> breakpoints = {}, int dead_breakpoint = 0)
: asm_(std::move(buffer)),
descriptor_(
GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
@@ -5253,8 +5253,8 @@ class LiftoffCompiler {
// breakpoint, and a pointer after the list of breakpoints as end marker.
// A single breakpoint at offset 0 indicates that we should prepare the
// function for stepping by flooding it with breakpoints.
- int* next_breakpoint_ptr_ = nullptr;
- int* next_breakpoint_end_ = nullptr;
+ const int* next_breakpoint_ptr_ = nullptr;
+ const int* next_breakpoint_end_ = nullptr;
// Introduce a dead breakpoint to ensure that the calculation of the return
// address in OSR is correct.
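
The comment above spells out the convention the compiler relies on: next_breakpoint_ptr_ and next_breakpoint_end_ delimit a sorted list of breakpoint offsets, and a single entry at offset 0 means "flood the whole function for stepping". A rough, self-contained illustration of that convention (a hypothetical helper, not the real Liftoff code):

// Editorial sketch, not part of the commit: the [next, end) breakpoint-list
// convention described in the comment above.
struct BreakpointCursor {
  const int* next = nullptr;  // next breakpoint offset to look for
  const int* end = nullptr;   // one past the last breakpoint offset

  // A single entry at offset 0 requests flooding for stepping.
  bool stepping() const { return end - next == 1 && *next == 0; }

  // Should a breakpoint be emitted at this code offset?
  bool HitsAt(int offset) {
    if (stepping()) return true;                   // flooding: break everywhere
    while (next != end && *next < offset) ++next;  // skip offsets already passed
    return next != end && *next == offset;
  }
};
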
@@ -5298,7 +5298,7 @@ class LiftoffCompiler {
WasmCompilationResult ExecuteLiftoffCompilation(
AccountingAllocator* allocator, CompilationEnv* env,
const FunctionBody& func_body, int func_index, ForDebugging for_debugging,
- Counters* counters, WasmFeatures* detected, Vector<int> breakpoints,
+ Counters* counters, WasmFeatures* detected, Vector<const int> breakpoints,
std::unique_ptr<DebugSideTable>* debug_sidetable, int dead_breakpoint) {
int func_body_size = static_cast<int>(func_body.end - func_body.start);
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
@@ -5314,8 +5314,6 @@ WasmCompilationResult ExecuteLiftoffCompilation(
std::unique_ptr<wasm::WasmInstructionBuffer> instruction_buffer =
wasm::WasmInstructionBuffer::New(128 + code_size_estimate * 4 / 3);
std::unique_ptr<DebugSideTableBuilder> debug_sidetable_builder;
- // If we are emitting breakpoints, we should also emit the debug side table.
- DCHECK_IMPLIES(!breakpoints.empty(), debug_sidetable != nullptr);
if (debug_sidetable) {
debug_sidetable_builder = std::make_unique<DebugSideTableBuilder>();
}
@@ -5367,16 +5365,21 @@ WasmCompilationResult ExecuteLiftoffCompilation(
std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
AccountingAllocator* allocator, CompilationEnv* env,
- const FunctionBody& func_body, int func_index) {
+ const FunctionBody& func_body, int func_index, ForDebugging for_debugging) {
Zone zone(allocator, "LiftoffDebugSideTableZone");
auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
DebugSideTableBuilder debug_sidetable_builder;
WasmFeatures detected;
+ constexpr int kSteppingBreakpoints[] = {0};
+ DCHECK(for_debugging == kForDebugging || for_debugging == kForStepping);
+ Vector<const int> breakpoints = for_debugging == kForStepping
+                                     ? ArrayVector(kSteppingBreakpoints)
+                                     : Vector<const int>{};
WasmFullDecoder<Decoder::kBooleanValidation, LiftoffCompiler> decoder(
&zone, env->module, env->enabled_features, &detected, func_body,
call_descriptor, env, &zone,
NewAssemblerBuffer(AssemblerBase::kDefaultBufferSize),
- &debug_sidetable_builder, kForDebugging, func_index);
+ &debug_sidetable_builder, for_debugging, func_index, breakpoints);
decoder.Decode();
DCHECK(decoder.ok());
DCHECK(!decoder.interface().did_bailout());
@@ -56,11 +56,12 @@ enum LiftoffBailoutReason : int8_t {
V8_EXPORT_PRIVATE WasmCompilationResult ExecuteLiftoffCompilation(
AccountingAllocator*, CompilationEnv*, const FunctionBody&, int func_index,
ForDebugging, Counters*, WasmFeatures* detected_features,
- Vector<int> breakpoints = {}, std::unique_ptr<DebugSideTable>* = nullptr,
- int dead_breakpoint = 0);
+ Vector<const int> breakpoints = {},
+ std::unique_ptr<DebugSideTable>* = nullptr, int dead_breakpoint = 0);
V8_EXPORT_PRIVATE std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
- AccountingAllocator*, CompilationEnv*, const FunctionBody&, int func_index);
+ AccountingAllocator*, CompilationEnv*, const FunctionBody&, int func_index,
+ ForDebugging);
} // namespace wasm
} // namespace internal
@@ -212,7 +212,8 @@ class DebugInfoImpl {
return offset;
}
- WasmCode* RecompileLiftoffWithBreakpoints(int func_index, Vector<int> offsets,
+ WasmCode* RecompileLiftoffWithBreakpoints(int func_index,
+                                           Vector<const int> offsets,
int dead_breakpoint) {
DCHECK(!mutex_.TryLock()); // Mutex is held externally.
// Recompile the function with Liftoff, setting the new breakpoints.
@@ -228,22 +229,24 @@ class DebugInfoImpl {
ForDebugging for_debugging = offsets.size() == 1 && offsets[0] == 0
? kForStepping
: kWithBreakpoints;
+ // Debug side tables for stepping are generated lazily.
+ bool generate_debug_sidetable = for_debugging == kWithBreakpoints;
Counters* counters = nullptr;
WasmFeatures unused_detected;
WasmCompilationResult result = ExecuteLiftoffCompilation(
native_module_->engine()->allocator(), &env, body, func_index,
- for_debugging, counters, &unused_detected, offsets, &debug_sidetable,
- dead_breakpoint);
+ for_debugging, counters, &unused_detected, offsets,
+ generate_debug_sidetable ? &debug_sidetable : nullptr, dead_breakpoint);
// Liftoff compilation failure is a FATAL error. We rely on complete Liftoff
// support for debugging.
if (!result.succeeded()) FATAL("Liftoff compilation failed");
- DCHECK_NOT_NULL(debug_sidetable);
+ DCHECK_EQ(generate_debug_sidetable, debug_sidetable != nullptr);
WasmCode* new_code = native_module_->PublishCode(
native_module_->AddCompiledCode(std::move(result)));
DCHECK(new_code->is_inspectable());
- {
+ if (generate_debug_sidetable) {
base::MutexGuard lock(&debug_side_tables_mutex_);
DCHECK_EQ(0, debug_side_tables_.count(new_code));
debug_side_tables_.emplace(new_code, std::move(debug_sidetable));
@@ -323,12 +326,12 @@ class DebugInfoImpl {
void FloodWithBreakpoints(WasmFrame* frame, ReturnLocation return_location) {
// 0 is an invalid offset used to indicate flooding.
- int offset = 0;
+ constexpr int kFloodingBreakpoints[] = {0};
DCHECK(frame->wasm_code()->is_liftoff());
// Generate an additional source position for the current byte offset.
base::MutexGuard guard(&mutex_);
WasmCode* new_code = RecompileLiftoffWithBreakpoints(
- frame->function_index(), VectorOf(&offset, 1), 0);
+ frame->function_index(), ArrayVector(kFloodingBreakpoints), 0);
UpdateReturnAddress(frame, new_code, return_location);
per_isolate_data_[frame->isolate()].stepping_frame = frame->id();
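
The switch from a mutable local int plus VectorOf(&offset, 1) to a constexpr array plus ArrayVector follows from the parameter type becoming Vector<const int>. A rough illustration of why the two go together, using simplified stand-ins rather than V8's real Vector/ArrayVector definitions:

// Editorial sketch, not part of the commit: simplified stand-ins only.
#include <cstddef>

template <typename T>
struct Vector {          // non-owning view: pointer + length
  T* start = nullptr;
  std::size_t length = 0;
};

template <typename T, std::size_t N>
Vector<const T> ArrayVector(const T (&arr)[N]) {
  return {arr, N};       // a constexpr array is only reachable through a const T*
}

constexpr int kFloodingBreakpoints[] = {0};
// Vector<const int> ok = ArrayVector(kFloodingBreakpoints);   // binds fine
// Vector<int> bad = ArrayVector(kFloodingBreakpoints);        // would not compile
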
@@ -448,11 +451,9 @@ class DebugInfoImpl {
: code(debug_info->native_module_->engine()->code_manager()->LookupCode(
pc)),
pc_offset(static_cast<int>(pc - code->instruction_start())),
- debug_side_table(
-     code->is_inspectable()
-         ? debug_info->GetDebugSideTable(
-               code, debug_info->native_module_->engine()->allocator())
-         : nullptr),
+ debug_side_table(code->is_inspectable()
+                      ? debug_info->GetDebugSideTable(code)
+                      : nullptr),
debug_side_table_entry(debug_side_table
? debug_side_table->GetEntry(pc_offset)
: nullptr) {
@@ -468,8 +469,7 @@ class DebugInfoImpl {
const DebugSideTable::Entry* debug_side_table_entry;
};
- const DebugSideTable* GetDebugSideTable(WasmCode* code,
-                                         AccountingAllocator* allocator) {
+ const DebugSideTable* GetDebugSideTable(WasmCode* code) {
DCHECK(code->is_inspectable());
{
// Only hold the mutex temporarily. We can't hold it while generating the
@@ -481,6 +481,7 @@ class DebugInfoImpl {
// Otherwise create the debug side table now.
auto* module = native_module_->module();
+ auto* allocator = native_module_->engine()->allocator();
auto* function = &module->functions[code->index()];
ModuleWireBytes wire_bytes{native_module_->wire_bytes()};
Vector<const byte> function_bytes = wire_bytes.GetFunctionBytes(function);
@@ -488,8 +489,8 @@ class DebugInfoImpl {
FunctionBody func_body{function->sig, 0, function_bytes.begin(),
function_bytes.end()};
std::unique_ptr<DebugSideTable> debug_side_table =
- GenerateLiftoffDebugSideTable(allocator, &env, func_body,
-     code->index());
+ GenerateLiftoffDebugSideTable(allocator, &env, func_body, code->index(),
+     code->for_debugging());
DebugSideTable* ret = debug_side_table.get();
// Check cache again, maybe another thread concurrently generated a debug
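
GetDebugSideTable above is where the laziness pays off: the table is generated only when a debugger actually inspects a frame, and the surrounding comments describe the usual double-checked pattern of dropping the mutex while generating and re-checking the cache afterwards. A compact sketch of that pattern with simplified stand-in types (not the real DebugInfoImpl members):

// Editorial sketch, not part of the commit: lazy, double-checked caching of
// debug side tables, with simplified stand-in types.
#include <map>
#include <memory>
#include <mutex>

struct WasmCode {};
struct DebugSideTable {};

class DebugSideTableCache {
 public:
  const DebugSideTable* Get(WasmCode* code) {
    {
      // Hold the mutex only while touching the cache; generation can be slow.
      std::lock_guard<std::mutex> guard(mutex_);
      auto it = tables_.find(code);
      if (it != tables_.end()) return it->second.get();
    }
    // Generate outside the lock; another thread may be doing the same work.
    auto table = std::make_unique<DebugSideTable>();
    std::lock_guard<std::mutex> guard(mutex_);
    auto& slot = tables_[code];
    if (!slot) slot = std::move(table);  // first writer wins; duplicates are dropped
    return slot.get();
  }

 private:
  std::mutex mutex_;
  std::map<WasmCode*, std::unique_ptr<DebugSideTable>> tables_;
};
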
@@ -83,7 +83,7 @@ class LiftoffCompileEnvironment {
if (breakpoints.empty()) {
std::unique_ptr<DebugSideTable> debug_side_table =
GenerateLiftoffDebugSideTable(CcTest::i_isolate()->allocator(), &env,
- test_func.body, 0);
+ test_func.body, 0, kForDebugging);
CheckTableEquals(*debug_side_table, *debug_side_table_via_compilation);
}