Commit c66cd826 authored by Clemens Backes, committed by Commit Bot

[wasm][debug] Only patch a single frame for stepping

Stepping only happens in one frame at a time, so we don't need to
rewrite the whole stack. This allows us to remove the
{flooded_function_index_}, since no function is globally flooded any
more.
A follow-up CL will ensure that the code will also not be installed in
the code table and jump table any more, to fix issues with non-local
control flow (i.e. catching a trap and reentering wasm), where we
could currently accidentally execute flooded code. It will also speed
up stepping over recursive calls enormously, since the recursive calls
don't run into the flooded breakpoints any more.

R=thibaudm@chromium.org

Bug: v8:10235
Change-Id: Ifae5e35c3242c95e1fe1a89a169ce874b818a288
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2152646
Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67202}
parent 56e0a2ac
...@@ -653,17 +653,12 @@ class DebugInfoImpl { ...@@ -653,17 +653,12 @@ class DebugInfoImpl {
return local_names_->GetName(func_index, local_index); return local_names_->GetName(func_index, local_index);
} }
void RecompileLiftoffWithBreakpoints(int func_index, Vector<int> offsets, WasmCode* RecompileLiftoffWithBreakpoints(
Isolate* current_isolate) { int func_index, Vector<int> offsets, Vector<int> extra_source_positions) {
// During compilation, we cannot hold the lock, since compilation takes the // During compilation, we cannot hold the lock, since compilation takes the
// {NativeModule} lock, which could lead to deadlocks. // {NativeModule} lock, which could lead to deadlocks.
mutex_.AssertUnheld(); mutex_.AssertUnheld();
if (func_index == flooded_function_index_) {
// We should not be flooding a function that is already flooded.
DCHECK(!(offsets.size() == 1 && offsets[0] == 0));
flooded_function_index_ = -1;
}
// Recompile the function with Liftoff, setting the new breakpoints. // Recompile the function with Liftoff, setting the new breakpoints.
// Not thread-safe. The caller is responsible for locking {mutex_}. // Not thread-safe. The caller is responsible for locking {mutex_}.
CompilationEnv env = native_module_->CreateCompilationEnv(); CompilationEnv env = native_module_->CreateCompilationEnv();
...@@ -674,21 +669,15 @@ class DebugInfoImpl { ...@@ -674,21 +669,15 @@ class DebugInfoImpl {
wire_bytes.begin() + function->code.end_offset()}; wire_bytes.begin() + function->code.end_offset()};
std::unique_ptr<DebugSideTable> debug_sidetable; std::unique_ptr<DebugSideTable> debug_sidetable;
// Generate additional source positions for current stack frame positions.
// These source positions are used to find return addresses in the new code.
std::vector<int> stack_frame_positions =
StackFramePositions(func_index, current_isolate);
WasmCompilationResult result = ExecuteLiftoffCompilation( WasmCompilationResult result = ExecuteLiftoffCompilation(
native_module_->engine()->allocator(), &env, body, func_index, native_module_->engine()->allocator(), &env, body, func_index,
kForDebugging, nullptr, nullptr, offsets, &debug_sidetable, kForDebugging, nullptr, nullptr, offsets, &debug_sidetable,
VectorOf(stack_frame_positions)); extra_source_positions);
// Liftoff compilation failure is a FATAL error. We rely on complete Liftoff // Liftoff compilation failure is a FATAL error. We rely on complete Liftoff
// support for debugging. // support for debugging.
if (!result.succeeded()) FATAL("Liftoff compilation failed"); if (!result.succeeded()) FATAL("Liftoff compilation failed");
DCHECK_NOT_NULL(debug_sidetable); DCHECK_NOT_NULL(debug_sidetable);
WasmCodeRefScope wasm_code_ref_scope;
WasmCode* new_code = native_module_->PublishCode( WasmCode* new_code = native_module_->PublishCode(
native_module_->AddCompiledCode(std::move(result))); native_module_->AddCompiledCode(std::move(result)));
...@@ -697,7 +686,7 @@ class DebugInfoImpl { ...@@ -697,7 +686,7 @@ class DebugInfoImpl {
DCHECK(added); DCHECK(added);
USE(added); USE(added);
UpdateReturnAddresses(current_isolate, new_code); return new_code;
} }
void SetBreakpoint(int func_index, int offset, Isolate* current_isolate) { void SetBreakpoint(int func_index, int offset, Isolate* current_isolate) {
...@@ -721,21 +710,37 @@ class DebugInfoImpl { ...@@ -721,21 +710,37 @@ class DebugInfoImpl {
return; return;
} }
breakpoints.insert(insertion_point, offset); breakpoints.insert(insertion_point, offset);
// No need to recompile if the function is already flooded.
if (func_index == flooded_function_index_) return;
breakpoints_copy = breakpoints; breakpoints_copy = breakpoints;
} }
RecompileLiftoffWithBreakpoints(func_index, VectorOf(breakpoints_copy), UpdateBreakpoints(func_index, VectorOf(breakpoints_copy), current_isolate);
current_isolate); }
// Recompile {func_index} with the given full set of breakpoints and then
// patch the return addresses of all matching Liftoff frames on the current
// stack so that execution resumes in the newly compiled code.
void UpdateBreakpoints(int func_index, Vector<int> breakpoints,
Isolate* current_isolate) {
// Generate additional source positions for current stack frame positions.
// These source positions are used to find return addresses in the new code.
std::vector<int> stack_frame_positions =
StackFramePositions(func_index, current_isolate);
// NOTE(review): presumably this scope keeps {new_code} alive until the
// return addresses have been patched — confirm against WasmCodeRefScope.
WasmCodeRefScope wasm_code_ref_scope;
WasmCode* new_code = RecompileLiftoffWithBreakpoints(
func_index, breakpoints, VectorOf(stack_frame_positions));
// Redirect every on-stack activation of this function into {new_code}.
UpdateReturnAddresses(current_isolate, new_code);
} }
void FloodWithBreakpoints(int func_index, Isolate* current_isolate) { void FloodWithBreakpoints(WasmCompiledFrame* frame, Isolate* current_isolate,
ReturnLocation return_location) {
// 0 is an invalid offset used to indicate flooding. // 0 is an invalid offset used to indicate flooding.
int offset = 0; int offset = 0;
RecompileLiftoffWithBreakpoints(func_index, Vector<int>(&offset, 1), WasmCodeRefScope wasm_code_ref_scope;
current_isolate); DCHECK(frame->wasm_code()->is_liftoff());
// Generate an additional source position for the current byte offset.
int byte_offset = frame->byte_offset();
WasmCode* new_code = RecompileLiftoffWithBreakpoints(
frame->function_index(), VectorOf(&offset, 1),
VectorOf(&byte_offset, 1));
UpdateReturnAddress(frame, new_code, return_location);
} }
void PrepareStep(Isolate* isolate, StackFrameId break_frame_id) { void PrepareStep(Isolate* isolate, StackFrameId break_frame_id) {
...@@ -745,24 +750,20 @@ class DebugInfoImpl { ...@@ -745,24 +750,20 @@ class DebugInfoImpl {
WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame()); WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame());
StepAction step_action = isolate->debug()->last_step_action(); StepAction step_action = isolate->debug()->last_step_action();
// If we are flooding the top frame, the return location is after a
// breakpoints. Otherwise, it's after a call.
ReturnLocation return_location = kAfterBreakpoint;
// If we are at a return instruction, then any stepping action is equivalent // If we are at a return instruction, then any stepping action is equivalent
// to StepOut, and we need to flood the parent function. // to StepOut, and we need to flood the parent function.
if (IsAtReturn(frame) || step_action == StepOut) { if (IsAtReturn(frame) || step_action == StepOut) {
it.Advance(); it.Advance();
if (it.done() || !it.frame()->is_wasm_compiled()) return; if (it.done() || !it.frame()->is_wasm_compiled()) return;
frame = WasmCompiledFrame::cast(it.frame()); frame = WasmCompiledFrame::cast(it.frame());
return_location = kAfterWasmCall;
} }
if (static_cast<int>(frame->function_index()) != flooded_function_index_) { FloodWithBreakpoints(frame, isolate, return_location);
if (flooded_function_index_ != -1) {
std::vector<int>& breakpoints =
breakpoints_per_function_[flooded_function_index_];
RecompileLiftoffWithBreakpoints(flooded_function_index_,
VectorOf(breakpoints), isolate);
}
FloodWithBreakpoints(frame->function_index(), isolate);
flooded_function_index_ = frame->function_index();
}
stepping_frame_ = frame->id(); stepping_frame_ = frame->id();
} }
...@@ -789,11 +790,10 @@ class DebugInfoImpl { ...@@ -789,11 +790,10 @@ class DebugInfoImpl {
if (insertion_point == breakpoints.end()) return; if (insertion_point == breakpoints.end()) return;
if (*insertion_point != offset) return; if (*insertion_point != offset) return;
breakpoints.erase(insertion_point); breakpoints.erase(insertion_point);
if (func_index == flooded_function_index_) return;
breakpoints_copy = breakpoints; breakpoints_copy = breakpoints;
} }
RecompileLiftoffWithBreakpoints(func_index, VectorOf(breakpoints_copy),
current_isolate); UpdateBreakpoints(func_index, VectorOf(breakpoints_copy), current_isolate);
} }
void RemoveDebugSideTables(Vector<WasmCode* const> codes) { void RemoveDebugSideTables(Vector<WasmCode* const> codes) {
...@@ -903,7 +903,6 @@ class DebugInfoImpl { ...@@ -903,7 +903,6 @@ class DebugInfoImpl {
// code. The frame layout itself should be independent of breakpoints. // code. The frame layout itself should be independent of breakpoints.
// TODO(thibaudm): update other threads as well. // TODO(thibaudm): update other threads as well.
void UpdateReturnAddresses(Isolate* isolate, WasmCode* new_code) { void UpdateReturnAddresses(Isolate* isolate, WasmCode* new_code) {
DCHECK(new_code->is_liftoff());
// The first return location is after the breakpoint, others are after wasm // The first return location is after the breakpoint, others are after wasm
// calls. // calls.
ReturnLocation return_location = kAfterBreakpoint; ReturnLocation return_location = kAfterBreakpoint;
...@@ -916,17 +915,26 @@ class DebugInfoImpl { ...@@ -916,17 +915,26 @@ class DebugInfoImpl {
if (frame->native_module() != new_code->native_module()) continue; if (frame->native_module() != new_code->native_module()) continue;
if (frame->function_index() != new_code->index()) continue; if (frame->function_index() != new_code->index()) continue;
if (!frame->wasm_code()->is_liftoff()) continue; if (!frame->wasm_code()->is_liftoff()) continue;
int position = frame->position(); UpdateReturnAddress(frame, new_code, return_location);
int byte_offset = frame->byte_offset();
Address new_pc = FindNewPC(new_code, byte_offset, return_location);
PointerAuthentication::ReplacePC(frame->pc_address(), new_pc,
kSystemPointerSize);
USE(position);
// The frame position should still be the same after OSR.
DCHECK_EQ(position, frame->position());
} }
} }
// Patch the return address of a single {frame} so that, when control returns
// to it, execution continues in {new_code} instead of the previously
// installed code. {return_location} selects whether the frame resumes after
// a breakpoint or after a wasm call, which determines the exact target PC.
void UpdateReturnAddress(WasmCompiledFrame* frame, WasmCode* new_code,
                         ReturnLocation return_location) {
  // OSR of a frame is only supported between Liftoff code objects, and the
  // new code must belong to the very function this frame is executing.
  DCHECK(new_code->is_liftoff());
  DCHECK(frame->wasm_code()->is_liftoff());
  DCHECK_EQ(frame->function_index(), new_code->index());
  DCHECK_EQ(frame->native_module(), new_code->native_module());
#ifdef DEBUG
  int old_position = frame->position();
#endif
  Address target_pc =
      FindNewPC(new_code, frame->byte_offset(), return_location);
  PointerAuthentication::ReplacePC(frame->pc_address(), target_pc,
                                   kSystemPointerSize);
  // The frame position should still be the same after OSR.
  DCHECK_EQ(old_position, frame->position());
}
bool IsAtReturn(WasmCompiledFrame* frame) { bool IsAtReturn(WasmCompiledFrame* frame) {
DisallowHeapAllocation no_gc; DisallowHeapAllocation no_gc;
int position = frame->position(); int position = frame->position();
...@@ -956,10 +964,9 @@ class DebugInfoImpl { ...@@ -956,10 +964,9 @@ class DebugInfoImpl {
// function). // function).
std::unordered_map<int, std::vector<int>> breakpoints_per_function_; std::unordered_map<int, std::vector<int>> breakpoints_per_function_;
// Store the frame ID when stepping, to avoid breaking in recursive calls of // Store the frame ID when stepping, to avoid overwriting that frame when
// the same function. // setting or removing a breakpoint.
StackFrameId stepping_frame_ = NO_ID; StackFrameId stepping_frame_ = NO_ID;
int flooded_function_index_ = -1;
DISALLOW_COPY_AND_ASSIGN(DebugInfoImpl); DISALLOW_COPY_AND_ASSIGN(DebugInfoImpl);
}; };
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment