Commit 813c5954 authored by Clemens Backes, committed by Commit Bot

[wasm][debug] Do not hold lock while recompiling functions

This is to avoid a lock inversion problem. In many situations, the
{NativeModule} lock is held while getting the {DebugInfo} lock.
Hence we should never do it the other way around, otherwise we risk a
deadlock.
When setting a breakpoint, we hold the {DebugInfo} lock when triggering
recompilation, but recompilation accesses the {NativeModule} for
creating the {CompilationEnv}, and therefore takes the {NativeModule}
lock.
This CL fixes this lock inversion by giving up the {DebugInfo} lock
before recompiling functions.

R=thibaudm@chromium.org

Bug: v8:10351
Change-Id: Ic818c6589b2b532006aee4c16bac92b2fe79fa65
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2139574
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67042}
parent c4e7f6b6
...@@ -67,7 +67,8 @@ class V8_BASE_EXPORT Mutex final { ...@@ -67,7 +67,8 @@ class V8_BASE_EXPORT Mutex final {
return native_handle_; return native_handle_;
} }
V8_INLINE void AssertHeld() { DCHECK_EQ(1, level_); } V8_INLINE void AssertHeld() const { DCHECK_EQ(1, level_); }
V8_INLINE void AssertUnheld() const { DCHECK_EQ(0, level_); }
private: private:
NativeHandle native_handle_; NativeHandle native_handle_;
......
...@@ -644,6 +644,10 @@ class DebugInfoImpl { ...@@ -644,6 +644,10 @@ class DebugInfoImpl {
void RecompileLiftoffWithBreakpoints(int func_index, Vector<int> offsets, void RecompileLiftoffWithBreakpoints(int func_index, Vector<int> offsets,
Isolate* current_isolate) { Isolate* current_isolate) {
// During compilation, we cannot hold the lock, since compilation takes the
// {NativeModule} lock, which could lead to deadlocks.
mutex_.AssertUnheld();
if (func_index == flooded_function_index_) { if (func_index == flooded_function_index_) {
// We should not be flooding a function that is already flooded. // We should not be flooding a function that is already flooded.
DCHECK(!(offsets.size() == 1 && offsets[0] == 0)); DCHECK(!(offsets.size() == 1 && offsets[0] == 0));
...@@ -685,33 +689,37 @@ class DebugInfoImpl { ...@@ -685,33 +689,37 @@ class DebugInfoImpl {
} }
void SetBreakpoint(int func_index, int offset, Isolate* current_isolate) { void SetBreakpoint(int func_index, int offset, Isolate* current_isolate) {
// Hold the mutex while setting the breakpoint. This guards against multiple std::vector<int> breakpoints_copy;
// isolates setting breakpoints at the same time. We don't really support {
// that scenario yet, but concurrently compiling and installing different // Hold the mutex while modifying the set of breakpoints, but release it
// Liftoff variants of a function would be problematic. // before compiling the new code (see comment in
base::MutexGuard guard(&mutex_); // {RecompileLiftoffWithBreakpoints}). This needs to be revisited once we
// support setting different breakpoints in different isolates
// offset == 0 indicates flooding and should not happen here. // (https://crbug.com/v8/10351).
DCHECK_NE(0, offset); base::MutexGuard guard(&mutex_);
// offset == 0 indicates flooding and should not happen here.
DCHECK_NE(0, offset);
std::vector<int>& breakpoints = breakpoints_per_function_[func_index];
auto insertion_point =
std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
if (insertion_point != breakpoints.end() && *insertion_point == offset) {
// The breakpoint is already set.
return;
}
breakpoints.insert(insertion_point, offset);
std::vector<int>& breakpoints = breakpoints_per_function_[func_index]; // No need to recompile if the function is already flooded.
auto insertion_point = if (func_index == flooded_function_index_) return;
std::lower_bound(breakpoints.begin(), breakpoints.end(), offset); breakpoints_copy = breakpoints;
if (insertion_point != breakpoints.end() && *insertion_point == offset) {
// The breakpoint is already set.
return;
} }
breakpoints.insert(insertion_point, offset);
// No need to recompile if the function is already flooded.
if (func_index == flooded_function_index_) return;
RecompileLiftoffWithBreakpoints(func_index, VectorOf(breakpoints), RecompileLiftoffWithBreakpoints(func_index, VectorOf(breakpoints_copy),
current_isolate); current_isolate);
} }
void FloodWithBreakpoints(int func_index, Isolate* current_isolate) { void FloodWithBreakpoints(int func_index, Isolate* current_isolate) {
base::MutexGuard guard(&mutex_);
// 0 is an invalid offset used to indicate flooding. // 0 is an invalid offset used to indicate flooding.
int offset = 0; int offset = 0;
RecompileLiftoffWithBreakpoints(func_index, Vector<int>(&offset, 1), RecompileLiftoffWithBreakpoints(func_index, Vector<int>(&offset, 1),
...@@ -756,18 +764,23 @@ class DebugInfoImpl { ...@@ -756,18 +764,23 @@ class DebugInfoImpl {
void RemoveBreakpoint(int func_index, int position, void RemoveBreakpoint(int func_index, int position,
Isolate* current_isolate) { Isolate* current_isolate) {
base::MutexGuard guard(&mutex_); std::vector<int> breakpoints_copy;
const auto& function = native_module_->module()->functions[func_index]; {
int offset = position - function.code.offset(); base::MutexGuard guard(&mutex_);
const auto& function = native_module_->module()->functions[func_index];
std::vector<int>& breakpoints = breakpoints_per_function_[func_index]; int offset = position - function.code.offset();
DCHECK_LT(0, offset);
auto insertion_point = std::vector<int>& breakpoints = breakpoints_per_function_[func_index];
std::lower_bound(breakpoints.begin(), breakpoints.end(), offset); DCHECK_LT(0, offset);
if (insertion_point != breakpoints.end() && *insertion_point == offset) { auto insertion_point =
std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
if (insertion_point == breakpoints.end()) return;
if (*insertion_point != offset) return;
breakpoints.erase(insertion_point); breakpoints.erase(insertion_point);
if (func_index == flooded_function_index_) return;
breakpoints_copy = breakpoints;
} }
RecompileLiftoffWithBreakpoints(func_index, VectorOf(breakpoints), RecompileLiftoffWithBreakpoints(func_index, VectorOf(breakpoints_copy),
current_isolate); current_isolate);
} }
...@@ -781,9 +794,13 @@ class DebugInfoImpl { ...@@ -781,9 +794,13 @@ class DebugInfoImpl {
private: private:
const DebugSideTable* GetDebugSideTable(WasmCode* code, const DebugSideTable* GetDebugSideTable(WasmCode* code,
AccountingAllocator* allocator) { AccountingAllocator* allocator) {
base::MutexGuard guard(&mutex_); {
if (auto& existing_table = debug_side_tables_[code]) { // Only hold the mutex temporarily. We can't hold it while generating the
return existing_table.get(); // debug side table, because compilation takes the {NativeModule} lock.
base::MutexGuard guard(&mutex_);
if (auto& existing_table = debug_side_tables_[code]) {
return existing_table.get();
}
} }
// Otherwise create the debug side table now. // Otherwise create the debug side table now.
...@@ -799,7 +816,10 @@ class DebugInfoImpl { ...@@ -799,7 +816,10 @@ class DebugInfoImpl {
DebugSideTable* ret = debug_side_table.get(); DebugSideTable* ret = debug_side_table.get();
// Install into cache and return. // Install into cache and return.
debug_side_tables_[code] = std::move(debug_side_table); {
base::MutexGuard guard(&mutex_);
debug_side_tables_[code] = std::move(debug_side_table);
}
return ret; return ret;
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment