Commit 813c5954 authored by Clemens Backes, committed by Commit Bot

[wasm][debug] Do not hold lock while recompiling functions

This is to avoid a lock inversion problem. In many situations, the
{NativeModule} lock is held while getting the {DebugInfo} lock.
Hence we should never take the locks the other way around, otherwise we
risk a deadlock.
When setting a breakpoint, we hold the {DebugInfo} lock when triggering
recompilation, but recompilation accesses the {NativeModule} for
creating the {CompilationEnv}, and therefore takes the {NativeModule}
lock.
This CL fixes this lock inversion by giving up the {DebugInfo} lock
before recompiling functions.

R=thibaudm@chromium.org

Bug: v8:10351
Change-Id: Ic818c6589b2b532006aee4c16bac92b2fe79fa65
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2139574
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67042}
parent c4e7f6b6
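The following is a minimal standalone sketch of the lock inversion described above and of the fix applied in this CL. It uses std::mutex and std::lock_guard instead of V8's base::Mutex/MutexGuard, and the class and method names (NativeModuleLike, DebugInfoLike, Compile, SetBreakpoint*) are illustrative stand-ins, not the actual V8 types.

#include <mutex>
#include <vector>

struct NativeModuleLike {
  std::mutex lock;  // taken during compilation, e.g. to build the CompilationEnv
  void Compile(const std::vector<int>& breakpoints) {
    std::lock_guard<std::mutex> guard(lock);
    // ... recompile the function with the given breakpoints ...
    (void)breakpoints;
  }
};

struct DebugInfoLike {
  std::mutex mutex_;
  std::vector<int> breakpoints_;
  NativeModuleLike* native_module_;

  // Pre-fix shape: recompiling while holding mutex_ acquires the NativeModule
  // lock inside the DebugInfo lock. Other code paths take the locks in the
  // opposite order (NativeModule first, then DebugInfo), so two threads can
  // deadlock.
  void SetBreakpointDeadlockProne(int offset) {
    std::lock_guard<std::mutex> guard(mutex_);
    breakpoints_.push_back(offset);
    native_module_->Compile(breakpoints_);  // nested lock acquisition
  }

  // Post-fix shape (what this CL does): copy the breakpoint list under
  // mutex_, release it, then recompile without holding the DebugInfo lock.
  void SetBreakpointFixed(int offset) {
    std::vector<int> breakpoints_copy;
    {
      std::lock_guard<std::mutex> guard(mutex_);
      breakpoints_.push_back(offset);
      breakpoints_copy = breakpoints_;
    }
    native_module_->Compile(breakpoints_copy);
  }
};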
@@ -67,7 +67,8 @@ class V8_BASE_EXPORT Mutex final {
return native_handle_;
}
- V8_INLINE void AssertHeld() { DCHECK_EQ(1, level_); }
+ V8_INLINE void AssertHeld() const { DCHECK_EQ(1, level_); }
+ V8_INLINE void AssertUnheld() const { DCHECK_EQ(0, level_); }
private:
NativeHandle native_handle_;
......
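Below is a minimal sketch of how a debug-only "is this mutex held?" assertion like the one added above can be built; V8's base::Mutex tracks a level_ counter in debug builds for the same purpose. CheckedMutex and its members are illustrative names, not V8's implementation, and the counter is only meaningful when queried by the thread that expects to own (or not own) the lock.

#include <cassert>
#include <mutex>

class CheckedMutex {
 public:
  void Lock() {
    mutex_.lock();
    level_ = 1;  // set only while the lock is held
  }
  void Unlock() {
    level_ = 0;  // cleared before releasing the lock
    mutex_.unlock();
  }
  // Debug aids: callers assert their expectation about the lock state.
  void AssertHeld() const { assert(level_ == 1); }
  void AssertUnheld() const { assert(level_ == 0); }

 private:
  std::mutex mutex_;
  int level_ = 0;  // 0 = unlocked, 1 = locked (non-recursive mutex)
};

RecompileLiftoffWithBreakpoints below uses exactly this kind of check (mutex_.AssertUnheld()) to document and enforce that it must be entered without the DebugInfo lock.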
@@ -644,6 +644,10 @@ class DebugInfoImpl {
void RecompileLiftoffWithBreakpoints(int func_index, Vector<int> offsets,
Isolate* current_isolate) {
+ // During compilation, we cannot hold the lock, since compilation takes the
+ // {NativeModule} lock, which could lead to deadlocks.
+ mutex_.AssertUnheld();
if (func_index == flooded_function_index_) {
// We should not be flooding a function that is already flooded.
DCHECK(!(offsets.size() == 1 && offsets[0] == 0));
@@ -685,10 +689,13 @@ class DebugInfoImpl {
}
void SetBreakpoint(int func_index, int offset, Isolate* current_isolate) {
- // Hold the mutex while setting the breakpoint. This guards against multiple
- // isolates setting breakpoints at the same time. We don't really support
- // that scenario yet, but concurrently compiling and installing different
- // Liftoff variants of a function would be problematic.
+ std::vector<int> breakpoints_copy;
+ {
+ // Hold the mutex while modifying the set of breakpoints, but release it
+ // before compiling the new code (see comment in
+ // {RecompileLiftoffWithBreakpoints}). This needs to be revisited once we
+ // support setting different breakpoints in different isolates
+ // (https://crbug.com/v8/10351).
base::MutexGuard guard(&mutex_);
// offset == 0 indicates flooding and should not happen here.
@@ -705,13 +712,14 @@ class DebugInfoImpl {
// No need to recompile if the function is already flooded.
if (func_index == flooded_function_index_) return;
+ breakpoints_copy = breakpoints;
+ }
- RecompileLiftoffWithBreakpoints(func_index, VectorOf(breakpoints),
+ RecompileLiftoffWithBreakpoints(func_index, VectorOf(breakpoints_copy),
current_isolate);
}
void FloodWithBreakpoints(int func_index, Isolate* current_isolate) {
base::MutexGuard guard(&mutex_);
// 0 is an invalid offset used to indicate flooding.
int offset = 0;
RecompileLiftoffWithBreakpoints(func_index, Vector<int>(&offset, 1),
@@ -756,6 +764,8 @@ class DebugInfoImpl {
void RemoveBreakpoint(int func_index, int position,
Isolate* current_isolate) {
+ std::vector<int> breakpoints_copy;
+ {
base::MutexGuard guard(&mutex_);
const auto& function = native_module_->module()->functions[func_index];
int offset = position - function.code.offset();
@@ -764,10 +774,13 @@ class DebugInfoImpl {
DCHECK_LT(0, offset);
auto insertion_point =
std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
- if (insertion_point != breakpoints.end() && *insertion_point == offset) {
+ if (insertion_point == breakpoints.end()) return;
+ if (*insertion_point != offset) return;
breakpoints.erase(insertion_point);
if (func_index == flooded_function_index_) return;
+ breakpoints_copy = breakpoints;
+ }
- RecompileLiftoffWithBreakpoints(func_index, VectorOf(breakpoints),
+ RecompileLiftoffWithBreakpoints(func_index, VectorOf(breakpoints_copy),
current_isolate);
}
@@ -781,10 +794,14 @@ class DebugInfoImpl {
private:
const DebugSideTable* GetDebugSideTable(WasmCode* code,
AccountingAllocator* allocator) {
+ {
+ // Only hold the mutex temporarily. We can't hold it while generating the
+ // debug side table, because compilation takes the {NativeModule} lock.
base::MutexGuard guard(&mutex_);
if (auto& existing_table = debug_side_tables_[code]) {
return existing_table.get();
}
+ }
// Otherwise create the debug side table now.
auto* module = native_module_->module();
@@ -799,7 +816,10 @@ class DebugInfoImpl {
DebugSideTable* ret = debug_side_table.get();
// Install into cache and return.
+ {
+ base::MutexGuard guard(&mutex_);
debug_side_tables_[code] = std::move(debug_side_table);
+ }
return ret;
}
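The caching in GetDebugSideTable above follows a common pattern: look up the cache while holding the mutex, release it for the expensive computation (which may itself take other locks, here the {NativeModule} lock), then re-acquire the mutex to install the result. A rough standalone sketch of that pattern, with illustrative types (Key, SideTable, Cache) rather than the actual V8 classes:

#include <map>
#include <memory>
#include <mutex>

struct Key {};        // stand-in for the WasmCode* cache key
struct SideTable {};  // stand-in for the computed DebugSideTable

class Cache {
 public:
  const SideTable* Get(const Key* key) {
    {
      std::lock_guard<std::mutex> guard(mutex_);
      auto it = tables_.find(key);
      if (it != tables_.end()) return it->second.get();
    }
    // Compute outside the lock; this is the part that may take other locks.
    auto table = std::make_unique<SideTable>();
    const SideTable* result = table.get();
    {
      std::lock_guard<std::mutex> guard(mutex_);
      tables_[key] = std::move(table);
    }
    return result;
  }

 private:
  std::mutex mutex_;
  std::map<const Key*, std::unique_ptr<SideTable>> tables_;
};

Note that in this sketch two threads can both miss the cache and compute the table; the later install simply overwrites the earlier one, which is acceptable when the results are equivalent.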
......