Commit dfd86b05 authored by Clemens Backes, committed by Commit Bot

[wasm] Fix flake about missed breakpoints

If multiple isolates were involved, we did not always hit the breakpoint
reliably in all isolates.

This CL fixes this flake via two changes:

1. Remove breakpoint info when tiering up.
   If we keep the breakpoint information, a second isolate that later
   sets the same breakpoint will see that the breakpoint already exists,
   and will not set it again, even though the code containing the
   breakpoint has been replaced at that point.
   This fixes a flake in the debug/wasm/breakpoints test.

2. Don't overwrite code containing breakpoints with default "tiered down" code.
   This is achieved by introducing another state in the {ForDebugging}
   enum which marks that code contains breakpoints. Otherwise it could
   happen that two isolates start tiering down (both recompiling missing
   functions in Liftoff), one isolate finishes and immediately sets a
   breakpoint, then the other isolate finishes and overwrites the code
   containing breakpoints with the usual {kForDebugging} code.
   Setting breakpoints is synchronized already, so overwriting
   breakpoint code with other breakpoint code is always safe.
   (A simplified sketch of this ordering check follows below.)
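
Below is a minimal, hypothetical sketch of the ordering idea behind change 2.
It is not V8's actual implementation; the {Code} struct and the
{ShouldInstallTieredDown} helper are invented for illustration. It shows how an
ordered {ForDebugging} state lets the publish step refuse to replace breakpoint
code with plain tiered-down code, while breakpoint code may still replace
breakpoint code:

#include <cassert>
#include <cstdint>
#include <iostream>

// Simplified mirror of the {ForDebugging} states. The declaration order
// matters: a later (stronger) value must never be overwritten by an earlier
// one while tiered down.
enum ForDebugging : int8_t {
  kNoDebugging = 0,
  kForDebugging,
  kWithBreakpoints,
  kForStepping
};

// Hypothetical stand-in for a compiled code object.
struct Code {
  ForDebugging for_debugging;
};

// Sketch of the tiered-down publish decision: stepping code is never
// installed in the code table, and existing code is only replaced if the new
// code's debugging state is at least as strong as the old one.
bool ShouldInstallTieredDown(const Code* prior_code, const Code& new_code) {
  static_assert(
      kForDebugging > kNoDebugging && kWithBreakpoints > kForDebugging,
      "for_debugging is ordered");
  if (new_code.for_debugging == kForStepping) return false;
  if (prior_code == nullptr) return true;
  return prior_code->for_debugging <= new_code.for_debugging;
}

int main() {
  Code with_breakpoints{kWithBreakpoints};
  Code plain_debug{kForDebugging};
  // Plain tiered-down code must not clobber code that carries breakpoints...
  assert(!ShouldInstallTieredDown(&with_breakpoints, plain_debug));
  // ...but breakpoint code may replace breakpoint code (breakpoint setting is
  // already synchronized, so this is safe).
  assert(ShouldInstallTieredDown(&with_breakpoints, with_breakpoints));
  std::cout << "ok\n";
}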

R=thibaudm@chromium.org

Bug: v8:10611, v8:10359
Change-Id: I171d86b110a54f9eb5e4c3fa35108638904212e8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2316080
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69088}
parent 46f674ff
@@ -1098,10 +1098,18 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
   // code, which is only used for a single frame and never installed in the
   // code table of jump table). Otherwise, install code if it was compiled
   // with a higher tier.
+  static_assert(
+      kForDebugging > kNoDebugging && kWithBreakpoints > kForDebugging,
+      "for_debugging is ordered");
   const bool update_code_table =
-      tiering_state_ == kTieredDown
-          ? !prior_code || code->for_debugging() == kForDebugging
-          : !prior_code || prior_code->tier() < code->tier();
+      // Never install stepping code.
+      code->for_debugging() != kForStepping &&
+      (!prior_code ||
+       (tiering_state_ == kTieredDown
+            // Tiered down: Install breakpoints over normal debug code.
+            ? prior_code->for_debugging() <= code->for_debugging()
+            // Tiered up: Install if the tier is higher than before.
+            : prior_code->tier() < code->tier()));
   if (update_code_table) {
     code_table_[slot_idx] = code.get();
     if (prior_code) {
@@ -418,8 +418,9 @@ class DebugInfoImpl {
                       wire_bytes.begin() + function->code.end_offset()};
     std::unique_ptr<DebugSideTable> debug_sidetable;
-    ForDebugging for_debugging =
-        offsets.size() == 1 && offsets[0] == 0 ? kForStepping : kForDebugging;
+    ForDebugging for_debugging = offsets.size() == 1 && offsets[0] == 0
+                                     ? kForStepping
+                                     : kWithBreakpoints;
     Counters* counters = nullptr;
     WasmFeatures unused_detected;
     WasmCompilationResult result = ExecuteLiftoffCompilation(
@@ -653,33 +653,40 @@ void WasmEngine::TierDownAllModulesPerIsolate(Isolate* isolate) {
 void WasmEngine::TierUpAllModulesPerIsolate(Isolate* isolate) {
   // Only trigger recompilation after releasing the mutex, otherwise we risk
-  // deadlocks because of lock inversion.
-  std::vector<std::shared_ptr<NativeModule>> native_modules_to_recompile;
+  // deadlocks because of lock inversion. The bool tells whether the module
+  // needs recompilation for tier up.
+  std::vector<std::pair<std::shared_ptr<NativeModule>, bool>> native_modules;
   {
     base::MutexGuard lock(&mutex_);
     isolates_[isolate]->keep_tiered_down = false;
-    auto test_keep_tiered_down = [this](NativeModule* native_module) {
+    auto test_can_tier_up = [this](NativeModule* native_module) {
       DCHECK_EQ(1, native_modules_.count(native_module));
       for (auto* isolate : native_modules_[native_module]->isolates) {
         DCHECK_EQ(1, isolates_.count(isolate));
-        if (isolates_[isolate]->keep_tiered_down) return true;
+        if (isolates_[isolate]->keep_tiered_down) return false;
       }
-      return false;
+      return true;
     };
     for (auto* native_module : isolates_[isolate]->native_modules) {
+      DCHECK_EQ(1, native_modules_.count(native_module));
+      auto shared_ptr = native_modules_[native_module]->weak_ptr.lock();
+      if (!shared_ptr) continue;  // The module is not used any more.
       if (!native_module->IsTieredDown()) continue;
-      // Only start tier-up if no other isolate needs this modules in tiered
+      // Only start tier-up if no other isolate needs this module in tiered
       // down state.
-      if (test_keep_tiered_down(native_module)) continue;
-      native_module->SetTieringState(kTieredUp);
-      DCHECK_EQ(1, native_modules_.count(native_module));
-      if (auto shared_ptr = native_modules_[native_module]->weak_ptr.lock()) {
-        native_modules_to_recompile.emplace_back(std::move(shared_ptr));
-      }
+      bool tier_up = test_can_tier_up(native_module);
+      if (tier_up) native_module->SetTieringState(kTieredUp);
+      native_modules.emplace_back(std::move(shared_ptr), tier_up);
     }
   }
-  for (auto& native_module : native_modules_to_recompile) {
-    native_module->RecompileForTiering();
+  for (auto& entry : native_modules) {
+    auto& native_module = entry.first;
+    bool tier_up = entry.second;
+    // Remove all breakpoints set by this isolate.
+    if (native_module->HasDebugInfo()) {
+      native_module->GetDebugInfo()->RemoveIsolate(isolate);
+    }
+    if (tier_up) native_module->RecompileForTiering();
   }
 }
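
As a side note, the hunk above follows a common pattern for avoiding
lock-order inversion: decide what to do and take strong references while
holding the engine mutex, then perform the recompilation calls only after the
mutex is released. A rough, generic sketch of that pattern follows; the
{Engine} and {Module} types here are invented for illustration and are not the
V8 classes.

#include <memory>
#include <mutex>
#include <utility>
#include <vector>

// Hypothetical stand-in for a module that may take its own locks.
struct Module {
  void Recompile() { /* may take other locks internally */ }
};

class Engine {
 public:
  void TierUpAll() {
    // Phase 1: under the engine mutex, only decide what to do and keep the
    // modules alive via shared_ptr.
    std::vector<std::pair<std::shared_ptr<Module>, bool>> to_process;
    {
      std::lock_guard<std::mutex> guard(mutex_);
      for (auto& weak : modules_) {
        auto strong = weak.lock();
        if (!strong) continue;  // Module already gone.
        // Simplification: always mark for tier-up in this sketch.
        to_process.emplace_back(std::move(strong), /*tier_up=*/true);
      }
    }  // Engine mutex released here.
    // Phase 2: perform the heavy, potentially lock-taking work without
    // holding the engine mutex.
    for (auto& [module, tier_up] : to_process) {
      if (tier_up) module->Recompile();
    }
  }

 private:
  std::mutex mutex_;
  std::vector<std::weak_ptr<Module>> modules_;
};

int main() {
  Engine engine;
  engine.TierUpAll();  // No modules registered; just demonstrates the flow.
}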
@@ -32,9 +32,15 @@ inline const char* ExecutionTierToString(ExecutionTier tier) {
   }
 }
-// {kForDebugging} is used for default tiered-down code (potentially with
-// breakpoints), {kForStepping} is code that is flooded with breakpoints.
-enum ForDebugging : int8_t { kNoDebugging = 0, kForDebugging, kForStepping };
+// {kForDebugging} is used for default tiered-down code, {kWithBreakpoints} if
+// the code also contains breakpoints, and {kForStepping} for code that is
+// flooded with breakpoints.
+enum ForDebugging : int8_t {
+  kNoDebugging = 0,
+  kForDebugging,
+  kWithBreakpoints,
+  kForStepping
+};
 enum TieringState : int8_t { kTieredUp, kTieredDown };