Commit 13567f5f authored by Clemens Backes, committed by V8 LUCI CQ

[wasm] Remove kFinishedTopTierCompilation event

This fully removes the kFinishedTopTierCompilation event, and any
handling of it. In a dynamic tiering world, that event has no meaning
any more.

R=ahaas@chromium.org

Bug: v8:12899
Change-Id: I36484e36f7c36f2ac4fcb111e67a14509c2eefef
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3667081
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80930}
parent 51d662f7
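Note: with {kFinishedTopTierCompilation} gone, {kFailedCompilation} is the only event left in {kFinalEvents} (see the AddCallback hunk below), so a callback added after a successful baseline compile stays registered and keeps receiving {kFinishedCompilationChunk} events. A minimal, self-contained sketch of that bookkeeping follows; it is not the V8 implementation, and it swaps the V8-internal types (base::EnumSet, CompilationEventCallback, the callbacks mutex) for standard-library stand-ins. Only the event names come from the diff.

// Self-contained model of the AddCallback() bookkeeping after this change.
#include <cstdint>
#include <functional>
#include <set>
#include <utility>
#include <vector>

enum class CompilationEvent : uint8_t {
  kFinishedBaselineCompilation,
  kFinishedExportWrappers,
  kFinishedCompilationChunk,
  kFailedCompilation,
  kFinishedRecompilation
};

class CallbackRegistry {  // illustrative stand-in, not a V8 class
 public:
  using Callback = std::function<void(CompilationEvent)>;

  void AddCallback(Callback callback) {
    // Immediately trigger events that already happened.
    for (auto event : {CompilationEvent::kFinishedExportWrappers,
                       CompilationEvent::kFinishedBaselineCompilation,
                       CompilationEvent::kFailedCompilation}) {
      if (finished_events_.count(event)) callback(event);
    }
    // {kFailedCompilation} is the only remaining "final" event; keep the
    // callback around unless that event already fired.
    if (!finished_events_.count(CompilationEvent::kFailedCompilation)) {
      callbacks_.push_back(std::move(callback));
    }
  }

  void Trigger(CompilationEvent event) {
    finished_events_.insert(event);
    for (auto& callback : callbacks_) callback(event);
  }

 private:
  std::set<CompilationEvent> finished_events_;
  std::vector<Callback> callbacks_;
};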
@@ -111,14 +111,12 @@ class WireBytesStorage {
   virtual base::Optional<ModuleWireBytes> GetModuleBytes() const = 0;
 };
 
-// Callbacks will receive either {kFailedCompilation} or both
-// {kFinishedBaselineCompilation} and {kFinishedTopTierCompilation}, in that
-// order. If tier up is off, both events are delivered right after each other.
+// Callbacks will receive either {kFailedCompilation} or
+// {kFinishedBaselineCompilation}.
 enum class CompilationEvent : uint8_t {
   kFinishedBaselineCompilation,
   kFinishedExportWrappers,
   kFinishedCompilationChunk,
-  kFinishedTopTierCompilation,
   kFailedCompilation,
   kFinishedRecompilation
 };
@@ -169,7 +167,6 @@ class V8_EXPORT_PRIVATE CompilationState {
   bool failed() const;
   bool baseline_compilation_finished() const;
-  bool top_tier_compilation_finished() const;
   bool recompilation_finished() const;
 
   void set_compilation_id(int compilation_id);
...
@@ -646,11 +646,6 @@ class CompilationStateImpl {
            outstanding_export_wrappers_ == 0;
   }
 
-  bool top_tier_compilation_finished() const {
-    base::MutexGuard guard(&callbacks_mutex_);
-    return outstanding_top_tier_functions_ == 0;
-  }
-
   bool recompilation_finished() const {
     base::MutexGuard guard(&callbacks_mutex_);
     return outstanding_recompilation_functions_ == 0;
@@ -873,10 +868,6 @@ bool CompilationState::baseline_compilation_finished() const {
   return Impl(this)->baseline_compilation_finished();
 }
 
-bool CompilationState::top_tier_compilation_finished() const {
-  return Impl(this)->top_tier_compilation_finished();
-}
-
 bool CompilationState::recompilation_finished() const {
   return Impl(this)->recompilation_finished();
 }
@@ -2371,11 +2362,6 @@ class AsyncCompileJob::CompilationStateCallback
         DCHECK(CompilationEvent::kFinishedBaselineCompilation == last_event_ ||
                CompilationEvent::kFinishedCompilationChunk == last_event_);
         break;
-      case CompilationEvent::kFinishedTopTierCompilation:
-        DCHECK(CompilationEvent::kFinishedBaselineCompilation == last_event_);
-        // At this point, the job will already be gone, thus do not access it
-        // here.
-        break;
       case CompilationEvent::kFailedCompilation:
         DCHECK(!last_event_.has_value() ||
                last_event_ == CompilationEvent::kFinishedExportWrappers);
@@ -2389,8 +2375,7 @@ class AsyncCompileJob::CompilationStateCallback
         }
         break;
       case CompilationEvent::kFinishedRecompilation:
-        // This event can happen either before or after
-        // {kFinishedTopTierCompilation}, hence don't remember this in
+        // This event can happen out of order, hence don't remember this in
         // {last_event_}.
         return;
     }
@@ -3295,13 +3280,6 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
       // The {kFinishedBaselineCompilation} event is needed for module
       // compilation to finish.
       finished_events_.Add(CompilationEvent::kFinishedBaselineCompilation);
-      if (liftoff_functions.empty() && lazy_functions.empty()) {
-        // All functions exist now as TurboFan functions, so we can trigger the
-        // {kFinishedTopTierCompilation} event.
-        // The {kFinishedTopTierCompilation} event is needed for the C-API so
-        // that {serialize()} works after {deserialize()}.
-        finished_events_.Add(CompilationEvent::kFinishedTopTierCompilation);
-      }
     }
     compilation_progress_.assign(module->num_declared_functions,
                                  kProgressAfterTurbofanDeserialization);
@@ -3418,14 +3396,12 @@ void CompilationStateImpl::AddCallback(
   // Immediately trigger events that already happened.
   for (auto event : {CompilationEvent::kFinishedExportWrappers,
                      CompilationEvent::kFinishedBaselineCompilation,
-                     CompilationEvent::kFinishedTopTierCompilation,
                      CompilationEvent::kFailedCompilation}) {
     if (finished_events_.contains(event)) {
       callback->call(event);
     }
   }
   constexpr base::EnumSet<CompilationEvent> kFinalEvents{
-      CompilationEvent::kFinishedTopTierCompilation,
       CompilationEvent::kFailedCompilation};
   if (!finished_events_.contains_any(kFinalEvents)) {
     callbacks_.emplace_back(std::move(callback));
@@ -3636,17 +3612,17 @@ void CompilationStateImpl::TriggerCallbacks(
       triggered_events.Add(CompilationEvent::kFinishedExportWrappers);
       if (outstanding_baseline_units_ == 0) {
         triggered_events.Add(CompilationEvent::kFinishedBaselineCompilation);
-        if (!dynamic_tiering_ && outstanding_top_tier_functions_ == 0) {
-          triggered_events.Add(CompilationEvent::kFinishedTopTierCompilation);
-        }
       }
     }
 
+  // For dynamic tiering, trigger "compilation chunk finished" after a new chunk
+  // of size {FLAG_wasm_caching_threshold}.
   if (dynamic_tiering_ && static_cast<size_t>(FLAG_wasm_caching_threshold) <
                               bytes_since_last_chunk_) {
     triggered_events.Add(CompilationEvent::kFinishedCompilationChunk);
     bytes_since_last_chunk_ = 0;
   }
   if (compile_failed_.load(std::memory_order_relaxed)) {
     // *Only* trigger the "failed" event.
     triggered_events =
@@ -3670,8 +3646,6 @@ void CompilationStateImpl::TriggerCallbacks(
                      "wasm.ExportWrappersFinished"),
       std::make_pair(CompilationEvent::kFinishedBaselineCompilation,
                      "wasm.BaselineFinished"),
-      std::make_pair(CompilationEvent::kFinishedTopTierCompilation,
-                     "wasm.TopTierFinished"),
       std::make_pair(CompilationEvent::kFinishedCompilationChunk,
                      "wasm.CompilationChunkFinished"),
       std::make_pair(CompilationEvent::kFinishedRecompilation,
@@ -3854,15 +3828,8 @@ void CompilationStateImpl::WaitForCompilationEvent(
   };
 
   WaitForEventDelegate delegate{done};
-  // Everything except for top-tier units will be processed with kBaselineOnly
-  // (including wrappers). Hence we choose this for any event except
-  // {kFinishedTopTierCompilation}.
-  auto compile_tiers =
-      expect_event == CompilationEvent::kFinishedTopTierCompilation
-          ? kBaselineOrTopTier
-          : kBaselineOnly;
   ExecuteCompilationUnits(native_module_weak_, async_counters_.get(), &delegate,
-                          compile_tiers);
+                          kBaselineOnly);
   semaphore->Wait();
 }
...
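With the top-tier event removed, the caching trigger under dynamic tiering is {kFinishedCompilationChunk}, fired once more than {FLAG_wasm_caching_threshold} bytes of code have accumulated since the last chunk (see the TriggerCallbacks hunk above). Below is a small, self-contained sketch of that accumulate-and-reset pattern; the class and method names are illustrative, and how the byte count is fed in is an assumption for the example, not taken from this change.

#include <cstddef>

// Illustrative stand-in for the bytes_since_last_chunk_ logic shown above.
class ChunkTracker {
 public:
  explicit ChunkTracker(size_t caching_threshold)
      : caching_threshold_(caching_threshold) {}

  // Accumulate newly generated code size; returns true when a full chunk has
  // been produced and a kFinishedCompilationChunk event should be triggered.
  bool AddGeneratedCode(size_t code_size_bytes) {
    bytes_since_last_chunk_ += code_size_bytes;
    if (caching_threshold_ < bytes_since_last_chunk_) {
      bytes_since_last_chunk_ = 0;  // start counting the next chunk
      return true;
    }
    return false;
  }

 private:
  const size_t caching_threshold_;  // stand-in for FLAG_wasm_caching_threshold
  size_t bytes_since_last_chunk_ = 0;
};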
@@ -332,10 +332,7 @@ class CompilationChunkFinishedCallback : public CompilationEventCallback {
   }
 
   void call(CompilationEvent event) override {
-    if (event != CompilationEvent::kFinishedCompilationChunk &&
-        event != CompilationEvent::kFinishedTopTierCompilation) {
-      return;
-    }
+    if (event != CompilationEvent::kFinishedCompilationChunk) return;
     // If the native module is still alive, get back a shared ptr and call the
     // callback.
     if (std::shared_ptr<NativeModule> native_module = native_module_.lock()) {
...
@@ -1705,15 +1705,11 @@ void NativeModule::SetWireBytes(base::OwnedVector<const uint8_t> wire_bytes) {
 }
 
 void NativeModule::UpdateCPUDuration(size_t cpu_duration, ExecutionTier tier) {
-  if (tier == WasmCompilationUnit::GetBaselineExecutionTier(this->module())) {
-    if (!compilation_state_->baseline_compilation_finished()) {
-      baseline_compilation_cpu_duration_.fetch_add(cpu_duration,
-                                                   std::memory_order_relaxed);
-    }
+  if (!compilation_state_->baseline_compilation_finished()) {
+    baseline_compilation_cpu_duration_.fetch_add(cpu_duration,
+                                                 std::memory_order_relaxed);
   } else if (tier == ExecutionTier::kTurbofan) {
-    if (!compilation_state_->top_tier_compilation_finished()) {
-      tier_up_cpu_duration_.fetch_add(cpu_duration, std::memory_order_relaxed);
-    }
+    tier_up_cpu_duration_.fetch_add(cpu_duration, std::memory_order_relaxed);
   }
 }
...
@@ -195,7 +195,6 @@ TEST(Run_WasmModule_CompilationHintsNoTiering) {
       CHECK_EQ(expected_tier, actual_tier);
       auto* compilation_state = native_module->compilation_state();
       CHECK(compilation_state->baseline_compilation_finished());
-      CHECK(compilation_state->top_tier_compilation_finished());
     }
     Cleanup();
   }
@@ -247,19 +246,14 @@ TEST(Run_WasmModule_CompilationHintsTierUp) {
       CHECK(compilation_state->baseline_compilation_finished());
     }
 
-    // Busy wait for top tier compilation to finish.
-    while (!compilation_state->top_tier_compilation_finished()) {
-    }
-
-    // Expect top tier code.
+    // Tier-up is happening in the background. Eventually we should have top
+    // tier code.
     ExecutionTier top_tier = ExecutionTier::kTurbofan;
-    {
+    ExecutionTier actual_tier = ExecutionTier::kNone;
+    while (actual_tier != top_tier) {
       CHECK(native_module->HasCode(kFuncIndex));
       WasmCodeRefScope code_ref_scope;
-      ExecutionTier actual_tier = native_module->GetCode(kFuncIndex)->tier();
-      CHECK_EQ(top_tier, actual_tier);
-      CHECK(compilation_state->baseline_compilation_finished());
-      CHECK(compilation_state->top_tier_compilation_finished());
+      actual_tier = native_module->GetCode(kFuncIndex)->tier();
     }
   }
   Cleanup();
@@ -301,23 +295,19 @@ TEST(Run_WasmModule_CompilationHintsLazyBaselineEagerTopTier) {
     NativeModule* native_module = module.ToHandleChecked()->native_module();
     auto* compilation_state = native_module->compilation_state();
 
-    // Busy wait for top tier compilation to finish.
-    while (!compilation_state->top_tier_compilation_finished()) {
-    }
-
-    // Expect top tier code.
+    // We have no code initially (because of lazy baseline), but eventually we
+    // should have TurboFan ready (because of eager top tier).
     static_assert(ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
                   "Assume an order on execution tiers");
-    static const int kFuncIndex = 0;
-    ExecutionTier top_tier = ExecutionTier::kTurbofan;
-    {
-      CHECK(native_module->HasCode(kFuncIndex));
-      WasmCodeRefScope code_ref_scope;
-      ExecutionTier actual_tier = native_module->GetCode(kFuncIndex)->tier();
-      CHECK_EQ(top_tier, actual_tier);
-      CHECK(compilation_state->baseline_compilation_finished());
-      CHECK(compilation_state->top_tier_compilation_finished());
+    constexpr int kFuncIndex = 0;
+    WasmCodeRefScope code_ref_scope;
+    while (true) {
+      auto* code = native_module->GetCode(kFuncIndex);
+      if (!code) continue;
+      CHECK_EQ(ExecutionTier::kTurbofan, code->tier());
+      break;
     }
+    CHECK(compilation_state->baseline_compilation_finished());
   }
   Cleanup();
 }
...
@@ -1322,6 +1322,11 @@ STREAM_TEST(TestFunctionSectionWithoutCodeSection) {
 }
 
 STREAM_TEST(TestSetModuleCompiledCallback) {
+  // The "module compiled" callback (to be renamed to "top tier chunk finished"
+  // or similar) will only be triggered with dynamic tiering, so skip this test
+  // if dynamic tiering is disabled.
+  if (!FLAG_wasm_dynamic_tiering) return;
+
   // Reduce the caching threshold so that our three small functions trigger
   // caching.
   FlagScope<int> caching_treshold(&FLAG_wasm_caching_threshold, 10);
...