Commit a776ccaa authored by Clemens Backes, committed by Commit Bot

[wasm] Clean up tiering logic

This cleans up several parts of the tiering logic.
1) Instead of using the {ExecutionTier} to specify whether we do tier up
   or down, we introduce a new {TieringState} enum and use that
   consistently (also where a {bool} was used before); a minimal sketch
   of the new pattern follows below.
2) When tiering up or tiering down, always recompile all functions. It's
   very unlikely that we could reuse previous code anyway: tiering down
   is cheap enough to just always do it, and tiering up requires
   recompiling everything in any case.
3) Remove the {WasmEngine::RecompileAllFunctions} method and inline the
   implementation into callers.
4) Drive-by: Remove some obsolete comments and fix or extend others.
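
A minimal sketch of the pattern introduced by points 1 and 2 (illustration
only, not the actual V8 sources; {TargetTierFor} is a hypothetical helper):

  // Dedicated enum instead of overloading {ExecutionTier} or a {bool}.
  #include <cstdint>
  #include <iostream>

  enum TieringState : int8_t { kTieredUp, kTieredDown };

  // Hypothetical helper: the tier that *all* functions get recompiled to,
  // since point 2 drops any attempt to reuse previously compiled code.
  const char* TargetTierFor(TieringState state) {
    return state == kTieredDown ? "Liftoff" : "TurboFan";
  }

  int main() {
    std::cout << "tier down -> " << TargetTierFor(kTieredDown) << "\n";
    std::cout << "tier up   -> " << TargetTierFor(kTieredUp) << "\n";
    return 0;
  }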

R=thibaudm@chromium.org

Bug: v8:10410
Change-Id: Ic765c6760dd97473ccfd469f22a2514695075587
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2151355
Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67175}
parent 5ee9cf87
......@@ -386,7 +386,7 @@ class CompilationStateImpl {
// called immediately if no recompilation is needed, or called later
// otherwise.
void InitializeRecompilation(
ExecutionTier tier,
TieringState new_tiering_state,
CompilationState::callback_t recompilation_finished_callback);
// Add the callback function to be called on compilation events. Needs to be
......@@ -556,7 +556,8 @@ class CompilationStateImpl {
std::vector<uint8_t> compilation_progress_;
int outstanding_recompilation_functions_ = 0;
ExecutionTier recompilation_tier_;
TieringState tiering_state_ = kTieredUp;
// End of fields protected by {callbacks_mutex_}.
//////////////////////////////////////////////////////////////////////////////
......@@ -1424,22 +1425,22 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
}
void RecompileNativeModule(Isolate* isolate, NativeModule* native_module,
ExecutionTier tier) {
TieringState tiering_state) {
// Install a callback to notify us once background recompilation finished.
auto recompilation_finished_semaphore = std::make_shared<base::Semaphore>(0);
auto* compilation_state = Impl(native_module->compilation_state());
DCHECK(tier == ExecutionTier::kTurbofan || tier == ExecutionTier::kLiftoff);
// The callback captures a shared ptr to the semaphore.
// Initialize the compilation units and kick off background compile tasks.
compilation_state->InitializeRecompilation(
tier, [recompilation_finished_semaphore](CompilationEvent event) {
tiering_state,
[recompilation_finished_semaphore](CompilationEvent event) {
if (event == CompilationEvent::kFinishedRecompilation) {
recompilation_finished_semaphore->Signal();
}
});
// For tier down only.
if (tier == ExecutionTier::kLiftoff) {
// We only wait for tier down. Tier up can happen in the background.
if (tiering_state == kTieredDown) {
// The main thread contributes to the compilation, except if we need
// deterministic compilation; in that case, the single background task will
// execute all compilation.
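
A standalone sketch of the wait pattern above, using standard C++20 primitives
instead of V8's {base::Semaphore} ({StartBackgroundRecompilation} is a
hypothetical stand-in for the background compile tasks):

  #include <functional>
  #include <iostream>
  #include <memory>
  #include <semaphore>
  #include <thread>

  enum TieringState { kTieredUp, kTieredDown };

  // Hypothetical stand-in for kicking off background compile tasks.
  void StartBackgroundRecompilation(std::function<void()> on_finished) {
    std::thread([cb = std::move(on_finished)] { cb(); }).detach();
  }

  void Recompile(TieringState state) {
    // The callback captures a shared_ptr to the semaphore, so the semaphore
    // outlives this function even when we do not wait (the tier-up case).
    auto finished = std::make_shared<std::binary_semaphore>(0);
    StartBackgroundRecompilation([finished] { finished->release(); });
    // Only tier-down is synchronous; tier-up can finish in the background.
    if (state == kTieredDown) finished->acquire();
  }

  int main() {
    Recompile(kTieredDown);
    std::cout << "tier-down recompilation finished\n";
    return 0;
  }
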
......@@ -2461,11 +2462,9 @@ void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module,
for (int func_index = start; func_index < end; func_index++) {
if (prefer_liftoff) {
constexpr uint8_t kLiftoffOnlyFunctionProgress =
RequiredTopTierField::update(
RequiredBaselineTierField::update(
ReachedTierField::encode(ExecutionTier::kNone),
ExecutionTier::kLiftoff),
ExecutionTier::kLiftoff);
RequiredTopTierField::encode(ExecutionTier::kLiftoff) |
RequiredBaselineTierField::encode(ExecutionTier::kLiftoff) |
ReachedTierField::encode(ExecutionTier::kNone);
compilation_progress_.push_back(kLiftoffOnlyFunctionProgress);
outstanding_baseline_units_++;
outstanding_top_tier_functions_++;
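
The rewrite above relies on the fact that OR-ing {encode()} results is
equivalent to chaining {update()} calls when the starting byte is zero. A
self-contained sketch under assumed field layouts (the shift/size values and
the {BitField} stand-in below are illustrative, not V8's actual definitions):

  #include <cstdint>
  #include <iostream>

  // Minimal stand-in for v8::base::BitField<T, shift, size>.
  template <typename T, int kShift, int kSize>
  struct BitField {
    static constexpr uint8_t kMask = ((1u << kSize) - 1) << kShift;
    static constexpr uint8_t encode(T value) {
      return static_cast<uint8_t>(value) << kShift;
    }
    static constexpr uint8_t update(uint8_t previous, T value) {
      return (previous & ~kMask) | encode(value);
    }
  };

  enum class Tier : uint8_t { kNone = 0, kLiftoff = 1, kTurbofan = 2 };

  // Assumed layout: baseline tier in bits 0-1, top tier in 2-3, reached in 4-5.
  using RequiredBaselineTierField = BitField<Tier, 0, 2>;
  using RequiredTopTierField = BitField<Tier, 2, 2>;
  using ReachedTierField = BitField<Tier, 4, 2>;

  constexpr uint8_t kViaOr = RequiredTopTierField::encode(Tier::kLiftoff) |
                             RequiredBaselineTierField::encode(Tier::kLiftoff) |
                             ReachedTierField::encode(Tier::kNone);
  constexpr uint8_t kViaUpdate = RequiredTopTierField::update(
      RequiredBaselineTierField::update(ReachedTierField::encode(Tier::kNone),
                                        Tier::kLiftoff),
      Tier::kLiftoff);

  int main() {
    static_assert(kViaOr == kViaUpdate, "both spellings produce the same byte");
    std::cout << static_cast<int>(kViaOr) << "\n";
    return 0;
  }
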
......@@ -2522,7 +2521,7 @@ void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module,
}
void CompilationStateImpl::InitializeRecompilation(
ExecutionTier tier,
TieringState new_tiering_state,
CompilationState::callback_t recompilation_finished_callback) {
DCHECK(!failed());
......@@ -2534,27 +2533,21 @@ void CompilationStateImpl::InitializeRecompilation(
// Restart recompilation if another recompilation is already happening.
outstanding_recompilation_functions_ = 0;
// If compilation hasn't started yet then code would be keep as tiered-down
// If compilation hasn't started yet then code would be kept as tiered-down
// and don't need to recompile.
if (compilation_progress_.size() > 0) {
int start = native_module_->module()->num_imported_functions;
int end = start + native_module_->module()->num_declared_functions;
for (int function_index = start; function_index < end; function_index++) {
int slot_index = function_index - start;
DCHECK_LT(slot_index, compilation_progress_.size());
ExecutionTier reached_tier =
ReachedTierField::decode(compilation_progress_[slot_index]);
// Ignore Liftoff code, since we don't know if it was compiled with
// debugging support.
bool has_correct_tier =
tier == ExecutionTier::kTurbofan && reached_tier == tier &&
native_module_->HasCodeWithTier(function_index, tier);
if (!has_correct_tier) {
compilation_progress_[slot_index] = MissingRecompilationField::update(
compilation_progress_[slot_index], true);
outstanding_recompilation_functions_++;
builder.AddRecompilationUnit(function_index, tier);
}
const WasmModule* module = native_module_->module();
int imported = module->num_imported_functions;
int declared = module->num_declared_functions;
outstanding_recompilation_functions_ = declared;
DCHECK_EQ(declared, compilation_progress_.size());
for (int slot_index = 0; slot_index < declared; ++slot_index) {
compilation_progress_[slot_index] = MissingRecompilationField::update(
compilation_progress_[slot_index], true);
builder.AddRecompilationUnit(imported + slot_index,
new_tiering_state == kTieredDown
? ExecutionTier::kLiftoff
: ExecutionTier::kTurbofan);
}
}
......@@ -2564,7 +2557,7 @@ void CompilationStateImpl::InitializeRecompilation(
recompilation_finished_callback(CompilationEvent::kFinishedRecompilation);
} else {
callbacks_.emplace_back(std::move(recompilation_finished_callback));
recompilation_tier_ = tier;
tiering_state_ = new_tiering_state;
}
}
......@@ -2710,19 +2703,17 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
outstanding_top_tier_functions_--;
}
if (V8_UNLIKELY(outstanding_recompilation_functions_ > 0) &&
MissingRecompilationField::decode(function_progress)) {
if (V8_UNLIKELY(MissingRecompilationField::decode(function_progress))) {
DCHECK_LT(0, outstanding_recompilation_functions_);
// If tiering up, accept any TurboFan code. For tiering down, look at
// the {for_debugging} flag. The tier can be Liftoff or TurboFan and is
// irrelevant here. In particular, we want to ignore any outstanding
// non-debugging units.
// TODO(clemensb): Replace {recompilation_tier_} by a better flag.
bool matches = recompilation_tier_ == ExecutionTier::kLiftoff
bool matches = tiering_state_ == kTieredDown
? code->for_debugging()
: code->tier() == ExecutionTier::kTurbofan;
if (matches) {
outstanding_recompilation_functions_--;
// Update function's recompilation progress.
compilation_progress_[slot_index] = MissingRecompilationField::update(
compilation_progress_[slot_index], false);
if (outstanding_recompilation_functions_ == 0) {
......
......@@ -45,7 +45,7 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
Handle<FixedArray>* export_wrappers_out);
void RecompileNativeModule(Isolate* isolate, NativeModule* native_module,
ExecutionTier tier);
TieringState new_tiering_state);
V8_EXPORT_PRIVATE
void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
......
......@@ -24,6 +24,7 @@
#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-module-sourcemap.h"
......@@ -1069,8 +1070,9 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
uint32_t slot_idx = declared_function_index(module(), code->index());
WasmCode* prior_code = code_table_[slot_idx];
const bool update_code_table =
tier_down_ ? !prior_code || code->for_debugging()
: !prior_code || prior_code->tier() < code->tier();
tiering_state_ == kTieredDown
? !prior_code || code->for_debugging()
: !prior_code || prior_code->tier() < code->tier();
if (update_code_table) {
code_table_[slot_idx] = code.get();
if (prior_code) {
......@@ -1834,44 +1836,40 @@ bool NativeModule::IsRedirectedToInterpreter(uint32_t func_index) {
bool NativeModule::SetTieredDown() {
// Do not tier down asm.js.
if (module()->origin != kWasmOrigin) return false;
if (module()->origin != kWasmOrigin) return true;
base::MutexGuard lock(&allocation_mutex_);
if (tier_down_) return true;
tier_down_ = true;
return false;
if (tiering_state_ == kTieredDown) return false;
tiering_state_ = kTieredDown;
return true;
}
bool NativeModule::IsTieredDown() {
base::MutexGuard lock(&allocation_mutex_);
return tier_down_;
return tiering_state_ == kTieredDown;
}
void NativeModule::TierDown(Isolate* isolate) {
// Do not tier down asm.js.
if (module()->origin != kWasmOrigin) return;
// Set the flag. Return if it is already set.
if (SetTieredDown()) return;
// Set the module to tiered down state; return if it is already in that state.
if (!SetTieredDown()) return;
// Tier down all functions.
isolate->wasm_engine()->RecompileAllFunctions(isolate, this,
ExecutionTier::kLiftoff);
RecompileNativeModule(isolate, this, kTieredDown);
}
void NativeModule::TierUp(Isolate* isolate) {
// Do not tier up asm.js.
if (module()->origin != kWasmOrigin) return;
// Set the flag.
{
base::MutexGuard lock(&allocation_mutex_);
tier_down_ = false;
if (tiering_state_ == kTieredUp) return;
tiering_state_ = kTieredUp;
}
// Tier up all functions.
isolate->wasm_engine()->RecompileAllFunctions(isolate, this,
ExecutionTier::kTurbofan);
RecompileNativeModule(isolate, this, kTieredUp);
}
void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
......
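
The new {SetTieredDown()} contract above ("did this call change the state?")
boils down to a mutex-guarded compare-and-set. A minimal sketch with standard
C++ types (class and member names are illustrative, not the real
{NativeModule}):

  #include <cstdint>
  #include <iostream>
  #include <mutex>

  enum TieringState : int8_t { kTieredUp, kTieredDown };

  class ModuleState {
   public:
    // Returns true iff this call changed the state, i.e. the caller must
    // trigger recompilation; repeated calls are cheap no-ops.
    bool SetTieredDown() {
      std::lock_guard<std::mutex> lock(mutex_);
      if (tiering_state_ == kTieredDown) return false;
      tiering_state_ = kTieredDown;
      return true;
    }

   private:
    std::mutex mutex_;
    TieringState tiering_state_ = kTieredUp;
  };

  int main() {
    ModuleState m;
    std::cout << m.SetTieredDown() << "\n";  // 1: state changed, recompile.
    std::cout << m.SetTieredDown() << "\n";  // 0: already tiered down, skip.
    return 0;
  }
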
......@@ -590,7 +590,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
// by publishing an entry stub with the {Kind::kInterpreterEntry} code kind.
bool IsRedirectedToInterpreter(uint32_t func_index);
// Set {tier_down_} flag. Return previous state.
// Set to tiered down state. Returns {true} if this caused a change, {false}
// otherwise.
bool SetTieredDown();
bool IsTieredDown();
......@@ -749,7 +750,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
// mutex.
std::unique_ptr<DebugInfo> debug_info_;
bool tier_down_ = false;
TieringState tiering_state_ = kTieredUp;
// End of fields protected by {allocation_mutex_}.
//////////////////////////////////////////////////////////////////////////////
......
......@@ -613,12 +613,6 @@ void WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module,
&native_module->module()->functions[function_index], tier);
}
void WasmEngine::RecompileAllFunctions(Isolate* isolate,
NativeModule* native_module,
ExecutionTier tier) {
RecompileNativeModule(isolate, native_module, tier);
}
void WasmEngine::TierDownAllModulesPerIsolate(Isolate* isolate) {
std::vector<NativeModule*> native_modules;
{
......
......@@ -193,10 +193,6 @@ class V8_EXPORT_PRIVATE WasmEngine {
void CompileFunction(Isolate* isolate, NativeModule* native_module,
uint32_t function_index, ExecutionTier tier);
// Recompiles all functions at a specific compilation tier.
void RecompileAllFunctions(Isolate* isolate, NativeModule* native_module,
ExecutionTier tier);
void TierDownAllModulesPerIsolate(Isolate* isolate);
void TierUpAllModulesPerIsolate(Isolate* isolate);
......
......@@ -33,6 +33,7 @@ inline const char* ExecutionTierToString(ExecutionTier tier) {
}
enum ForDebugging : bool { kForDebugging = true, kNoDebugging = false };
enum TieringState : int8_t { kTieredUp, kTieredDown };
} // namespace wasm
} // namespace internal
......