Commit 384598dc authored by Clemens Backes, committed by V8 LUCI CQ

Revert "Reland "[wasm] Refactor compilation tier computations""

This reverts commit b3a27f22.

Reason for revert: Fails 'debug-enabled-tier-down-wasm' flakily (https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Win64/48026/overview)

Original change's description:
> Reland "[wasm] Refactor compilation tier computations"
>
> This is a reland of commit e50472d6.
> In {ApplyCompilationHintToInitialProgress} we would reset the baseline
> tier to {kNone} if the compilation strategy is {kDefault}, which is
> wrong. We would not generate code but also not install the lazy stub,
> so whenever we start executing the code before top-tier is ready we
> would crash.
>
> Original change's description:
> > [wasm] Refactor compilation tier computations
> >
> > The way we initialized the "compilation progress" was pretty convoluted,
> > with multiple levels of functions being called for initializing every
> > single slot.
> >
> > This CL refactors this to compute one default value for the whole
> > module, and only modifies those slots that need special handling (e.g.
> > because of compilation hints, or lazy/eager compilation after
> > deserialization).
> >
> > We also rename "liftoff_functions" to "eager_functions" in the
> > deserialization path; the idea is that those functions should get
> > eagerly compiled because we expect them to be needed during execution.
> > Usually they would be Liftoff-compiled, but it's more consistent to use
> > the existing logic to choose the baseline tier. In the default
> > configuration, this will still use Liftoff, but if Liftoff is disabled
> > we will use TurboFan instead.
> >
> > R=jkummerow@chromium.org, ahaas@chromium.org
> >
> > Bug: v8:12425
> > Change-Id: Ie58840b19efd0b1e98f1b02d5f1d4369410ed8e1
> > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3829606
> > Commit-Queue: Clemens Backes <clemensb@chromium.org>
> > Reviewed-by: Andreas Haas <ahaas@chromium.org>
> > Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
> > Cr-Commit-Position: refs/heads/main@{#82521}
>
> Bug: v8:12425
> Change-Id: Ie41e63148bf6bd0e38fc07a3a514f1094d9d26cf
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3838409
> Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
> Commit-Queue: Clemens Backes <clemensb@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#82585}

Bug: v8:12425
Change-Id: Ic86d3f5b0e0603dae62ccead3be052d928209506
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3842208
Commit-Queue: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Auto-Submit: Clemens Backes <clemensb@chromium.org>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Cr-Commit-Position: refs/heads/main@{#82588}
parent e2bfd44c
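For context, a minimal self-contained sketch (illustrative only, not V8 code; every name below, including the main() driver, is invented) of the scheme the reverted CL describes: compute one default {baseline, top} tier pair for the whole module, then override individual functions from compilation hints, taking care that a {kDefault} hint keeps the module-wide choice instead of resetting the baseline tier to kNone (the bug the reland fixed):

// Illustrative sketch only -- not V8 code. All names below (Tier, TierPair,
// DefaultTiersPerModule, ApplyHint, the flags passed in main) are invented to
// mirror the idea described in the commit message: one default tier pair per
// module, with per-function overrides from compilation hints.
#include <cstdio>

enum class Tier { kNone, kLiftoff, kTurbofan };
enum class HintStrategy { kDefault, kLazy, kEager, kLazyBaselineEagerTopTier };

struct TierPair {
  Tier baseline;
  Tier top;
};

// One default pair for the whole module (computed once, not per function).
TierPair DefaultTiersPerModule(bool lazy_module, bool liftoff_enabled,
                               bool eager_tier_up) {
  if (lazy_module) return {Tier::kNone, Tier::kNone};
  Tier baseline = liftoff_enabled ? Tier::kLiftoff : Tier::kTurbofan;
  Tier top = eager_tier_up ? Tier::kTurbofan : baseline;
  return {baseline, top};
}

// Per-function override from a compilation hint. The subtle point from the
// revert description: {kDefault} must keep the module-wide choice; resetting
// the baseline tier to kNone here would leave the function with neither
// compiled code nor a lazy-compile stub.
TierPair ApplyHint(TierPair defaults, HintStrategy strategy) {
  switch (strategy) {
    case HintStrategy::kDefault:
      return defaults;  // keep the module-wide default
    case HintStrategy::kLazy:
      return {Tier::kNone, Tier::kNone};
    case HintStrategy::kEager:
      return defaults;  // compile eagerly with the default tiers
    case HintStrategy::kLazyBaselineEagerTopTier:
      return {Tier::kNone, defaults.top};
  }
  return defaults;
}

int main() {
  TierPair defaults = DefaultTiersPerModule(/*lazy_module=*/false,
                                            /*liftoff_enabled=*/true,
                                            /*eager_tier_up=*/false);
  TierPair hinted = ApplyHint(defaults, HintStrategy::kDefault);
  std::printf("baseline=%d top=%d\n", static_cast<int>(hinted.baseline),
              static_cast<int>(hinted.top));
  return 0;
}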
@@ -167,8 +167,9 @@ class V8_EXPORT_PRIVATE CompilationState {
   void AddCallback(std::unique_ptr<CompilationEventCallback> callback);
 
-  void InitializeAfterDeserialization(base::Vector<const int> lazy_functions,
-                                      base::Vector<const int> eager_functions);
+  void InitializeAfterDeserialization(
+      base::Vector<const int> lazy_functions,
+      base::Vector<const int> liftoff_functions);
 
   // Set a higher priority for the compilation job.
   void SetHighPriority();
@@ -16,7 +16,18 @@
 #include "src/wasm/wasm-debug.h"
 #include "src/wasm/wasm-engine.h"
 
-namespace v8::internal::wasm {
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// static
+ExecutionTier WasmCompilationUnit::GetBaselineExecutionTier(
+    const WasmModule* module) {
+  // Liftoff does not support the special asm.js opcodes, thus always compile
+  // asm.js modules with TurboFan.
+  if (is_asmjs_module(module)) return ExecutionTier::kTurbofan;
+  return FLAG_liftoff ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan;
+}
 
 WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
     CompilationEnv* env, const WireBytesStorage* wire_bytes_storage,

@@ -264,4 +275,6 @@ Handle<CodeT> JSToWasmWrapperCompilationUnit::CompileSpecificJSToWasmWrapper(
   return unit.Finalize();
 }
 
-}  // namespace v8::internal::wasm
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
@@ -62,6 +62,8 @@ struct WasmCompilationResult {
 
 class V8_EXPORT_PRIVATE WasmCompilationUnit final {
  public:
+  static ExecutionTier GetBaselineExecutionTier(const WasmModule*);
+
   WasmCompilationUnit(int index, ExecutionTier tier, ForDebugging for_debugging)
       : func_index_(index), tier_(tier), for_debugging_(for_debugging) {}
@@ -558,22 +558,17 @@ class CompilationStateImpl {
   bool cancelled() const;
 
-  // Apply a compilation hint to the initial compilation progress, updating all
-  // internal fields accordingly.
-  void ApplyCompilationHintToInitialProgress(const WasmCompilationHint& hint,
-                                             size_t hint_idx);
-
   // Initialize compilation progress. Set compilation tiers to expect for
   // baseline and top tier compilation. Must be set before
   // {CommitCompilationUnits} is invoked which triggers background compilation.
-  void InitializeCompilationProgress(int num_import_wrappers,
-                                     int num_export_wrappers);
+  void InitializeCompilationProgress(bool lazy_module, int num_import_wrappers,
+                                     int num_export_wrappers);
 
   // Initialize the compilation progress after deserialization. This is needed
   // for recompilation (e.g. for tier down) to work later.
   void InitializeCompilationProgressAfterDeserialization(
       base::Vector<const int> lazy_functions,
-      base::Vector<const int> eager_functions);
+      base::Vector<const int> liftoff_functions);
 
   // Initializes compilation units based on the information encoded in the
   // {compilation_progress_}.
@@ -677,6 +672,10 @@ class CompilationStateImpl {
   }
 
  private:
+  uint8_t SetupCompilationProgressForFunction(
+      bool lazy_function, NativeModule* module,
+      const WasmFeatures& enabled_features, int func_index);
+
   // Returns the potentially-updated {function_progress}.
   uint8_t AddCompilationUnitInternal(CompilationUnitBuilder* builder,
                                      int function_index,
@@ -852,9 +851,9 @@ void CompilationState::SetHighPriority() { Impl(this)->SetHighPriority(); }
 void CompilationState::InitializeAfterDeserialization(
     base::Vector<const int> lazy_functions,
-    base::Vector<const int> eager_functions) {
+    base::Vector<const int> liftoff_functions) {
   Impl(this)->InitializeCompilationProgressAfterDeserialization(
-      lazy_functions, eager_functions);
+      lazy_functions, liftoff_functions);
 }
 
 bool CompilationState::failed() const { return Impl(this)->failed(); }
@@ -940,59 +939,46 @@ struct ExecutionTierPair {
   ExecutionTier top_tier;
 };
 
-ExecutionTierPair GetDefaultTiersPerModule(NativeModule* native_module,
-                                           DynamicTiering dynamic_tiering,
-                                           bool lazy_module) {
-  const WasmModule* module = native_module->module();
-  if (lazy_module) {
-    return {ExecutionTier::kNone, ExecutionTier::kNone};
-  }
-  if (is_asmjs_module(module)) {
-    return {ExecutionTier::kTurbofan, ExecutionTier::kTurbofan};
-  }
-  if (native_module->IsTieredDown()) {
-    return {ExecutionTier::kLiftoff, ExecutionTier::kLiftoff};
-  }
-  ExecutionTier baseline_tier =
-      FLAG_liftoff ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan;
-  bool eager_tier_up = !dynamic_tiering && FLAG_wasm_tier_up;
-  ExecutionTier top_tier =
-      eager_tier_up ? ExecutionTier::kTurbofan : baseline_tier;
-  return {baseline_tier, top_tier};
-}
-
-ExecutionTierPair GetLazyCompilationTiers(NativeModule* native_module,
-                                          uint32_t func_index) {
-  DynamicTiering dynamic_tiering =
-      Impl(native_module->compilation_state())->dynamic_tiering();
-  // For lazy compilation, get the tiers we would use if lazy compilation is
-  // disabled.
-  constexpr bool kNotLazy = false;
-  ExecutionTierPair tiers =
-      GetDefaultTiersPerModule(native_module, dynamic_tiering, kNotLazy);
-
-  // Check if compilation hints override default tiering behaviour.
-  if (native_module->enabled_features().has_compilation_hints()) {
-    if (auto* hint = GetCompilationHint(native_module->module(), func_index)) {
-      tiers.baseline_tier =
-          ApplyHintToExecutionTier(hint->baseline_tier, tiers.baseline_tier);
-      tiers.top_tier = ApplyHintToExecutionTier(hint->top_tier, tiers.top_tier);
-    }
-  }
-
-  if (V8_UNLIKELY(FLAG_wasm_tier_up_filter >= 0 &&
-                  func_index !=
-                      static_cast<uint32_t>(FLAG_wasm_tier_up_filter))) {
-    tiers.top_tier = tiers.baseline_tier;
-  }
-
-  // Correct top tier if necessary.
-  static_assert(ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
-                "Assume an order on execution tiers");
-  if (tiers.baseline_tier > tiers.top_tier) {
-    tiers.top_tier = tiers.baseline_tier;
-  }
-  return tiers;
-}
+ExecutionTierPair GetRequestedExecutionTiers(
+    NativeModule* native_module, const WasmFeatures& enabled_features,
+    uint32_t func_index) {
+  const WasmModule* module = native_module->module();
+  ExecutionTierPair result;
+
+  result.baseline_tier = WasmCompilationUnit::GetBaselineExecutionTier(module);
+
+  bool dynamic_tiering =
+      Impl(native_module->compilation_state())->dynamic_tiering();
+  bool tier_up_enabled = !dynamic_tiering && FLAG_wasm_tier_up;
+  if (module->origin != kWasmOrigin || !tier_up_enabled ||
+      V8_UNLIKELY(FLAG_wasm_tier_up_filter >= 0 &&
+                  func_index !=
+                      static_cast<uint32_t>(FLAG_wasm_tier_up_filter))) {
+    result.top_tier = result.baseline_tier;
+    return result;
+  }
+
+  // Default tiering behaviour.
+  result.top_tier = ExecutionTier::kTurbofan;
+
+  // Check if compilation hints override default tiering behaviour.
+  if (enabled_features.has_compilation_hints()) {
+    const WasmCompilationHint* hint = GetCompilationHint(module, func_index);
+    if (hint != nullptr) {
+      result.baseline_tier =
+          ApplyHintToExecutionTier(hint->baseline_tier, result.baseline_tier);
+      result.top_tier =
+          ApplyHintToExecutionTier(hint->top_tier, result.top_tier);
+    }
+  }
+
+  // Correct top tier if necessary.
+  static_assert(ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
+                "Assume an order on execution tiers");
+  if (result.baseline_tier > result.top_tier) {
+    result.top_tier = result.baseline_tier;
+  }
+  return result;
+}
 
 // The {CompilationUnitBuilder} builds compilation units and stores them in an
@@ -1003,10 +989,21 @@ class CompilationUnitBuilder {
   explicit CompilationUnitBuilder(NativeModule* native_module)
       : native_module_(native_module) {}
 
-  void AddImportUnit(uint32_t func_index) {
-    DCHECK_GT(native_module_->module()->num_imported_functions, func_index);
-    baseline_units_.emplace_back(func_index, ExecutionTier::kNone,
-                                 kNoDebugging);
+  void AddUnits(uint32_t func_index) {
+    if (func_index < native_module_->module()->num_imported_functions) {
+      baseline_units_.emplace_back(func_index, ExecutionTier::kNone,
+                                   kNoDebugging);
+      return;
+    }
+    ExecutionTierPair tiers = GetRequestedExecutionTiers(
+        native_module_, native_module_->enabled_features(), func_index);
+    // Compile everything for non-debugging initially. If needed, we will tier
+    // down when the module is fully compiled. Synchronization would be pretty
+    // difficult otherwise.
+    baseline_units_.emplace_back(func_index, tiers.baseline_tier, kNoDebugging);
+    if (tiers.baseline_tier != tiers.top_tier) {
+      tiering_units_.emplace_back(func_index, tiers.top_tier, kNoDebugging);
+    }
   }
 
   void AddJSToWasmWrapperUnit(
@@ -1176,6 +1173,9 @@ bool CompileLazy(Isolate* isolate, Handle<WasmInstanceObject> instance,
     lazy_compile_time_scope.emplace(counters, native_module);
   }
 
+  const WasmModule* module = native_module->module();
+  auto enabled_features = native_module->enabled_features();
+
   DCHECK(!native_module->lazy_compile_frozen());
 
   TRACE_LAZY("Compiling wasm-function#%d.\n", func_index);

@@ -1186,7 +1186,8 @@ bool CompileLazy(Isolate* isolate, Handle<WasmInstanceObject> instance,
   CompilationStateImpl* compilation_state =
       Impl(native_module->compilation_state());
-  ExecutionTierPair tiers = GetLazyCompilationTiers(native_module, func_index);
+  ExecutionTierPair tiers =
+      GetRequestedExecutionTiers(native_module, enabled_features, func_index);
 
   DCHECK_LE(native_module->num_imported_functions(), func_index);
   DCHECK_LT(func_index, native_module->num_functions());
@@ -1234,10 +1235,9 @@ bool CompileLazy(Isolate* isolate, Handle<WasmInstanceObject> instance,
   counters->wasm_lazily_compiled_functions()->Increment();
 
-  const WasmModule* module = native_module->module();
   const bool lazy_module = IsLazyModule(module);
-  if (GetCompileStrategy(module, native_module->enabled_features(), func_index,
-                         lazy_module) == CompileStrategy::kLazy &&
+  if (GetCompileStrategy(module, enabled_features, func_index, lazy_module) ==
+          CompileStrategy::kLazy &&
       tiers.baseline_tier < tiers.top_tier) {
     WasmCompilationUnit tiering_unit{func_index, tiers.top_tier, kNoDebugging};
     compilation_state->CommitTopTierCompilationUnit(tiering_unit);
@@ -1730,7 +1730,7 @@ int AddImportWrapperUnits(NativeModule* native_module,
       // Ensure that all keys exist in the cache, so that we can populate the
       // cache later without locking.
       (*native_module->import_wrapper_cache())[key] = nullptr;
-      builder->AddImportUnit(func_index);
+      builder->AddUnits(func_index);
     }
   }
   return static_cast<int>(keys.size());

@@ -1763,12 +1763,13 @@ std::unique_ptr<CompilationUnitBuilder> InitializeCompilation(
   InitializeLazyCompilation(native_module);
   CompilationStateImpl* compilation_state =
       Impl(native_module->compilation_state());
+  const bool lazy_module = IsLazyModule(native_module->module());
   auto builder = std::make_unique<CompilationUnitBuilder>(native_module);
   int num_import_wrappers = AddImportWrapperUnits(native_module, builder.get());
   int num_export_wrappers =
       AddExportWrapperUnits(isolate, native_module, builder.get());
-  compilation_state->InitializeCompilationProgress(num_import_wrappers,
-                                                   num_export_wrappers);
+  compilation_state->InitializeCompilationProgress(
+      lazy_module, num_import_wrappers, num_export_wrappers);
   return builder;
 }
@@ -3109,81 +3110,68 @@ bool CompilationStateImpl::cancelled() const {
   return compile_cancelled_.load(std::memory_order_relaxed);
 }
 
-void CompilationStateImpl::ApplyCompilationHintToInitialProgress(
-    const WasmCompilationHint& hint, size_t hint_idx) {
-  // Get old information.
-  uint8_t& progress = compilation_progress_[hint_idx];
-  ExecutionTier old_baseline_tier = RequiredBaselineTierField::decode(progress);
-  ExecutionTier old_top_tier = RequiredTopTierField::decode(progress);
-
-  // Compute new information.
-  ExecutionTier new_baseline_tier =
-      ApplyHintToExecutionTier(hint.baseline_tier, old_baseline_tier);
-  ExecutionTier new_top_tier =
-      ApplyHintToExecutionTier(hint.top_tier, old_top_tier);
-  switch (hint.strategy) {
-    case WasmCompilationHintStrategy::kDefault:
-      // Be careful not to switch from lazy to non-lazy.
-      if (old_baseline_tier == ExecutionTier::kNone) {
-        new_baseline_tier = ExecutionTier::kNone;
-      }
-      if (old_top_tier == ExecutionTier::kNone) {
-        new_top_tier = ExecutionTier::kNone;
-      }
-      break;
-    case WasmCompilationHintStrategy::kLazy:
-      new_baseline_tier = ExecutionTier::kNone;
-      new_top_tier = ExecutionTier::kNone;
-      break;
-    case WasmCompilationHintStrategy::kEager:
-      // Nothing to do, use the encoded (new) tiers.
-      break;
-    case WasmCompilationHintStrategy::kLazyBaselineEagerTopTier:
-      new_baseline_tier = ExecutionTier::kNone;
-      break;
-  }
-
-  progress = RequiredBaselineTierField::update(progress, new_baseline_tier);
-  progress = RequiredTopTierField::update(progress, new_top_tier);
-
-  // Update counter for outstanding baseline units.
-  outstanding_baseline_units_ += (new_baseline_tier != ExecutionTier::kNone) -
-                                 (old_baseline_tier != ExecutionTier::kNone);
+uint8_t CompilationStateImpl::SetupCompilationProgressForFunction(
+    bool lazy_function, NativeModule* native_module,
+    const WasmFeatures& enabled_features, int func_index) {
+  ExecutionTierPair requested_tiers =
+      GetRequestedExecutionTiers(native_module, enabled_features, func_index);
+  CompileStrategy strategy = GetCompileStrategy(
+      native_module->module(), enabled_features, func_index, lazy_function);
+
+  bool required_for_baseline = strategy == CompileStrategy::kEager;
+  bool required_for_top_tier = strategy != CompileStrategy::kLazy;
+  DCHECK_EQ(required_for_top_tier,
+            strategy == CompileStrategy::kEager ||
+                strategy == CompileStrategy::kLazyBaselineEagerTopTier);
+
+  // Count functions to complete baseline and top tier compilation.
+  if (required_for_baseline) outstanding_baseline_units_++;
+
+  // Initialize function's compilation progress.
+  ExecutionTier required_baseline_tier = required_for_baseline
+                                             ? requested_tiers.baseline_tier
+                                             : ExecutionTier::kNone;
+  ExecutionTier required_top_tier =
+      required_for_top_tier ? requested_tiers.top_tier : ExecutionTier::kNone;
+  uint8_t function_progress =
+      ReachedTierField::encode(ExecutionTier::kNone) |
+      RequiredBaselineTierField::encode(required_baseline_tier) |
+      RequiredTopTierField::encode(required_top_tier);
+
+  return function_progress;
 }
 
 void CompilationStateImpl::InitializeCompilationProgress(
-    int num_import_wrappers, int num_export_wrappers) {
+    bool lazy_module, int num_import_wrappers, int num_export_wrappers) {
   DCHECK(!failed());
 
+  auto enabled_features = native_module_->enabled_features();
   auto* module = native_module_->module();
 
   base::MutexGuard guard(&callbacks_mutex_);
   DCHECK_EQ(0, outstanding_baseline_units_);
   DCHECK_EQ(0, outstanding_export_wrappers_);
 
-  // Compute the default compilation progress for all functions, and set it.
-  const ExecutionTierPair default_tiers = GetDefaultTiersPerModule(
-      native_module_, dynamic_tiering_, IsLazyModule(module));
-  const uint8_t default_progress =
-      RequiredBaselineTierField::encode(default_tiers.baseline_tier) |
-      RequiredTopTierField::encode(default_tiers.top_tier) |
-      ReachedTierField::encode(ExecutionTier::kNone);
-  compilation_progress_.assign(module->num_declared_functions,
-                               default_progress);
-  if (default_tiers.baseline_tier != ExecutionTier::kNone) {
-    outstanding_baseline_units_ += module->num_declared_functions;
-  }
-
-  // Apply compilation hints, if enabled.
-  if (native_module_->enabled_features().has_compilation_hints()) {
-    size_t num_hints = std::min(module->compilation_hints.size(),
-                                size_t{module->num_declared_functions});
-    for (size_t hint_idx = 0; hint_idx < num_hints; ++hint_idx) {
-      const auto& hint = module->compilation_hints[hint_idx];
-      ApplyCompilationHintToInitialProgress(hint, hint_idx);
-    }
+  compilation_progress_.reserve(module->num_declared_functions);
+  int start = module->num_imported_functions;
+  int end = start + module->num_declared_functions;
+
+  const bool prefer_liftoff = native_module_->IsTieredDown();
+  for (int func_index = start; func_index < end; func_index++) {
+    if (prefer_liftoff) {
+      constexpr uint8_t kLiftoffOnlyFunctionProgress =
+          RequiredTopTierField::encode(ExecutionTier::kLiftoff) |
+          RequiredBaselineTierField::encode(ExecutionTier::kLiftoff) |
+          ReachedTierField::encode(ExecutionTier::kNone);
+      compilation_progress_.push_back(kLiftoffOnlyFunctionProgress);
+      outstanding_baseline_units_++;
+      continue;
+    }
+    uint8_t function_progress = SetupCompilationProgressForFunction(
+        lazy_module, native_module_, enabled_features, func_index);
+    compilation_progress_.push_back(function_progress);
   }
+  DCHECK_IMPLIES(lazy_module && !prefer_liftoff,
+                 outstanding_baseline_units_ == 0);
+  DCHECK_LE(0, outstanding_baseline_units_);
 
-  // Account for outstanding wrapper compilation.
   outstanding_baseline_units_ += num_import_wrappers;
   outstanding_export_wrappers_ = num_export_wrappers;
@@ -3288,14 +3276,15 @@ void CompilationStateImpl::AddCompilationUnit(CompilationUnitBuilder* builder,
 void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
     base::Vector<const int> lazy_functions,
-    base::Vector<const int> eager_functions) {
+    base::Vector<const int> liftoff_functions) {
   TRACE_EVENT2("v8.wasm", "wasm.CompilationAfterDeserialization",
                "num_lazy_functions", lazy_functions.size(),
-               "num_eager_functions", eager_functions.size());
+               "num_liftoff_functions", liftoff_functions.size());
   TimedHistogramScope lazy_compile_time_scope(
       counters()->wasm_compile_after_deserialize());
 
   auto* module = native_module_->module();
+  auto enabled_features = native_module_->enabled_features();
   base::Optional<CodeSpaceWriteScope> lazy_code_space_write_scope;
   if (IsLazyModule(module) || !lazy_functions.empty()) {
     lazy_code_space_write_scope.emplace(native_module_);

@@ -3303,53 +3292,41 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
   {
     base::MutexGuard guard(&callbacks_mutex_);
     DCHECK(compilation_progress_.empty());
-    // Initialize the compilation progress as if everything was
-    // TurboFan-compiled.
     constexpr uint8_t kProgressAfterTurbofanDeserialization =
         RequiredBaselineTierField::encode(ExecutionTier::kTurbofan) |
         RequiredTopTierField::encode(ExecutionTier::kTurbofan) |
         ReachedTierField::encode(ExecutionTier::kTurbofan);
+    finished_events_.Add(CompilationEvent::kFinishedExportWrappers);
+    if (liftoff_functions.empty()) {
+      // We have to trigger the compilation events to finish compilation.
+      // Typically the events get triggered when a CompilationUnit finishes, but
+      // with lazy compilation there are no compilation units.
+      // The {kFinishedBaselineCompilation} event is needed for module
+      // compilation to finish.
+      finished_events_.Add(CompilationEvent::kFinishedBaselineCompilation);
+    }
     compilation_progress_.assign(module->num_declared_functions,
                                  kProgressAfterTurbofanDeserialization);
-
-    // Update compilation state for lazy functions.
-    constexpr uint8_t kProgressForLazyFunctions =
-        RequiredBaselineTierField::encode(ExecutionTier::kNone) |
-        RequiredTopTierField::encode(ExecutionTier::kNone) |
-        ReachedTierField::encode(ExecutionTier::kNone);
     for (auto func_index : lazy_functions) {
       native_module_->UseLazyStub(func_index);
 
       compilation_progress_[declared_function_index(module, func_index)] =
-          kProgressForLazyFunctions;
+          SetupCompilationProgressForFunction(/*lazy_function =*/true,
+                                              native_module_, enabled_features,
+                                              func_index);
     }
-
-    // Update compilation state for eagerly compiled functions.
-    constexpr bool kNotLazy = false;
-    ExecutionTierPair default_tiers =
-        GetDefaultTiersPerModule(native_module_, dynamic_tiering_, kNotLazy);
-    uint8_t progress_for_eager_functions =
-        RequiredBaselineTierField::encode(default_tiers.baseline_tier) |
-        RequiredTopTierField::encode(default_tiers.top_tier) |
-        ReachedTierField::encode(ExecutionTier::kNone);
-    for (auto func_index : eager_functions) {
+    for (auto func_index : liftoff_functions) {
       // Check that {func_index} is not contained in {lazy_functions}.
       DCHECK_EQ(
           compilation_progress_[declared_function_index(module, func_index)],
           kProgressAfterTurbofanDeserialization);
+      // We want to force Liftoff compilation here, as we have a strong hint
+      // that the function will be needed anyways. Therefore we disable
+      // lazy compilation.
+      constexpr bool kNoLazyCompilation = false;
       compilation_progress_[declared_function_index(module, func_index)] =
-          progress_for_eager_functions;
+          SetupCompilationProgressForFunction(
+              kNoLazyCompilation, native_module_, enabled_features, func_index);
     }
-    DCHECK_NE(ExecutionTier::kNone, default_tiers.baseline_tier);
-    outstanding_baseline_units_ += eager_functions.size();
-
-    // Export wrappers are compiled synchronously after deserialization, so set
-    // that as finished already. Baseline compilation is done if we do not have
-    // any Liftoff functions to compile.
-    finished_events_.Add(CompilationEvent::kFinishedExportWrappers);
-    if (eager_functions.empty()) {
-      finished_events_.Add(CompilationEvent::kFinishedBaselineCompilation);
-    }
   }
 
   auto builder = std::make_unique<CompilationUnitBuilder>(native_module_);
@@ -28,7 +28,7 @@ namespace wasm {
 namespace {
 
 constexpr uint8_t kLazyFunction = 2;
-constexpr uint8_t kEagerFunction = 3;
+constexpr uint8_t kLiftoffFunction = 3;
 constexpr uint8_t kTurboFanFunction = 4;
 
 // TODO(bbudge) Try to unify the various implementations of readers and writers

@@ -340,17 +340,17 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
   // non-relocatable constants.
   if (code->tier() != ExecutionTier::kTurbofan) {
     // We check if the function has been executed already. If so, we serialize
-    // it as {kEagerFunction} so that upon deserialization the function will
-    // get eagerly compiled with Liftoff (if enabled). If the function has not
-    // been executed yet, we serialize it as {kLazyFunction}, and the function
-    // will not get compiled upon deserialization.
+    // it as {kLiftoffFunction} so that upon deserialization the function will
+    // get compiled with Liftoff eagerly. If the function has not been executed
+    // yet, we serialize it as {kLazyFunction}, and the function will not get
+    // compiled upon deserialization.
     NativeModule* native_module = code->native_module();
     uint32_t budget =
         native_module->tiering_budget_array()[declared_function_index(
            native_module->module(), code->index())];
     writer->Write(budget == static_cast<uint32_t>(FLAG_wasm_tiering_budget)
                       ? kLazyFunction
-                      : kEagerFunction);
+                      : kLiftoffFunction);
     return;
   }

@@ -552,8 +552,8 @@ class V8_EXPORT_PRIVATE NativeModuleDeserializer {
     return base::VectorOf(lazy_functions_);
   }
 
-  base::Vector<const int> eager_functions() {
-    return base::VectorOf(eager_functions_);
+  base::Vector<const int> liftoff_functions() {
+    return base::VectorOf(liftoff_functions_);
   }
 
  private:

@@ -574,7 +574,7 @@ class V8_EXPORT_PRIVATE NativeModuleDeserializer {
   base::Vector<byte> current_code_space_;
   NativeModule::JumpTablesRef current_jump_tables_;
   std::vector<int> lazy_functions_;
-  std::vector<int> eager_functions_;
+  std::vector<int> liftoff_functions_;
 };
 
 class DeserializeCodeTask : public JobTask {

@@ -714,8 +714,8 @@ DeserializationUnit NativeModuleDeserializer::ReadCode(int fn_index,
     lazy_functions_.push_back(fn_index);
     return {};
   }
-  if (code_kind == kEagerFunction) {
-    eager_functions_.push_back(fn_index);
+  if (code_kind == kLiftoffFunction) {
+    liftoff_functions_.push_back(fn_index);
     return {};
   }

@@ -896,7 +896,7 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
       return {};
     }
     shared_native_module->compilation_state()->InitializeAfterDeserialization(
-        deserializer.lazy_functions(), deserializer.eager_functions());
+        deserializer.lazy_functions(), deserializer.liftoff_functions());
     wasm_engine->UpdateNativeModuleCache(error, &shared_native_module, isolate);
   }
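The compilation-progress bytes manipulated throughout this diff pack a required baseline tier, a required top tier, and the reached tier into a single uint8_t via encode/decode/update bit-field helpers. A stand-alone sketch of that pattern (the field layout and the TierField helper below are assumptions for illustration, not V8's actual definitions):

// Illustrative sketch only -- the 2-bit field layout and helper template are
// invented; only the encode/decode/update usage pattern mirrors the diff above.
#include <cassert>
#include <cstdint>

enum class Tier : uint8_t { kNone = 0, kLiftoff = 1, kTurbofan = 2 };

// Each field occupies 2 bits at a fixed shift within the progress byte.
template <int kShift>
struct TierField {
  static constexpr uint8_t kMask = uint8_t{0b11} << kShift;
  static constexpr uint8_t encode(Tier t) {
    return static_cast<uint8_t>(static_cast<uint8_t>(t) << kShift);
  }
  static constexpr Tier decode(uint8_t progress) {
    return static_cast<Tier>((progress & kMask) >> kShift);
  }
  static constexpr uint8_t update(uint8_t progress, Tier t) {
    return static_cast<uint8_t>((progress & ~kMask) | encode(t));
  }
};

using RequiredBaselineTierField = TierField<0>;
using RequiredTopTierField = TierField<2>;
using ReachedTierField = TierField<4>;

int main() {
  uint8_t progress = RequiredBaselineTierField::encode(Tier::kLiftoff) |
                     RequiredTopTierField::encode(Tier::kTurbofan) |
                     ReachedTierField::encode(Tier::kNone);
  assert(RequiredBaselineTierField::decode(progress) == Tier::kLiftoff);
  // Record that Liftoff code was installed for this function.
  progress = ReachedTierField::update(progress, Tier::kLiftoff);
  assert(ReachedTierField::decode(progress) == Tier::kLiftoff);
  return 0;
}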