Commit be470c55 authored by Frederik Gossen's avatar Frederik Gossen Committed by Commit Bot

Revert "[wasm-hints] Enabled Lazy Compilation by Hint"

This reverts commit 09fa63a9.

Reason for revert: Flakes on https://ci.chromium.org/p/v8/builders/ci/V8%20Linux%20-%20shared/29942

Original change's description:
> [wasm-hints] Enabled Lazy Compilation by Hint
> 
> Hints for lazy compilation are now taken into consideration. If the
> custom hints section suggests lazy compilation we do so unless the module
> consists of a single function.
> 
> Bug: v8:9003
> Change-Id: Ibdc400453cee20d4d5c814733887b38fb675b220
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1535827
> Commit-Queue: Frederik Gossen <frgossen@google.com>
> Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
> Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#60557}

TBR=mstarzinger@chromium.org,clemensh@chromium.org,frgossen@google.com

Change-Id: I18dd424fe8cf05f220f7498bb1ebe4b9fce7d240
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:9003
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1547668
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60558}
parent 09fa63a9
...@@ -5948,7 +5948,7 @@ wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine, ...@@ -5948,7 +5948,7 @@ wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
std::move(result.source_positions), wasm::WasmCode::kFunction, std::move(result.source_positions), wasm::WasmCode::kFunction,
wasm::WasmCode::kOther); wasm::WasmCode::kOther);
// TODO(titzer): add counters for math intrinsic code size / allocation // TODO(titzer): add counters for math intrinsic code size / allocation
return native_module->PublishCode(std::move(wasm_code)).code; return native_module->PublishCode(std::move(wasm_code));
} }
wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine, wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
...@@ -6012,7 +6012,7 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine, ...@@ -6012,7 +6012,7 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
std::move(result.protected_instructions), std::move(result.protected_instructions),
std::move(result.source_positions), wasm::WasmCode::kWasmToJsWrapper, std::move(result.source_positions), wasm::WasmCode::kWasmToJsWrapper,
wasm::WasmCode::kOther); wasm::WasmCode::kOther);
return native_module->PublishCode(std::move(wasm_code)).code; return native_module->PublishCode(std::move(wasm_code));
} }
wasm::WasmCompilationResult CompileWasmInterpreterEntry( wasm::WasmCompilationResult CompileWasmInterpreterEntry(
...@@ -6243,9 +6243,7 @@ wasm::WasmCompilationResult TurbofanWasmCompilationUnit::ExecuteCompilation( ...@@ -6243,9 +6243,7 @@ wasm::WasmCompilationResult TurbofanWasmCompilationUnit::ExecuteCompilation(
// TODO(bradnelson): Improve histogram handling of size_t. // TODO(bradnelson): Improve histogram handling of size_t.
counters->wasm_compile_function_peak_memory_bytes()->AddSample( counters->wasm_compile_function_peak_memory_bytes()->AddSample(
static_cast<int>(mcgraph->graph()->zone()->allocation_size())); static_cast<int>(mcgraph->graph()->zone()->allocation_size()));
auto result = info.ReleaseWasmCompilationResult(); return std::move(*info.ReleaseWasmCompilationResult());
DCHECK_EQ(wasm::ExecutionTier::kOptimized, result->result_tier);
return std::move(*result);
} }
wasm::WasmCompilationResult InterpreterCompilationUnit::ExecuteCompilation( wasm::WasmCompilationResult InterpreterCompilationUnit::ExecuteCompilation(
...@@ -6262,7 +6260,6 @@ wasm::WasmCompilationResult InterpreterCompilationUnit::ExecuteCompilation( ...@@ -6262,7 +6260,6 @@ wasm::WasmCompilationResult InterpreterCompilationUnit::ExecuteCompilation(
wasm_unit_->wasm_engine_, env->enabled_features, wasm_unit_->func_index_, wasm_unit_->wasm_engine_, env->enabled_features, wasm_unit_->func_index_,
func_body.sig); func_body.sig);
DCHECK(result.succeeded()); DCHECK(result.succeeded());
DCHECK_EQ(wasm::ExecutionTier::kInterpreter, result.result_tier);
return result; return result;
} }
......
...@@ -21,7 +21,6 @@ namespace wasm { ...@@ -21,7 +21,6 @@ namespace wasm {
class NativeModule; class NativeModule;
class WasmCode; class WasmCode;
struct WasmCompilationResult;
class WasmError; class WasmError;
enum RuntimeExceptionSupport : bool { enum RuntimeExceptionSupport : bool {
...@@ -121,8 +120,7 @@ class CompilationState { ...@@ -121,8 +120,7 @@ class CompilationState {
bool failed() const; bool failed() const;
void FinishUnit(WasmCompilationResult); void OnFinishedUnit(ExecutionTier, WasmCode*);
void FinishUnits(Vector<WasmCompilationResult>);
private: private:
friend class NativeModule; friend class NativeModule;
......
...@@ -52,7 +52,6 @@ struct WasmCompilationResult { ...@@ -52,7 +52,6 @@ struct WasmCompilationResult {
MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(WasmCompilationResult); MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(WasmCompilationResult);
bool succeeded() const { return code_desc.buffer != nullptr; } bool succeeded() const { return code_desc.buffer != nullptr; }
bool failed() const { return !succeeded(); }
operator bool() const { return succeeded(); } operator bool() const { return succeeded(); }
CodeDesc code_desc; CodeDesc code_desc;
......
...@@ -128,7 +128,7 @@ class CompilationStateImpl { ...@@ -128,7 +128,7 @@ class CompilationStateImpl {
// Set the number of compilations unit expected to be executed. Needs to be // Set the number of compilations unit expected to be executed. Needs to be
// set before {AddCompilationUnits} is run, which triggers background // set before {AddCompilationUnits} is run, which triggers background
// compilation. // compilation.
void SetNumberOfFunctionsToCompile(int num_functions, int num_lazy_functions); void SetNumberOfFunctionsToCompile(int num_functions);
// Add the callback function to be called on compilation events. Needs to be // Add the callback function to be called on compilation events. Needs to be
// set before {AddCompilationUnits} is run to ensure that it receives all // set before {AddCompilationUnits} is run to ensure that it receives all
...@@ -138,12 +138,10 @@ class CompilationStateImpl { ...@@ -138,12 +138,10 @@ class CompilationStateImpl {
// Inserts new functions to compile and kicks off compilation. // Inserts new functions to compile and kicks off compilation.
void AddCompilationUnits( void AddCompilationUnits(
std::vector<std::unique_ptr<WasmCompilationUnit>>& baseline_units, std::vector<std::unique_ptr<WasmCompilationUnit>>& baseline_units,
std::vector<std::unique_ptr<WasmCompilationUnit>>& top_tier_units); std::vector<std::unique_ptr<WasmCompilationUnit>>& tiering_units);
void AddTopTierCompilationUnit(std::unique_ptr<WasmCompilationUnit>);
std::unique_ptr<WasmCompilationUnit> GetNextCompilationUnit(); std::unique_ptr<WasmCompilationUnit> GetNextCompilationUnit();
void FinishUnit(WasmCompilationResult); void OnFinishedUnit(ExecutionTier, WasmCode*);
void FinishUnits(Vector<WasmCompilationResult>);
void ReportDetectedFeatures(const WasmFeatures& detected); void ReportDetectedFeatures(const WasmFeatures& detected);
void OnBackgroundTaskStopped(const WasmFeatures& detected); void OnBackgroundTaskStopped(const WasmFeatures& detected);
...@@ -159,8 +157,9 @@ class CompilationStateImpl { ...@@ -159,8 +157,9 @@ class CompilationStateImpl {
bool baseline_compilation_finished() const { bool baseline_compilation_finished() const {
base::MutexGuard guard(&callbacks_mutex_); base::MutexGuard guard(&callbacks_mutex_);
DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_); return outstanding_baseline_units_ == 0 ||
return outstanding_baseline_functions_ == 0; (compile_mode_ == CompileMode::kTiering &&
outstanding_tiering_units_ == 0);
} }
CompileMode compile_mode() const { return compile_mode_; } CompileMode compile_mode() const { return compile_mode_; }
...@@ -196,7 +195,7 @@ class CompilationStateImpl { ...@@ -196,7 +195,7 @@ class CompilationStateImpl {
// Protected by {mutex_}: // Protected by {mutex_}:
std::vector<std::unique_ptr<WasmCompilationUnit>> baseline_compilation_units_; std::vector<std::unique_ptr<WasmCompilationUnit>> baseline_compilation_units_;
std::vector<std::unique_ptr<WasmCompilationUnit>> top_tier_compilation_units_; std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_compilation_units_;
int num_background_tasks_ = 0; int num_background_tasks_ = 0;
...@@ -223,8 +222,8 @@ class CompilationStateImpl { ...@@ -223,8 +222,8 @@ class CompilationStateImpl {
// Callback functions to be called on compilation events. // Callback functions to be called on compilation events.
std::vector<CompilationState::callback_t> callbacks_; std::vector<CompilationState::callback_t> callbacks_;
int outstanding_baseline_functions_ = 0; int outstanding_baseline_units_ = 0;
int outstanding_top_tier_functions_ = 0; int outstanding_tiering_units_ = 0;
// End of fields protected by {callbacks_mutex_}. // End of fields protected by {callbacks_mutex_}.
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
...@@ -276,12 +275,8 @@ void CompilationState::AddCallback(CompilationState::callback_t callback) { ...@@ -276,12 +275,8 @@ void CompilationState::AddCallback(CompilationState::callback_t callback) {
bool CompilationState::failed() const { return Impl(this)->failed(); } bool CompilationState::failed() const { return Impl(this)->failed(); }
void CompilationState::FinishUnit(WasmCompilationResult result) { void CompilationState::OnFinishedUnit(ExecutionTier tier, WasmCode* code) {
Impl(this)->FinishUnit(std::move(result)); Impl(this)->OnFinishedUnit(tier, code);
}
void CompilationState::FinishUnits(Vector<WasmCompilationResult> results) {
Impl(this)->FinishUnits(results);
} }
// static // static
...@@ -295,10 +290,65 @@ std::unique_ptr<CompilationState> CompilationState::New( ...@@ -295,10 +290,65 @@ std::unique_ptr<CompilationState> CompilationState::New(
// End of PIMPL implementation of {CompilationState}. // End of PIMPL implementation of {CompilationState}.
////////////////////////////////////////////////////// //////////////////////////////////////////////////////
// Compiles a single wasm function on demand (lazy compilation) and publishes
// the resulting code to {native_module}. Records lazy-compilation time and
// throughput samples in the isolate's counters.
void CompileLazy(Isolate* isolate, NativeModule* native_module,
uint32_t func_index) {
Counters* counters = isolate->counters();
HistogramTimerScope lazy_time_scope(counters->wasm_lazy_compilation_time());
DCHECK(!native_module->lazy_compile_frozen());
base::ElapsedTimer compilation_timer;
NativeModuleModificationScope native_module_modification_scope(native_module);
// The function must not have been compiled yet.
DCHECK(!native_module->has_code(static_cast<uint32_t>(func_index)));
compilation_timer.Start();
TRACE_LAZY("Compiling wasm-function#%d.\n", func_index);
// Locate the function body inside the module's wire bytes.
const uint8_t* module_start = native_module->wire_bytes().start();
const WasmFunction* func = &native_module->module()->functions[func_index];
FunctionBody func_body{func->sig, func->code.offset(),
module_start + func->code.offset(),
module_start + func->code.end_offset()};
// Compile with the module's default execution tier.
ExecutionTier tier =
WasmCompilationUnit::GetDefaultExecutionTier(native_module->module());
WasmCompilationUnit unit(isolate->wasm_engine(), func_index, tier);
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = unit.ExecuteCompilation(
&env, native_module->compilation_state()->GetWireBytesStorage(),
isolate->counters(),
Impl(native_module->compilation_state())->detected_features());
WasmCode* code = native_module->AddCompiledCode(std::move(result));
// During lazy compilation, we should never get compilation errors. The module
// was verified before starting execution with lazy compilation.
// This might be OOM, but then we cannot continue execution anyway.
// TODO(clemensh): According to the spec, we can actually skip validation at
// module creation time, and return a function that always traps here.
CHECK(!native_module->compilation_state()->failed());
// The code we just produced should be the one that was requested.
DCHECK_EQ(func_index, code->index());
if (WasmCode::ShouldBeLogged(isolate)) code->LogCode(isolate);
// Sample compilation throughput (KB of function body per second).
double func_kb = 1e-3 * func->code.length();
double compilation_seconds = compilation_timer.Elapsed().InSecondsF();
counters->wasm_lazily_compiled_functions()->Increment();
int throughput_sample = static_cast<int>(func_kb / compilation_seconds);
counters->wasm_lazy_compilation_throughput()->AddSample(throughput_sample);
}
namespace { namespace {
ExecutionTier ApplyHintToExecutionTier(WasmCompilationHintTier hint, ExecutionTier apply_hint_to_execution_tier(WasmCompilationHintTier hint,
ExecutionTier default_tier) { ExecutionTier default_tier) {
switch (hint) { switch (hint) {
case WasmCompilationHintTier::kDefault: case WasmCompilationHintTier::kDefault:
return default_tier; return default_tier;
...@@ -312,76 +362,6 @@ ExecutionTier ApplyHintToExecutionTier(WasmCompilationHintTier hint, ...@@ -312,76 +362,6 @@ ExecutionTier ApplyHintToExecutionTier(WasmCompilationHintTier hint,
UNREACHABLE(); UNREACHABLE();
} }
// Returns the compilation hint for the declared (non-imported) function
// {func_index}, or nullptr if the module has no hint for it. Hints are stored
// indexed relative to the first declared function.
const WasmCompilationHint* GetCompilationHint(const WasmModule* module,
uint32_t func_index) {
DCHECK_LE(module->num_imported_functions, func_index);
uint32_t hint_index = func_index - module->num_imported_functions;
const std::vector<WasmCompilationHint>& compilation_hints =
module->compilation_hints;
if (hint_index < compilation_hints.size()) {
return &compilation_hints[hint_index];
}
return nullptr;
}
// Decides whether function {func_index} should be compiled lazily: either the
// whole module is marked for lazy compilation, or (when compilation hints are
// enabled) the function's own hint requests the lazy strategy.
bool IsLazyCompilation(const WasmModule* module,
const NativeModule* native_module,
const WasmFeatures& enabled_features,
uint32_t func_index) {
if (native_module->lazy_compilation()) return true;
if (enabled_features.compilation_hints) {
const WasmCompilationHint* hint = GetCompilationHint(module, func_index);
return hint != nullptr &&
hint->strategy == WasmCompilationHintStrategy::kLazy;
}
return false;
}
// Pair of execution tiers for one function: the tier used for the initial
// (baseline) compilation and the tier it is eventually tiered up to.
struct ExecutionTierPair {
ExecutionTier baseline_tier;
ExecutionTier top_tier;
};
// Computes the baseline and top execution tiers for {func_index}. In regular
// mode both tiers are the module's default tier; in tiering mode the defaults
// (kBaseline / kOptimized) may be overridden by a per-function compilation
// hint, with the top tier clamped to be at least the baseline tier.
ExecutionTierPair GetRequestedExecutionTiers(
const WasmModule* module, CompileMode compile_mode,
const WasmFeatures& enabled_features, uint32_t func_index) {
ExecutionTierPair result;
switch (compile_mode) {
case CompileMode::kRegular:
// Without tiering, compile once with the module's default tier.
result.baseline_tier =
WasmCompilationUnit::GetDefaultExecutionTier(module);
result.top_tier = result.baseline_tier;
return result;
case CompileMode::kTiering:
// Default tiering behaviour.
result.baseline_tier = ExecutionTier::kBaseline;
result.top_tier = ExecutionTier::kOptimized;
// Check if compilation hints override default tiering behaviour.
if (enabled_features.compilation_hints) {
const WasmCompilationHint* hint =
GetCompilationHint(module, func_index);
if (hint != nullptr) {
result.baseline_tier =
ApplyHintToExecutionTier(hint->first_tier, result.baseline_tier);
result.top_tier =
ApplyHintToExecutionTier(hint->second_tier, result.top_tier);
}
}
// Correct top tier if necessary.
static_assert(ExecutionTier::kInterpreter < ExecutionTier::kBaseline &&
ExecutionTier::kBaseline < ExecutionTier::kOptimized,
"Assume an order on execution tiers");
if (result.baseline_tier > result.top_tier) {
result.top_tier = result.baseline_tier;
}
return result;
}
UNREACHABLE();
}
// The {CompilationUnitBuilder} builds compilation units and stores them in an // The {CompilationUnitBuilder} builds compilation units and stores them in an
// internal buffer. The buffer is moved into the working queue of the // internal buffer. The buffer is moved into the working queue of the
// {CompilationStateImpl} when {Commit} is called. // {CompilationStateImpl} when {Commit} is called.
...@@ -394,14 +374,49 @@ class CompilationUnitBuilder { ...@@ -394,14 +374,49 @@ class CompilationUnitBuilder {
default_tier_(WasmCompilationUnit::GetDefaultExecutionTier( default_tier_(WasmCompilationUnit::GetDefaultExecutionTier(
native_module->module())) {} native_module->module())) {}
void AddUnits(uint32_t func_index) { void AddUnit(uint32_t func_index) {
ExecutionTierPair tiers = GetRequestedExecutionTiers( switch (compilation_state()->compile_mode()) {
native_module_->module(), compilation_state()->compile_mode(), case CompileMode::kRegular:
native_module_->enabled_features(), func_index); baseline_units_.emplace_back(CreateUnit(func_index, default_tier_));
baseline_units_.emplace_back(CreateUnit(func_index, tiers.baseline_tier)); return;
if (tiers.baseline_tier != tiers.top_tier) { case CompileMode::kTiering:
tiering_units_.emplace_back(CreateUnit(func_index, tiers.top_tier));
// Default tiering behaviour.
ExecutionTier first_tier = ExecutionTier::kBaseline;
ExecutionTier second_tier = ExecutionTier::kOptimized;
// Check if compilation hints override default tiering behaviour.
if (native_module_->enabled_features().compilation_hints) {
// Find compilation hint.
CHECK_LE(native_module_->num_imported_functions(), func_index);
uint32_t hint_index =
func_index - native_module_->num_imported_functions();
const std::vector<WasmCompilationHint>& compilation_hints =
native_module_->module()->compilation_hints;
if (hint_index < compilation_hints.size()) {
WasmCompilationHint hint = compilation_hints[hint_index];
// Apply compilation hint.
first_tier =
apply_hint_to_execution_tier(hint.first_tier, first_tier);
second_tier =
apply_hint_to_execution_tier(hint.second_tier, second_tier);
}
}
// Create compilation units and suppress duplicate compilation.
baseline_units_.emplace_back(
CreateUnit(func_index, std::move(first_tier)));
static_assert(ExecutionTier::kInterpreter < ExecutionTier::kBaseline &&
ExecutionTier::kBaseline < ExecutionTier::kOptimized,
"Assume an order on execution tiers");
if (first_tier < second_tier) {
tiering_units_.emplace_back(
CreateUnit(func_index, std::move(second_tier)));
}
return;
} }
UNREACHABLE();
} }
bool Commit() { bool Commit() {
...@@ -434,76 +449,11 @@ class CompilationUnitBuilder { ...@@ -434,76 +449,11 @@ class CompilationUnitBuilder {
std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_units_; std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_units_;
}; };
} // namespace bool compile_lazy(const WasmModule* module) {
return FLAG_wasm_lazy_compilation ||
// Compiles a single wasm function on demand (lazy compilation) at the
// hint-requested baseline tier, publishes the code, and — if the top tier is
// higher — queues a background compilation unit to tier the function up.
void CompileLazy(Isolate* isolate, NativeModule* native_module,
uint32_t func_index) {
Counters* counters = isolate->counters();
HistogramTimerScope lazy_time_scope(counters->wasm_lazy_compilation_time());
DCHECK(!native_module->lazy_compile_frozen());
base::ElapsedTimer compilation_timer;
NativeModuleModificationScope native_module_modification_scope(native_module);
// The function must not have been compiled yet.
DCHECK(!native_module->has_code(static_cast<uint32_t>(func_index)));
compilation_timer.Start();
TRACE_LAZY("Compiling wasm-function#%d.\n", func_index);
// Locate the function body inside the module's wire bytes.
const uint8_t* module_start = native_module->wire_bytes().start();
const WasmFunction* func = &native_module->module()->functions[func_index];
FunctionBody func_body{func->sig, func->code.offset(),
module_start + func->code.offset(),
module_start + func->code.end_offset()};
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
// Determine baseline and top tier, honouring compilation hints.
ExecutionTierPair tiers = GetRequestedExecutionTiers(
native_module->module(), compilation_state->compile_mode(),
native_module->enabled_features(), func_index);
WasmCompilationUnit baseline_unit(isolate->wasm_engine(), func_index,
tiers.baseline_tier);
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = baseline_unit.ExecuteCompilation(
&env, compilation_state->GetWireBytesStorage(), isolate->counters(),
compilation_state->detected_features());
WasmCodeUpdate update = native_module->AddCompiledCode(std::move(result));
WasmCode* code = update.code;
// Schedule tier-up in the background if a higher tier was requested.
if (tiers.baseline_tier < tiers.top_tier) {
auto tiering_unit = base::make_unique<WasmCompilationUnit>(
isolate->wasm_engine(), func_index, tiers.top_tier);
compilation_state->AddTopTierCompilationUnit(std::move(tiering_unit));
}
// During lazy compilation, we should never get compilation errors. The module
// was verified before starting execution with lazy compilation.
// This might be OOM, but then we cannot continue execution anyway.
// TODO(clemensh): According to the spec, we can actually skip validation at
// module creation time, and return a function that always traps here.
CHECK(!compilation_state->failed());
// The code we just produced should be the one that was requested.
DCHECK_EQ(func_index, code->index());
if (WasmCode::ShouldBeLogged(isolate)) code->LogCode(isolate);
// Sample compilation throughput (KB of function body per second).
double func_kb = 1e-3 * func->code.length();
double compilation_seconds = compilation_timer.Elapsed().InSecondsF();
counters->wasm_lazily_compiled_functions()->Increment();
int throughput_sample = static_cast<int>(func_kb / compilation_seconds);
counters->wasm_lazy_compilation_throughput()->AddSample(throughput_sample);
}
namespace {
void RecordStats(const Code code, Counters* counters) { void RecordStats(const Code code, Counters* counters) {
counters->wasm_generated_code_size()->Increment(code->body_size()); counters->wasm_generated_code_size()->Increment(code->body_size());
counters->wasm_reloc_size()->Increment(code->relocation_info()->length()); counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
...@@ -525,110 +475,32 @@ bool FetchAndExecuteCompilationUnit(CompilationEnv* env, ...@@ -525,110 +475,32 @@ bool FetchAndExecuteCompilationUnit(CompilationEnv* env,
std::unique_ptr<WasmCompilationUnit> unit = std::unique_ptr<WasmCompilationUnit> unit =
compilation_state->GetNextCompilationUnit(); compilation_state->GetNextCompilationUnit();
if (unit == nullptr) return false; if (unit == nullptr) return false;
WasmCompilationResult result = unit->ExecuteCompilation( WasmCompilationResult result = unit->ExecuteCompilation(
env, compilation_state->GetWireBytesStorage(), counters, detected); env, compilation_state->GetWireBytesStorage(), counters, detected);
if (result.succeeded()) { if (result.succeeded()) {
compilation_state->FinishUnit(std::move(result)); WasmCode* code = native_module->AddCompiledCode(std::move(result));
compilation_state->OnFinishedUnit(result.requested_tier, code);
} else { } else {
compilation_state->SetError(); compilation_state->SetError();
} }
return true;
}
// Validates a single function body and, on failure, reports a CompileError on
// {thrower} (including the function name when the module provides one).
void ValidateSequentially(Counters* counters, AccountingAllocator* allocator,
NativeModule* native_module, uint32_t func_index,
ErrorThrower* thrower) {
DCHECK(!thrower->error());
const WasmModule* module = native_module->module();
ModuleWireBytes wire_bytes{native_module->wire_bytes()};
const WasmFunction* func = &module->functions[func_index];
Vector<const uint8_t> code = wire_bytes.GetFunctionBytes(func);
FunctionBody body{func->sig, func->code.offset(), code.start(), code.end()};
DecodeResult result;
{
// Time only the decode/verify step.
auto time_counter = SELECT_WASM_COUNTER(counters, module->origin,
wasm_decode, function_time);
TimedHistogramScope wasm_decode_function_time_scope(time_counter);
WasmFeatures detected;
result = VerifyWasmCode(allocator, native_module->enabled_features(),
module, &detected, body);
}
if (result.failed()) {
WasmName name = wire_bytes.GetNameOrNull(func, module);
if (name.start() == nullptr) {
thrower->CompileError("Compiling function #%d failed: %s @+%u",
func_index, result.error().message().c_str(),
result.error().offset());
} else {
TruncatedUserString<> name(wire_bytes.GetNameOrNull(func, module));
thrower->CompileError("Compiling function #%d:\"%.*s\" failed: %s @+%u",
func_index, name.length(), name.start(),
result.error().message().c_str(),
result.error().offset());
}
}
}
void ValidateSequentially(Counters* counters, AccountingAllocator* allocator,
NativeModule* native_module, ErrorThrower* thrower) {
DCHECK(!thrower->error());
uint32_t start = native_module->module()->num_imported_functions; return true;
uint32_t end = start + native_module->module()->num_declared_functions;
for (uint32_t func_index = start; func_index < end; func_index++) {
ValidateSequentially(counters, allocator, native_module, func_index,
thrower);
if (thrower->error()) break;
}
} }
// TODO(wasm): This function should not depend on an isolate. Internally, it is void InitializeCompilationUnits(NativeModule* native_module,
// used for the ErrorThrower only.
bool InitializeCompilationUnits(Isolate* isolate, NativeModule* native_module,
WasmEngine* wasm_engine) { WasmEngine* wasm_engine) {
// Set number of functions that must be compiled to consider the module fully
// compiled.
auto wasm_module = native_module->module();
int num_functions = wasm_module->num_declared_functions;
DCHECK_IMPLIES(!native_module->enabled_features().compilation_hints,
wasm_module->num_lazy_compilation_hints == 0);
int num_lazy_functions = wasm_module->num_lazy_compilation_hints;
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
compilation_state->SetNumberOfFunctionsToCompile(num_functions,
num_lazy_functions);
ErrorThrower thrower(isolate, "WebAssembly.compile()");
ModuleWireBytes wire_bytes(native_module->wire_bytes()); ModuleWireBytes wire_bytes(native_module->wire_bytes());
const WasmModule* module = native_module->module(); const WasmModule* module = native_module->module();
CompilationUnitBuilder builder(native_module, wasm_engine); CompilationUnitBuilder builder(native_module, wasm_engine);
uint32_t start = module->num_imported_functions; uint32_t start = module->num_imported_functions;
uint32_t end = start + module->num_declared_functions; uint32_t end = start + module->num_declared_functions;
for (uint32_t func_index = start; func_index < end; func_index++) { for (uint32_t i = start; i < end; ++i) {
if (IsLazyCompilation(module, native_module, builder.AddUnit(i);
native_module->enabled_features(), func_index)) {
ValidateSequentially(isolate->counters(), isolate->allocator(),
native_module, func_index, &thrower);
native_module->UseLazyStub(func_index);
} else {
builder.AddUnits(func_index);
}
} }
builder.Commit(); builder.Commit();
// Handle potential errors internally.
if (thrower.error()) {
thrower.Reset();
return false;
}
return true;
} }
void CompileInParallel(Isolate* isolate, NativeModule* native_module) { void CompileInParallel(Isolate* isolate, NativeModule* native_module) {
...@@ -651,19 +523,16 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module) { ...@@ -651,19 +523,16 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module) {
CompilationStateImpl* compilation_state = CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state()); Impl(native_module->compilation_state());
DCHECK_GE(kMaxInt, native_module->module()->num_declared_functions); DCHECK_GE(kMaxInt, native_module->module()->num_declared_functions);
int num_wasm_functions =
static_cast<int>(native_module->module()->num_declared_functions);
compilation_state->SetNumberOfFunctionsToCompile(num_wasm_functions);
// 1) The main thread allocates a compilation unit for each wasm function // 1) The main thread allocates a compilation unit for each wasm function
// and stores them in the vector {compilation_units} within the // and stores them in the vector {compilation_units} within the
// {compilation_state}. By adding units to the {compilation_state}, new // {compilation_state}. By adding units to the {compilation_state}, new
// {BackgroundCompileTask} instances are spawned which run on // {BackgroundCompileTask} instances are spawned which run on
// background threads. // background threads.
bool success = InitializeCompilationUnits(isolate, native_module, InitializeCompilationUnits(native_module, isolate->wasm_engine());
isolate->wasm_engine());
if (!success) {
// TODO(frgossen): Add test coverage for this path.
DCHECK(native_module->enabled_features().compilation_hints);
compilation_state->SetError();
}
// 2) The background threads and the main thread pick one compilation unit at // 2) The background threads and the main thread pick one compilation unit at
// a time and execute the parallel phase of the compilation unit. // a time and execute the parallel phase of the compilation unit.
...@@ -702,13 +571,54 @@ void CompileSequentially(Isolate* isolate, NativeModule* native_module) { ...@@ -702,13 +571,54 @@ void CompileSequentially(Isolate* isolate, NativeModule* native_module) {
UpdateFeatureUseCounts(isolate, detected); UpdateFeatureUseCounts(isolate, detected);
} }
// Validates every declared function of the module in order, stopping at the
// first failure, which is reported as a CompileError on {thrower}.
void ValidateSequentially(Isolate* isolate, NativeModule* native_module,
ErrorThrower* thrower) {
DCHECK(!thrower->error());
ModuleWireBytes wire_bytes{native_module->wire_bytes()};
const WasmModule* module = native_module->module();
uint32_t start = module->num_imported_functions;
uint32_t end = start + module->num_declared_functions;
for (uint32_t i = start; i < end; ++i) {
const WasmFunction* func = &module->functions[i];
Vector<const uint8_t> code = wire_bytes.GetFunctionBytes(func);
FunctionBody body{func->sig, func->code.offset(), code.start(), code.end()};
DecodeResult result;
{
// Time only the decode/verify step.
auto time_counter = SELECT_WASM_COUNTER(
isolate->counters(), module->origin, wasm_decode, function_time);
TimedHistogramScope wasm_decode_function_time_scope(time_counter);
WasmFeatures detected;
result = VerifyWasmCode(isolate->allocator(),
native_module->enabled_features(), module,
&detected, body);
}
if (result.failed()) {
// Include the function name in the error when one is available.
WasmName name = wire_bytes.GetNameOrNull(func, module);
if (name.start() == nullptr) {
thrower->CompileError("Compiling function #%d failed: %s @+%u", i,
result.error().message().c_str(),
result.error().offset());
} else {
TruncatedUserString<> name(wire_bytes.GetNameOrNull(func, module));
thrower->CompileError("Compiling function #%d:\"%.*s\" failed: %s @+%u",
i, name.length(), name.start(),
result.error().message().c_str(),
result.error().offset());
}
break;
}
}
}
void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower, void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
const WasmModule* wasm_module, const WasmModule* wasm_module,
NativeModule* native_module) { NativeModule* native_module) {
ModuleWireBytes wire_bytes(native_module->wire_bytes()); ModuleWireBytes wire_bytes(native_module->wire_bytes());
if (FLAG_wasm_lazy_compilation || if (compile_lazy(wasm_module)) {
(FLAG_asm_wasm_lazy_compilation && wasm_module->origin == kAsmJsOrigin)) {
if (wasm_module->origin == kWasmOrigin) { if (wasm_module->origin == kWasmOrigin) {
// Validate wasm modules for lazy compilation. Don't validate asm.js // Validate wasm modules for lazy compilation. Don't validate asm.js
// modules, they are valid by construction (otherwise a CHECK will fail // modules, they are valid by construction (otherwise a CHECK will fail
...@@ -716,12 +626,11 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower, ...@@ -716,12 +626,11 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
// TODO(clemensh): According to the spec, we can actually skip validation // TODO(clemensh): According to the spec, we can actually skip validation
// at module creation time, and return a function that always traps at // at module creation time, and return a function that always traps at
// (lazy) compilation time. // (lazy) compilation time.
ValidateSequentially(isolate->counters(), isolate->allocator(), ValidateSequentially(isolate, native_module, thrower);
native_module, thrower);
if (thrower->error()) return; if (thrower->error()) return;
} }
native_module->set_lazy_compilation(true);
native_module->UseLazyStubs(); native_module->SetLazyBuiltin();
} else { } else {
size_t funcs_to_compile = size_t funcs_to_compile =
wasm_module->functions.size() - wasm_module->num_imported_functions; wasm_module->functions.size() - wasm_module->num_imported_functions;
...@@ -737,8 +646,7 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower, ...@@ -737,8 +646,7 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
} }
auto* compilation_state = Impl(native_module->compilation_state()); auto* compilation_state = Impl(native_module->compilation_state());
if (compilation_state->failed()) { if (compilation_state->failed()) {
ValidateSequentially(isolate->counters(), isolate->allocator(), ValidateSequentially(isolate, native_module, thrower);
native_module, thrower);
CHECK(thrower->error()); CHECK(thrower->error());
} }
} }
...@@ -790,9 +698,26 @@ class BackgroundCompileTask : public CancelableTask { ...@@ -790,9 +698,26 @@ class BackgroundCompileTask : public CancelableTask {
auto publish_results = auto publish_results =
[&results_to_publish](BackgroundCompileScope* compile_scope) { [&results_to_publish](BackgroundCompileScope* compile_scope) {
if (results_to_publish.empty()) return; if (results_to_publish.empty()) return;
compile_scope->compilation_state()->FinishUnits( // TODO(clemensh): Refactor {OnFinishedUnit} and remove this.
VectorOf(results_to_publish)); std::vector<ExecutionTier> requested_tiers;
requested_tiers.reserve(results_to_publish.size());
for (auto& result : results_to_publish) {
requested_tiers.push_back(result.requested_tier);
}
std::vector<WasmCode*> generated_code =
compile_scope->native_module()->AddCompiledCode(
VectorOf(results_to_publish));
results_to_publish.clear(); results_to_publish.clear();
// Account for the finished compilation units.
// TODO(clemensh): This takes a lock on each invokation. Only do this
// once and pass accumulated counts.
DCHECK_EQ(generated_code.size(), requested_tiers.size());
for (size_t i = 0; i < generated_code.size(); ++i) {
compile_scope->compilation_state()->OnFinishedUnit(
requested_tiers[i], generated_code[i]);
}
}; };
bool compilation_failed = false; bool compilation_failed = false;
...@@ -1085,8 +1010,7 @@ void AsyncCompileJob::DecodeFailed(const WasmError& error) { ...@@ -1085,8 +1010,7 @@ void AsyncCompileJob::DecodeFailed(const WasmError& error) {
void AsyncCompileJob::AsyncCompileFailed() { void AsyncCompileJob::AsyncCompileFailed() {
ErrorThrower thrower(isolate_, "WebAssembly.compile()"); ErrorThrower thrower(isolate_, "WebAssembly.compile()");
ValidateSequentially(isolate_->counters(), isolate_->allocator(), ValidateSequentially(isolate_, native_module_.get(), &thrower);
native_module_.get(), &thrower);
DCHECK(thrower.error()); DCHECK(thrower.error());
// {job} keeps the {this} pointer alive. // {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job = std::shared_ptr<AsyncCompileJob> job =
...@@ -1352,15 +1276,11 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep { ...@@ -1352,15 +1276,11 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
// InitializeCompilationUnits always returns 0 for streaming compilation, // InitializeCompilationUnits always returns 0 for streaming compilation,
// then DoAsync would do the same as NextStep already. // then DoAsync would do the same as NextStep already.
compilation_state->SetNumberOfFunctionsToCompile(
module_->num_declared_functions);
// Add compilation units and kick off compilation. // Add compilation units and kick off compilation.
auto isolate = job->isolate(); InitializeCompilationUnits(job->native_module_.get(),
bool success = InitializeCompilationUnits( job->isolate()->wasm_engine());
isolate, job->native_module_.get(), isolate->wasm_engine());
if (!success) {
// TODO(frgossen): Add test coverage for this path.
DCHECK(job->native_module_->enabled_features().compilation_hints);
job->DoSync<CompileFailed>();
}
} }
} }
}; };
...@@ -1526,18 +1446,11 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader( ...@@ -1526,18 +1446,11 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
// task. // task.
job_->DoImmediately<AsyncCompileJob::PrepareAndStartCompile>( job_->DoImmediately<AsyncCompileJob::PrepareAndStartCompile>(
decoder_.shared_module(), false); decoder_.shared_module(), false);
job_->native_module_->compilation_state()->SetWireBytesStorage(
std::move(wire_bytes_storage));
auto* compilation_state = Impl(job_->native_module_->compilation_state()); auto* compilation_state = Impl(job_->native_module_->compilation_state());
compilation_state->SetWireBytesStorage(std::move(wire_bytes_storage)); compilation_state->SetNumberOfFunctionsToCompile(functions_count);
// Set number of functions that must be compiled to consider the module fully
// compiled.
auto wasm_module = job_->native_module_->module();
int num_functions = wasm_module->num_declared_functions;
DCHECK_IMPLIES(!job_->native_module_->enabled_features().compilation_hints,
wasm_module->num_lazy_compilation_hints == 0);
int num_lazy_functions = wasm_module->num_lazy_compilation_hints;
compilation_state->SetNumberOfFunctionsToCompile(num_functions,
num_lazy_functions);
// Set outstanding_finishers_ to 2, because both the AsyncCompileJob and the // Set outstanding_finishers_ to 2, because both the AsyncCompileJob and the
// AsyncStreamingProcessor have to finish. // AsyncStreamingProcessor have to finish.
...@@ -1555,30 +1468,11 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes, ...@@ -1555,30 +1468,11 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
decoder_.DecodeFunctionBody( decoder_.DecodeFunctionBody(
num_functions_, static_cast<uint32_t>(bytes.length()), offset, false); num_functions_, static_cast<uint32_t>(bytes.length()), offset, false);
uint32_t func_index = int index = num_functions_ + decoder_.module()->num_imported_functions;
num_functions_ + decoder_.module()->num_imported_functions; compilation_unit_builder_->AddUnit(index);
NativeModule* native_module = job_->native_module_.get();
if (IsLazyCompilation(native_module->module(), native_module,
native_module->enabled_features(), func_index)) {
ErrorThrower thrower(job_->isolate(), "WebAssembly.compile()");
auto counters = job_->isolate()->counters();
auto allocator = job_->isolate()->allocator();
ValidateSequentially(counters, allocator, native_module, func_index,
&thrower);
native_module->UseLazyStub(func_index);
if (thrower.error()) {
// TODO(frgossen): Add test coverage for this path.
DCHECK(native_module->enabled_features().compilation_hints);
thrower.Reset();
return false;
}
} else {
compilation_unit_builder_->AddUnits(func_index);
}
++num_functions_; ++num_functions_;
// This method always succeeds. The return value is necessary to comply with
// the StreamingProcessor interface.
return true; return true;
} }
...@@ -1678,14 +1572,14 @@ void CompilationStateImpl::AbortCompilation() { ...@@ -1678,14 +1572,14 @@ void CompilationStateImpl::AbortCompilation() {
callbacks_.clear(); callbacks_.clear();
} }
void CompilationStateImpl::SetNumberOfFunctionsToCompile( void CompilationStateImpl::SetNumberOfFunctionsToCompile(int num_functions) {
int num_functions, int num_lazy_functions) {
DCHECK(!failed()); DCHECK(!failed());
base::MutexGuard guard(&callbacks_mutex_); base::MutexGuard guard(&callbacks_mutex_);
outstanding_baseline_units_ = num_functions;
int num_functions_to_compile = num_functions - num_lazy_functions; if (compile_mode_ == CompileMode::kTiering) {
outstanding_baseline_functions_ = num_functions_to_compile; outstanding_tiering_units_ = num_functions;
outstanding_top_tier_functions_ = num_functions_to_compile; }
} }
void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) { void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
...@@ -1695,146 +1589,98 @@ void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) { ...@@ -1695,146 +1589,98 @@ void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
void CompilationStateImpl::AddCompilationUnits( void CompilationStateImpl::AddCompilationUnits(
std::vector<std::unique_ptr<WasmCompilationUnit>>& baseline_units, std::vector<std::unique_ptr<WasmCompilationUnit>>& baseline_units,
std::vector<std::unique_ptr<WasmCompilationUnit>>& top_tier_units) { std::vector<std::unique_ptr<WasmCompilationUnit>>& tiering_units) {
{ {
base::MutexGuard guard(&mutex_); base::MutexGuard guard(&mutex_);
DCHECK_LE(tiering_units.size(), baseline_units.size());
DCHECK_IMPLIES(compile_mode_ == CompileMode::kTiering &&
!native_module_->enabled_features().compilation_hints,
tiering_units.size() == baseline_units.size());
DCHECK_IMPLIES(compile_mode_ == CompileMode::kTiering &&
!native_module_->enabled_features().compilation_hints,
tiering_units.back()->tier() == ExecutionTier::kOptimized);
DCHECK_IMPLIES(compile_mode_ == CompileMode::kRegular, DCHECK_IMPLIES(compile_mode_ == CompileMode::kRegular,
top_tier_compilation_units_.empty()); tiering_compilation_units_.empty());
baseline_compilation_units_.insert( baseline_compilation_units_.insert(
baseline_compilation_units_.end(), baseline_compilation_units_.end(),
std::make_move_iterator(baseline_units.begin()), std::make_move_iterator(baseline_units.begin()),
std::make_move_iterator(baseline_units.end())); std::make_move_iterator(baseline_units.end()));
if (!top_tier_units.empty()) { if (!tiering_units.empty()) {
top_tier_compilation_units_.insert( tiering_compilation_units_.insert(
top_tier_compilation_units_.end(), tiering_compilation_units_.end(),
std::make_move_iterator(top_tier_units.begin()), std::make_move_iterator(tiering_units.begin()),
std::make_move_iterator(top_tier_units.end())); std::make_move_iterator(tiering_units.end()));
} }
} }
RestartBackgroundTasks(); RestartBackgroundTasks();
} }
void CompilationStateImpl::AddTopTierCompilationUnit(
std::unique_ptr<WasmCompilationUnit> unit) {
{
base::MutexGuard guard(&mutex_);
DCHECK_EQ(compile_mode_, CompileMode::kTiering);
DCHECK(FLAG_wasm_lazy_compilation || FLAG_asm_wasm_lazy_compilation ||
native_module_->enabled_features().compilation_hints);
top_tier_compilation_units_.emplace_back(std::move(unit));
}
RestartBackgroundTasks();
}
std::unique_ptr<WasmCompilationUnit> std::unique_ptr<WasmCompilationUnit>
CompilationStateImpl::GetNextCompilationUnit() { CompilationStateImpl::GetNextCompilationUnit() {
base::MutexGuard guard(&mutex_); base::MutexGuard guard(&mutex_);
std::vector<std::unique_ptr<WasmCompilationUnit>>* units = nullptr; std::vector<std::unique_ptr<WasmCompilationUnit>>& units =
baseline_compilation_units_.empty() ? tiering_compilation_units_
: baseline_compilation_units_;
if (!baseline_compilation_units_.empty()) { if (!units.empty()) {
units = &baseline_compilation_units_; std::unique_ptr<WasmCompilationUnit> unit = std::move(units.back());
} else if (!top_tier_compilation_units_.empty()) { units.pop_back();
units = &top_tier_compilation_units_; return unit;
} else {
return std::unique_ptr<WasmCompilationUnit>();
} }
DCHECK_NOT_NULL(units);
DCHECK(!units->empty());
std::unique_ptr<WasmCompilationUnit> unit = std::move(units->back());
units->pop_back();
return unit;
}
void CompilationStateImpl::FinishUnit(WasmCompilationResult result) { return std::unique_ptr<WasmCompilationUnit>();
FinishUnits({&result, 1});
} }
void CompilationStateImpl::FinishUnits( void CompilationStateImpl::OnFinishedUnit(ExecutionTier tier, WasmCode* code) {
Vector<WasmCompilationResult> compilation_results) { // This mutex guarantees that events happen in the right order.
base::MutexGuard guard(&callbacks_mutex_); base::MutexGuard guard(&callbacks_mutex_);
// Assume an order of execution tiers that represents the quality of their // If we are *not* compiling in tiering mode, then all units are counted as
// generated code. // baseline units.
static_assert(ExecutionTier::kInterpreter < ExecutionTier::kBaseline && bool is_tiering_mode = compile_mode_ == CompileMode::kTiering;
ExecutionTier::kBaseline < ExecutionTier::kOptimized, bool is_tiering_unit = is_tiering_mode && tier == ExecutionTier::kOptimized;
"Assume an order on execution tiers");
// Sanity check: If we are not in tiering mode, there cannot be outstanding
auto module = native_module_->module(); // tiering units.
auto enabled_features = native_module_->enabled_features(); DCHECK_IMPLIES(!is_tiering_mode, outstanding_tiering_units_ == 0);
std::vector<WasmCodeUpdate> code_update_vector =
native_module_->AddCompiledCode(compilation_results); bool baseline_finished = false;
bool tiering_finished = false;
for (WasmCodeUpdate& code_update : code_update_vector) { if (is_tiering_unit) {
DCHECK_NOT_NULL(code_update.code); DCHECK_LT(0, outstanding_tiering_units_);
DCHECK(code_update.tier.has_value()); --outstanding_tiering_units_;
native_module_->engine()->LogCode(code_update.code); tiering_finished = outstanding_tiering_units_ == 0;
// If baseline compilation has not finished yet, then also trigger
uint32_t func_index = code_update.code->index(); // {kFinishedBaselineCompilation}.
ExecutionTierPair requested_tiers = GetRequestedExecutionTiers( baseline_finished = tiering_finished && outstanding_baseline_units_ > 0;
module, compile_mode(), enabled_features, func_index); } else {
DCHECK_LT(0, outstanding_baseline_units_);
// Reconstruct state before code update. --outstanding_baseline_units_;
bool had_reached_baseline = code_update.prior_tier.has_value(); // If we are in tiering mode and tiering finished before, then do not
bool had_reached_top_tier = // trigger baseline finished.
code_update.prior_tier.has_value() && baseline_finished = outstanding_baseline_units_ == 0 &&
code_update.prior_tier.value() >= requested_tiers.top_tier; (!is_tiering_mode || outstanding_tiering_units_ > 0);
DCHECK_IMPLIES(had_reached_baseline, code_update.prior_tier.has_value() && // If we are not tiering, then we also trigger the "top tier finished"
code_update.prior_tier.value() >= // event when baseline compilation is finished.
requested_tiers.baseline_tier); tiering_finished = baseline_finished && !is_tiering_mode;
}
// Conclude whether we are reaching baseline or top tier.
bool reaches_baseline = !had_reached_baseline; if (baseline_finished) {
bool reaches_top_tier = for (auto& callback : callbacks_)
!had_reached_top_tier && callback(CompilationEvent::kFinishedBaselineCompilation);
code_update.tier.value() >= requested_tiers.top_tier; }
DCHECK_IMPLIES(reaches_baseline, if (tiering_finished) {
code_update.tier.value() >= requested_tiers.baseline_tier); for (auto& callback : callbacks_)
DCHECK_IMPLIES(reaches_top_tier, had_reached_baseline || reaches_baseline); callback(CompilationEvent::kFinishedTopTierCompilation);
// Clear the callbacks because no more events will be delivered.
// Remember state before update. callbacks_.clear();
bool had_completed_baseline_compilation =
outstanding_baseline_functions_ == 0;
bool had_completed_top_tier_compilation =
outstanding_top_tier_functions_ == 0;
// Update state.
if (!IsLazyCompilation(module, native_module_, enabled_features,
func_index)) {
if (reaches_baseline) outstanding_baseline_functions_--;
if (reaches_top_tier) outstanding_top_tier_functions_--;
}
DCHECK_LE(0, outstanding_baseline_functions_);
DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
// Conclude if we are completing baseline or top tier compilation.
bool completes_baseline_compilation = !had_completed_baseline_compilation &&
outstanding_baseline_functions_ == 0;
bool completes_top_tier_compilation = !had_completed_top_tier_compilation &&
outstanding_top_tier_functions_ == 0;
DCHECK_IMPLIES(
completes_top_tier_compilation,
had_completed_baseline_compilation || completes_baseline_compilation);
// Trigger callbacks.
if (completes_baseline_compilation) {
for (auto& callback : callbacks_)
callback(CompilationEvent::kFinishedBaselineCompilation);
}
if (completes_top_tier_compilation) {
for (auto& callback : callbacks_)
callback(CompilationEvent::kFinishedTopTierCompilation);
// Clear the callbacks because no more events will be delivered.
callbacks_.clear();
}
} }
if (code != nullptr) native_module_->engine()->LogCode(code);
} }
void CompilationStateImpl::RestartBackgroundCompileTask() { void CompilationStateImpl::RestartBackgroundCompileTask() {
...@@ -1884,7 +1730,7 @@ void CompilationStateImpl::RestartBackgroundTasks() { ...@@ -1884,7 +1730,7 @@ void CompilationStateImpl::RestartBackgroundTasks() {
DCHECK_LE(num_background_tasks_, max_background_tasks_); DCHECK_LE(num_background_tasks_, max_background_tasks_);
if (num_background_tasks_ == max_background_tasks_) return; if (num_background_tasks_ == max_background_tasks_) return;
size_t num_compilation_units = size_t num_compilation_units =
baseline_compilation_units_.size() + top_tier_compilation_units_.size(); baseline_compilation_units_.size() + tiering_compilation_units_.size();
num_restart = max_background_tasks_ - num_background_tasks_; num_restart = max_background_tasks_ - num_background_tasks_;
DCHECK_LE(0, num_restart); DCHECK_LE(0, num_restart);
if (num_compilation_units < static_cast<size_t>(num_restart)) { if (num_compilation_units < static_cast<size_t>(num_restart)) {
......
...@@ -1008,10 +1008,7 @@ class ModuleDecoderImpl : public Decoder { ...@@ -1008,10 +1008,7 @@ class ModuleDecoderImpl : public Decoder {
} }
// Decode sequence of compilation hints. // Decode sequence of compilation hints.
if (decoder.ok()) { if (decoder.ok()) module_->compilation_hints.reserve(hint_count);
module_->compilation_hints.reserve(hint_count);
module_->num_lazy_compilation_hints = 0;
}
for (uint32_t i = 0; decoder.ok() && i < hint_count; i++) { for (uint32_t i = 0; decoder.ok() && i < hint_count; i++) {
TRACE("DecodeCompilationHints[%d] module+%d\n", i, TRACE("DecodeCompilationHints[%d] module+%d\n", i,
static_cast<int>(pc_ - start_)); static_cast<int>(pc_ - start_));
...@@ -1050,18 +1047,12 @@ class ModuleDecoderImpl : public Decoder { ...@@ -1050,18 +1047,12 @@ class ModuleDecoderImpl : public Decoder {
} }
// Happily accept compilation hint. // Happily accept compilation hint.
if (decoder.ok()) { if (decoder.ok()) module_->compilation_hints.push_back(std::move(hint));
if (hint.strategy == WasmCompilationHintStrategy::kLazy) {
module_->num_lazy_compilation_hints++;
}
module_->compilation_hints.push_back(std::move(hint));
}
} }
// If section was invalid reset compilation hints. // If section was invalid reset compilation hints.
if (decoder.failed()) { if (decoder.failed()) {
module_->compilation_hints.clear(); module_->compilation_hints.clear();
module_->num_lazy_compilation_hints = 0;
} }
// @TODO(frgossen) Skip the whole compilation hints section in the outer // @TODO(frgossen) Skip the whole compilation hints section in the outer
......
...@@ -433,25 +433,19 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) { ...@@ -433,25 +433,19 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
return AddAndPublishAnonymousCode(code, WasmCode::kFunction); return AddAndPublishAnonymousCode(code, WasmCode::kFunction);
} }
void NativeModule::UseLazyStubs() { void NativeModule::SetLazyBuiltin() {
uint32_t start = module_->num_imported_functions; uint32_t num_wasm_functions = module_->num_declared_functions;
uint32_t end = start + module_->num_declared_functions; if (num_wasm_functions == 0) return;
for (uint32_t func_index = start; func_index < end; func_index++) { // Fill the jump table with jumps to the lazy compile stub.
UseLazyStub(func_index); Address lazy_compile_target = runtime_stub_entry(WasmCode::kWasmCompileLazy);
for (uint32_t i = 0; i < num_wasm_functions; ++i) {
JumpTableAssembler::EmitLazyCompileJumpSlot(
jump_table_->instruction_start(), i,
i + module_->num_imported_functions, lazy_compile_target,
WasmCode::kNoFlushICache);
} }
} FlushInstructionCache(jump_table_->instructions().start(),
jump_table_->instructions().size());
void NativeModule::UseLazyStub(uint32_t func_index) {
DCHECK_LE(module_->num_imported_functions, func_index);
DCHECK_LT(func_index,
module_->num_imported_functions + module_->num_declared_functions);
// Add jump table entry for jump to the lazy compile stub.
uint32_t slot_index = func_index - module_->num_imported_functions;
DCHECK_NE(runtime_stub_entry(WasmCode::kWasmCompileLazy), kNullAddress);
JumpTableAssembler::EmitLazyCompileJumpSlot(
jump_table_->instruction_start(), slot_index, func_index,
runtime_stub_entry(WasmCode::kWasmCompileLazy), WasmCode::kFlushICache);
} }
// TODO(mstarzinger): Remove {Isolate} parameter once {V8_EMBEDDED_BUILTINS} // TODO(mstarzinger): Remove {Isolate} parameter once {V8_EMBEDDED_BUILTINS}
...@@ -587,7 +581,7 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code, ...@@ -587,7 +581,7 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
new_code->MaybePrint(name); new_code->MaybePrint(name);
new_code->Validate(); new_code->Validate();
return PublishCode(std::move(new_code)).code; return PublishCode(std::move(new_code));
} }
std::unique_ptr<WasmCode> NativeModule::AddCode( std::unique_ptr<WasmCode> NativeModule::AddCode(
...@@ -673,98 +667,39 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace( ...@@ -673,98 +667,39 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
return code; return code;
} }
WasmCodeUpdate NativeModule::PublishCode(std::unique_ptr<WasmCode> code) { WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
base::MutexGuard lock(&allocation_mutex_); base::MutexGuard lock(&allocation_mutex_);
return PublishCodeLocked(std::move(code)); return PublishCodeLocked(std::move(code));
} }
namespace { WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
WasmCode::Tier GetCodeTierForExecutionTier(ExecutionTier tier) {
switch (tier) {
case ExecutionTier::kInterpreter:
return WasmCode::Tier::kOther;
case ExecutionTier::kBaseline:
return WasmCode::Tier::kLiftoff;
case ExecutionTier::kOptimized:
return WasmCode::Tier::kTurbofan;
}
}
WasmCode::Kind GetCodeKindForExecutionTier(ExecutionTier tier) {
switch (tier) {
case ExecutionTier::kInterpreter:
return WasmCode::Kind::kInterpreterEntry;
case ExecutionTier::kBaseline:
case ExecutionTier::kOptimized:
return WasmCode::Kind::kFunction;
}
}
// TODO(frgossen): We should merge ExecutionTier with WasmCode::Tier.
base::Optional<ExecutionTier> GetExecutionTier(WasmCode* code) {
if (code == nullptr) return {};
switch (code->tier()) {
case WasmCode::Tier::kLiftoff:
DCHECK_EQ(code->kind(), WasmCode::Kind::kFunction);
return ExecutionTier::kBaseline;
case WasmCode::Tier::kTurbofan:
DCHECK_EQ(code->kind(), WasmCode::Kind::kFunction);
return ExecutionTier::kOptimized;
case WasmCode::Tier::kOther:
if (code->kind() == WasmCode::Kind::kInterpreterEntry)
return ExecutionTier::kInterpreter;
return {};
}
UNREACHABLE();
}
} // namespace
WasmCodeUpdate NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
// The caller must hold the {allocation_mutex_}, thus we fail to lock it here. // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
DCHECK(!allocation_mutex_.TryLock()); DCHECK(!allocation_mutex_.TryLock());
base::Optional<ExecutionTier> prior_tier;
base::Optional<ExecutionTier> tier;
// Skip publishing code if there is an active redirection to the interpreter // Skip publishing code if there is an active redirection to the interpreter
// for the given function index, in order to preserve the redirection. // for the given function index, in order to preserve the redirection.
if (!code->IsAnonymous() && !has_interpreter_redirection(code->index())) { if (!code->IsAnonymous() && !has_interpreter_redirection(code->index())) {
DCHECK_LT(code->index(), num_functions()); DCHECK_LT(code->index(), num_functions());
DCHECK_LE(module_->num_imported_functions, code->index()); DCHECK_LE(module_->num_imported_functions, code->index());
// Assume an order of execution tiers that represents the quality of their // Update code table, except for interpreter entries that would overwrite
// generated code. // existing code.
static_assert(ExecutionTier::kInterpreter < ExecutionTier::kBaseline &&
ExecutionTier::kBaseline < ExecutionTier::kOptimized,
"Assume an order on execution tiers");
// Update code table but avoid to fall back to less optimized code. We use
// the new code if it was compiled with a higher tier and also if we cannot
// determine the tier.
uint32_t slot_idx = code->index() - module_->num_imported_functions; uint32_t slot_idx = code->index() - module_->num_imported_functions;
WasmCode* prior_code = code_table_[slot_idx]; if (code->kind() != WasmCode::kInterpreterEntry ||
prior_tier = GetExecutionTier(prior_code); code_table_[slot_idx] == nullptr) {
tier = GetExecutionTier(code.get());
bool code_upgrade = !prior_tier.has_value() || !tier.has_value() ||
prior_tier.value() < tier.value();
if (code_upgrade) {
code_table_[slot_idx] = code.get(); code_table_[slot_idx] = code.get();
} }
// Patch jump table. Ensure to use optimized code and interpreter entries. // Patch jump table.
if (code_upgrade || code->kind_ == WasmCode::Kind::kInterpreterEntry) { JumpTableAssembler::PatchJumpTableSlot(jump_table_->instruction_start(),
JumpTableAssembler::PatchJumpTableSlot( slot_idx, code->instruction_start(),
jump_table_->instruction_start(), slot_idx, code->instruction_start(), WasmCode::kFlushICache);
WasmCode::kFlushICache);
}
} }
if (code->kind_ == WasmCode::Kind::kInterpreterEntry) { if (code->kind_ == WasmCode::Kind::kInterpreterEntry) {
SetInterpreterRedirection(code->index()); SetInterpreterRedirection(code->index());
} }
WasmCodeUpdate update; WasmCode* ret = code.get();
update.code = code.get();
update.tier = tier;
update.prior_tier = prior_tier;
owned_code_.emplace_back(std::move(code)); owned_code_.emplace_back(std::move(code));
return update; return ret;
} }
WasmCode* NativeModule::AddDeserializedCode( WasmCode* NativeModule::AddDeserializedCode(
...@@ -791,7 +726,7 @@ WasmCode* NativeModule::AddDeserializedCode( ...@@ -791,7 +726,7 @@ WasmCode* NativeModule::AddDeserializedCode(
// Note: we do not flush the i-cache here, since the code needs to be // Note: we do not flush the i-cache here, since the code needs to be
// relocated anyway. The caller is responsible for flushing the i-cache later. // relocated anyway. The caller is responsible for flushing the i-cache later.
return PublishCode(std::move(code)).code; return PublishCode(std::move(code));
} }
std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const { std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
...@@ -823,7 +758,7 @@ WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) { ...@@ -823,7 +758,7 @@ WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
OwnedVector<const uint8_t>{}, // source_pos OwnedVector<const uint8_t>{}, // source_pos
WasmCode::kJumpTable, // kind WasmCode::kJumpTable, // kind
WasmCode::kOther}}; // tier WasmCode::kOther}}; // tier
return PublishCode(std::move(code)).code; return PublishCode(std::move(code));
} }
Vector<byte> NativeModule::AllocateForCode(size_t size) { Vector<byte> NativeModule::AllocateForCode(size_t size) {
...@@ -1267,11 +1202,34 @@ void NativeModule::SampleCodeSize( ...@@ -1267,11 +1202,34 @@ void NativeModule::SampleCodeSize(
histogram->AddSample(code_size_mb); histogram->AddSample(code_size_mb);
} }
WasmCodeUpdate NativeModule::AddCompiledCode(WasmCompilationResult result) { namespace {
WasmCode::Tier GetCodeTierForExecutionTier(ExecutionTier tier) {
switch (tier) {
case ExecutionTier::kInterpreter:
return WasmCode::Tier::kOther;
case ExecutionTier::kBaseline:
return WasmCode::Tier::kLiftoff;
case ExecutionTier::kOptimized:
return WasmCode::Tier::kTurbofan;
}
}
WasmCode::Kind GetCodeKindForExecutionTier(ExecutionTier tier) {
switch (tier) {
case ExecutionTier::kInterpreter:
return WasmCode::Kind::kInterpreterEntry;
case ExecutionTier::kBaseline:
case ExecutionTier::kOptimized:
return WasmCode::Kind::kFunction;
}
}
} // namespace
WasmCode* NativeModule::AddCompiledCode(WasmCompilationResult result) {
return AddCompiledCode({&result, 1})[0]; return AddCompiledCode({&result, 1})[0];
} }
std::vector<WasmCodeUpdate> NativeModule::AddCompiledCode( std::vector<WasmCode*> NativeModule::AddCompiledCode(
Vector<WasmCompilationResult> results) { Vector<WasmCompilationResult> results) {
DCHECK(!results.is_empty()); DCHECK(!results.is_empty());
// First, allocate code space for all the results. // First, allocate code space for all the results.
...@@ -1301,15 +1259,16 @@ std::vector<WasmCodeUpdate> NativeModule::AddCompiledCode( ...@@ -1301,15 +1259,16 @@ std::vector<WasmCodeUpdate> NativeModule::AddCompiledCode(
DCHECK_EQ(0, code_space.size()); DCHECK_EQ(0, code_space.size());
// Under the {allocation_mutex_}, publish the code. // Under the {allocation_mutex_}, publish the code.
std::vector<WasmCodeUpdate> code_updates; std::vector<WasmCode*> returned_code;
code_updates.reserve(results.size()); returned_code.reserve(results.size());
{ {
base::MutexGuard lock(&allocation_mutex_); base::MutexGuard lock(&allocation_mutex_);
for (auto& result : generated_code) for (auto& result : generated_code) {
code_updates.push_back(PublishCodeLocked(std::move(result))); returned_code.push_back(PublishCodeLocked(std::move(result)));
}
} }
return code_updates; return returned_code;
} }
void NativeModule::FreeCode(Vector<WasmCode* const> codes) { void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
......
...@@ -14,7 +14,6 @@ ...@@ -14,7 +14,6 @@
#include <vector> #include <vector>
#include "src/base/macros.h" #include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/builtins/builtins-definitions.h" #include "src/builtins/builtins-definitions.h"
#include "src/handles.h" #include "src/handles.h"
#include "src/trap-handler/trap-handler.h" #include "src/trap-handler/trap-handler.h"
...@@ -73,12 +72,6 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final { ...@@ -73,12 +72,6 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool); DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool);
}; };
struct WasmCodeUpdate {
WasmCode* code = nullptr;
base::Optional<ExecutionTier> tier;
base::Optional<ExecutionTier> prior_tier;
};
class V8_EXPORT_PRIVATE WasmCode final { class V8_EXPORT_PRIVATE WasmCode final {
public: public:
enum Kind { enum Kind {
...@@ -277,9 +270,9 @@ class V8_EXPORT_PRIVATE NativeModule final { ...@@ -277,9 +270,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// {PublishCode} makes the code available to the system by entering it into // {PublishCode} makes the code available to the system by entering it into
// the code table and patching the jump table. It returns a raw pointer to the // the code table and patching the jump table. It returns a raw pointer to the
// given {WasmCode} object. // given {WasmCode} object.
WasmCodeUpdate PublishCode(std::unique_ptr<WasmCode>); WasmCode* PublishCode(std::unique_ptr<WasmCode>);
// Hold the {allocation_mutex_} when calling {PublishCodeLocked}. // Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
WasmCodeUpdate PublishCodeLocked(std::unique_ptr<WasmCode>); WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>);
WasmCode* AddDeserializedCode( WasmCode* AddDeserializedCode(
uint32_t index, Vector<const byte> instructions, uint32_t stack_slots, uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
...@@ -295,12 +288,10 @@ class V8_EXPORT_PRIVATE NativeModule final { ...@@ -295,12 +288,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Adds anonymous code for testing purposes. // Adds anonymous code for testing purposes.
WasmCode* AddCodeForTesting(Handle<Code> code); WasmCode* AddCodeForTesting(Handle<Code> code);
// Use this to setup lazy compilation for the entire module ({UseLazyStubs}) // Use this to start lazy compilation for the entire module. It will use the
// or for individual functions ({UseLazyStub}). It will use the existing // existing {WasmCode::kWasmCompileLazy} runtime stub and populate the jump
// {WasmCode::kWasmCompileLazy} runtime stub and populate the jump table with // table with trampolines to that runtime stub.
// trampolines to that runtime stub. void SetLazyBuiltin();
void UseLazyStubs();
void UseLazyStub(uint32_t func_index);
// Initializes all runtime stubs by setting up entry addresses in the runtime // Initializes all runtime stubs by setting up entry addresses in the runtime
// stub table. It must be called exactly once per native module before adding // stub table. It must be called exactly once per native module before adding
...@@ -379,8 +370,6 @@ class V8_EXPORT_PRIVATE NativeModule final { ...@@ -379,8 +370,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
UseTrapHandler use_trap_handler() const { return use_trap_handler_; } UseTrapHandler use_trap_handler() const { return use_trap_handler_; }
void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; } void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
bool lazy_compile_frozen() const { return lazy_compile_frozen_; } bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
void set_lazy_compilation(bool lazy) { lazy_compilation_ = lazy; }
bool lazy_compilation() const { return lazy_compilation_; }
Vector<const uint8_t> wire_bytes() const { return wire_bytes_->as_vector(); } Vector<const uint8_t> wire_bytes() const { return wire_bytes_->as_vector(); }
const WasmModule* module() const { return module_.get(); } const WasmModule* module() const { return module_.get(); }
std::shared_ptr<const WasmModule> shared_module() const { return module_; } std::shared_ptr<const WasmModule> shared_module() const { return module_; }
...@@ -405,8 +394,8 @@ class V8_EXPORT_PRIVATE NativeModule final { ...@@ -405,8 +394,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
enum CodeSamplingTime : int8_t { kAfterBaseline, kAfterTopTier, kSampling }; enum CodeSamplingTime : int8_t { kAfterBaseline, kAfterTopTier, kSampling };
void SampleCodeSize(Counters*, CodeSamplingTime) const; void SampleCodeSize(Counters*, CodeSamplingTime) const;
WasmCodeUpdate AddCompiledCode(WasmCompilationResult); WasmCode* AddCompiledCode(WasmCompilationResult);
std::vector<WasmCodeUpdate> AddCompiledCode(Vector<WasmCompilationResult>); std::vector<WasmCode*> AddCompiledCode(Vector<WasmCompilationResult>);
// Free a set of functions of this module. Uncommits whole pages if possible. // Free a set of functions of this module. Uncommits whole pages if possible.
// The given vector must be ordered by the instruction start address, and all // The given vector must be ordered by the instruction start address, and all
...@@ -535,7 +524,6 @@ class V8_EXPORT_PRIVATE NativeModule final { ...@@ -535,7 +524,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
UseTrapHandler use_trap_handler_ = kNoTrapHandler; UseTrapHandler use_trap_handler_ = kNoTrapHandler;
bool is_executable_ = false; bool is_executable_ = false;
bool lazy_compile_frozen_ = false; bool lazy_compile_frozen_ = false;
bool lazy_compilation_ = false;
DISALLOW_COPY_AND_ASSIGN(NativeModule); DISALLOW_COPY_AND_ASSIGN(NativeModule);
}; };
......
...@@ -195,7 +195,6 @@ struct V8_EXPORT_PRIVATE WasmModule { ...@@ -195,7 +195,6 @@ struct V8_EXPORT_PRIVATE WasmModule {
uint32_t num_declared_functions = 0; // excluding imported uint32_t num_declared_functions = 0; // excluding imported
uint32_t num_exported_functions = 0; uint32_t num_exported_functions = 0;
uint32_t num_declared_data_segments = 0; // From the DataCount section. uint32_t num_declared_data_segments = 0; // From the DataCount section.
uint32_t num_lazy_compilation_hints = 0; // From compilation hints section.
WireBytesRef name = {0, 0}; WireBytesRef name = {0, 0};
std::vector<FunctionSig*> signatures; // by signature index std::vector<FunctionSig*> signatures; // by signature index
std::vector<uint32_t> signature_ids; // by signature index std::vector<uint32_t> signature_ids; // by signature index
......
...@@ -503,12 +503,7 @@ bool NativeModuleDeserializer::ReadHeader(Reader* reader) { ...@@ -503,12 +503,7 @@ bool NativeModuleDeserializer::ReadHeader(Reader* reader) {
bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) { bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
size_t code_section_size = reader->Read<size_t>(); size_t code_section_size = reader->Read<size_t>();
if (code_section_size == 0) { if (code_section_size == 0) return true;
DCHECK(FLAG_wasm_lazy_compilation ||
native_module_->enabled_features().compilation_hints);
native_module_->UseLazyStub(fn_index);
return true;
}
size_t constant_pool_offset = reader->Read<size_t>(); size_t constant_pool_offset = reader->Read<size_t>();
size_t safepoint_table_offset = reader->Read<size_t>(); size_t safepoint_table_offset = reader->Read<size_t>();
size_t handler_table_offset = reader->Read<size_t>(); size_t handler_table_offset = reader->Read<size_t>();
...@@ -630,8 +625,9 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule( ...@@ -630,8 +625,9 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
std::move(wire_bytes_copy), script, Handle<ByteArray>::null()); std::move(wire_bytes_copy), script, Handle<ByteArray>::null());
NativeModule* native_module = module_object->native_module(); NativeModule* native_module = module_object->native_module();
native_module->set_lazy_compilation(FLAG_wasm_lazy_compilation); if (FLAG_wasm_lazy_compilation) {
native_module->SetLazyBuiltin();
}
NativeModuleDeserializer deserializer(native_module); NativeModuleDeserializer deserializer(native_module);
Reader reader(data + kVersionSize); Reader reader(data + kVersionSize);
......
...@@ -501,9 +501,7 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) { ...@@ -501,9 +501,7 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
WasmCompilationResult result = unit.ExecuteCompilation( WasmCompilationResult result = unit.ExecuteCompilation(
&env, native_module->compilation_state()->GetWireBytesStorage(), &env, native_module->compilation_state()->GetWireBytesStorage(),
isolate()->counters(), &unused_detected_features); isolate()->counters(), &unused_detected_features);
WasmCodeUpdate code_update = WasmCode* code = native_module->AddCompiledCode(std::move(result));
native_module->AddCompiledCode(std::move(result));
WasmCode* code = code_update.code;
DCHECK_NOT_NULL(code); DCHECK_NOT_NULL(code);
if (WasmCode::ShouldBeLogged(isolate())) code->LogCode(isolate()); if (WasmCode::ShouldBeLogged(isolate())) code->LogCode(isolate());
} }
......
...@@ -177,7 +177,7 @@ class WasmCodeManagerTest : public TestWithContext, ...@@ -177,7 +177,7 @@ class WasmCodeManagerTest : public TestWithContext,
desc.instr_size = static_cast<int>(size); desc.instr_size = static_cast<int>(size);
std::unique_ptr<WasmCode> code = native_module->AddCode( std::unique_ptr<WasmCode> code = native_module->AddCode(
index, desc, 0, 0, {}, {}, WasmCode::kFunction, WasmCode::kOther); index, desc, 0, 0, {}, {}, WasmCode::kFunction, WasmCode::kOther);
return native_module->PublishCode(std::move(code)).code; return native_module->PublishCode(std::move(code));
} }
size_t page() const { return AllocatePageSize(); } size_t page() const { return AllocatePageSize(); }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment