Commit be470c55 authored by Frederik Gossen's avatar Frederik Gossen Committed by Commit Bot

Revert "[wasm-hints] Enabled Lazy Compilation by Hint"

This reverts commit 09fa63a9.

Reason for revert: Flakes on https://ci.chromium.org/p/v8/builders/ci/V8%20Linux%20-%20shared/29942

Original change's description:
> [wasm-hints] Enabled Lazy Compilation by Hint
> 
> Hints for lazy compilation are now taken into consideration. If the
> custom hints section suggests lazy compilation we do so unless the module
> consists of a single function.
> 
> Bug: v8:9003
> Change-Id: Ibdc400453cee20d4d5c814733887b38fb675b220
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1535827
> Commit-Queue: Frederik Gossen <frgossen@google.com>
> Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
> Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#60557}

TBR=mstarzinger@chromium.org,clemensh@chromium.org,frgossen@google.com

Change-Id: I18dd424fe8cf05f220f7498bb1ebe4b9fce7d240
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:9003
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1547668
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60558}
parent 09fa63a9
......@@ -5948,7 +5948,7 @@ wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
std::move(result.source_positions), wasm::WasmCode::kFunction,
wasm::WasmCode::kOther);
// TODO(titzer): add counters for math intrinsic code size / allocation
return native_module->PublishCode(std::move(wasm_code)).code;
return native_module->PublishCode(std::move(wasm_code));
}
wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
......@@ -6012,7 +6012,7 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
std::move(result.protected_instructions),
std::move(result.source_positions), wasm::WasmCode::kWasmToJsWrapper,
wasm::WasmCode::kOther);
return native_module->PublishCode(std::move(wasm_code)).code;
return native_module->PublishCode(std::move(wasm_code));
}
wasm::WasmCompilationResult CompileWasmInterpreterEntry(
......@@ -6243,9 +6243,7 @@ wasm::WasmCompilationResult TurbofanWasmCompilationUnit::ExecuteCompilation(
// TODO(bradnelson): Improve histogram handling of size_t.
counters->wasm_compile_function_peak_memory_bytes()->AddSample(
static_cast<int>(mcgraph->graph()->zone()->allocation_size()));
auto result = info.ReleaseWasmCompilationResult();
DCHECK_EQ(wasm::ExecutionTier::kOptimized, result->result_tier);
return std::move(*result);
return std::move(*info.ReleaseWasmCompilationResult());
}
wasm::WasmCompilationResult InterpreterCompilationUnit::ExecuteCompilation(
......@@ -6262,7 +6260,6 @@ wasm::WasmCompilationResult InterpreterCompilationUnit::ExecuteCompilation(
wasm_unit_->wasm_engine_, env->enabled_features, wasm_unit_->func_index_,
func_body.sig);
DCHECK(result.succeeded());
DCHECK_EQ(wasm::ExecutionTier::kInterpreter, result.result_tier);
return result;
}
......
......@@ -21,7 +21,6 @@ namespace wasm {
class NativeModule;
class WasmCode;
struct WasmCompilationResult;
class WasmError;
enum RuntimeExceptionSupport : bool {
......@@ -121,8 +120,7 @@ class CompilationState {
bool failed() const;
void FinishUnit(WasmCompilationResult);
void FinishUnits(Vector<WasmCompilationResult>);
void OnFinishedUnit(ExecutionTier, WasmCode*);
private:
friend class NativeModule;
......
......@@ -52,7 +52,6 @@ struct WasmCompilationResult {
MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(WasmCompilationResult);
bool succeeded() const { return code_desc.buffer != nullptr; }
bool failed() const { return !succeeded(); }
operator bool() const { return succeeded(); }
CodeDesc code_desc;
......
......@@ -128,7 +128,7 @@ class CompilationStateImpl {
// Set the number of compilations unit expected to be executed. Needs to be
// set before {AddCompilationUnits} is run, which triggers background
// compilation.
void SetNumberOfFunctionsToCompile(int num_functions, int num_lazy_functions);
void SetNumberOfFunctionsToCompile(int num_functions);
// Add the callback function to be called on compilation events. Needs to be
// set before {AddCompilationUnits} is run to ensure that it receives all
......@@ -138,12 +138,10 @@ class CompilationStateImpl {
// Inserts new functions to compile and kicks off compilation.
void AddCompilationUnits(
std::vector<std::unique_ptr<WasmCompilationUnit>>& baseline_units,
std::vector<std::unique_ptr<WasmCompilationUnit>>& top_tier_units);
void AddTopTierCompilationUnit(std::unique_ptr<WasmCompilationUnit>);
std::vector<std::unique_ptr<WasmCompilationUnit>>& tiering_units);
std::unique_ptr<WasmCompilationUnit> GetNextCompilationUnit();
void FinishUnit(WasmCompilationResult);
void FinishUnits(Vector<WasmCompilationResult>);
void OnFinishedUnit(ExecutionTier, WasmCode*);
void ReportDetectedFeatures(const WasmFeatures& detected);
void OnBackgroundTaskStopped(const WasmFeatures& detected);
......@@ -159,8 +157,9 @@ class CompilationStateImpl {
bool baseline_compilation_finished() const {
base::MutexGuard guard(&callbacks_mutex_);
DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
return outstanding_baseline_functions_ == 0;
return outstanding_baseline_units_ == 0 ||
(compile_mode_ == CompileMode::kTiering &&
outstanding_tiering_units_ == 0);
}
CompileMode compile_mode() const { return compile_mode_; }
......@@ -196,7 +195,7 @@ class CompilationStateImpl {
// Protected by {mutex_}:
std::vector<std::unique_ptr<WasmCompilationUnit>> baseline_compilation_units_;
std::vector<std::unique_ptr<WasmCompilationUnit>> top_tier_compilation_units_;
std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_compilation_units_;
int num_background_tasks_ = 0;
......@@ -223,8 +222,8 @@ class CompilationStateImpl {
// Callback functions to be called on compilation events.
std::vector<CompilationState::callback_t> callbacks_;
int outstanding_baseline_functions_ = 0;
int outstanding_top_tier_functions_ = 0;
int outstanding_baseline_units_ = 0;
int outstanding_tiering_units_ = 0;
// End of fields protected by {callbacks_mutex_}.
//////////////////////////////////////////////////////////////////////////////
......@@ -276,12 +275,8 @@ void CompilationState::AddCallback(CompilationState::callback_t callback) {
bool CompilationState::failed() const { return Impl(this)->failed(); }
void CompilationState::FinishUnit(WasmCompilationResult result) {
Impl(this)->FinishUnit(std::move(result));
}
void CompilationState::FinishUnits(Vector<WasmCompilationResult> results) {
Impl(this)->FinishUnits(results);
void CompilationState::OnFinishedUnit(ExecutionTier tier, WasmCode* code) {
Impl(this)->OnFinishedUnit(tier, code);
}
// static
......@@ -295,9 +290,64 @@ std::unique_ptr<CompilationState> CompilationState::New(
// End of PIMPL implementation of {CompilationState}.
//////////////////////////////////////////////////////
void CompileLazy(Isolate* isolate, NativeModule* native_module,
uint32_t func_index) {
Counters* counters = isolate->counters();
HistogramTimerScope lazy_time_scope(counters->wasm_lazy_compilation_time());
DCHECK(!native_module->lazy_compile_frozen());
base::ElapsedTimer compilation_timer;
NativeModuleModificationScope native_module_modification_scope(native_module);
DCHECK(!native_module->has_code(static_cast<uint32_t>(func_index)));
compilation_timer.Start();
TRACE_LAZY("Compiling wasm-function#%d.\n", func_index);
const uint8_t* module_start = native_module->wire_bytes().start();
const WasmFunction* func = &native_module->module()->functions[func_index];
FunctionBody func_body{func->sig, func->code.offset(),
module_start + func->code.offset(),
module_start + func->code.end_offset()};
ExecutionTier tier =
WasmCompilationUnit::GetDefaultExecutionTier(native_module->module());
WasmCompilationUnit unit(isolate->wasm_engine(), func_index, tier);
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = unit.ExecuteCompilation(
&env, native_module->compilation_state()->GetWireBytesStorage(),
isolate->counters(),
Impl(native_module->compilation_state())->detected_features());
WasmCode* code = native_module->AddCompiledCode(std::move(result));
// During lazy compilation, we should never get compilation errors. The module
// was verified before starting execution with lazy compilation.
// This might be OOM, but then we cannot continue execution anyway.
// TODO(clemensh): According to the spec, we can actually skip validation at
// module creation time, and return a function that always traps here.
CHECK(!native_module->compilation_state()->failed());
// The code we just produced should be the one that was requested.
DCHECK_EQ(func_index, code->index());
if (WasmCode::ShouldBeLogged(isolate)) code->LogCode(isolate);
double func_kb = 1e-3 * func->code.length();
double compilation_seconds = compilation_timer.Elapsed().InSecondsF();
counters->wasm_lazily_compiled_functions()->Increment();
int throughput_sample = static_cast<int>(func_kb / compilation_seconds);
counters->wasm_lazy_compilation_throughput()->AddSample(throughput_sample);
}
namespace {
ExecutionTier ApplyHintToExecutionTier(WasmCompilationHintTier hint,
ExecutionTier apply_hint_to_execution_tier(WasmCompilationHintTier hint,
ExecutionTier default_tier) {
switch (hint) {
case WasmCompilationHintTier::kDefault:
......@@ -312,96 +362,61 @@ ExecutionTier ApplyHintToExecutionTier(WasmCompilationHintTier hint,
UNREACHABLE();
}
const WasmCompilationHint* GetCompilationHint(const WasmModule* module,
uint32_t func_index) {
DCHECK_LE(module->num_imported_functions, func_index);
uint32_t hint_index = func_index - module->num_imported_functions;
const std::vector<WasmCompilationHint>& compilation_hints =
module->compilation_hints;
if (hint_index < compilation_hints.size()) {
return &compilation_hints[hint_index];
}
return nullptr;
}
bool IsLazyCompilation(const WasmModule* module,
const NativeModule* native_module,
const WasmFeatures& enabled_features,
uint32_t func_index) {
if (native_module->lazy_compilation()) return true;
if (enabled_features.compilation_hints) {
const WasmCompilationHint* hint = GetCompilationHint(module, func_index);
return hint != nullptr &&
hint->strategy == WasmCompilationHintStrategy::kLazy;
}
return false;
}
struct ExecutionTierPair {
ExecutionTier baseline_tier;
ExecutionTier top_tier;
};
// The {CompilationUnitBuilder} builds compilation units and stores them in an
// internal buffer. The buffer is moved into the working queue of the
// {CompilationStateImpl} when {Commit} is called.
class CompilationUnitBuilder {
public:
explicit CompilationUnitBuilder(NativeModule* native_module,
WasmEngine* wasm_engine)
: native_module_(native_module),
wasm_engine_(wasm_engine),
default_tier_(WasmCompilationUnit::GetDefaultExecutionTier(
native_module->module())) {}
ExecutionTierPair GetRequestedExecutionTiers(
const WasmModule* module, CompileMode compile_mode,
const WasmFeatures& enabled_features, uint32_t func_index) {
ExecutionTierPair result;
switch (compile_mode) {
void AddUnit(uint32_t func_index) {
switch (compilation_state()->compile_mode()) {
case CompileMode::kRegular:
result.baseline_tier =
WasmCompilationUnit::GetDefaultExecutionTier(module);
result.top_tier = result.baseline_tier;
return result;
baseline_units_.emplace_back(CreateUnit(func_index, default_tier_));
return;
case CompileMode::kTiering:
// Default tiering behaviour.
result.baseline_tier = ExecutionTier::kBaseline;
result.top_tier = ExecutionTier::kOptimized;
ExecutionTier first_tier = ExecutionTier::kBaseline;
ExecutionTier second_tier = ExecutionTier::kOptimized;
// Check if compilation hints override default tiering behaviour.
if (enabled_features.compilation_hints) {
const WasmCompilationHint* hint =
GetCompilationHint(module, func_index);
if (hint != nullptr) {
result.baseline_tier =
ApplyHintToExecutionTier(hint->first_tier, result.baseline_tier);
result.top_tier =
ApplyHintToExecutionTier(hint->second_tier, result.top_tier);
if (native_module_->enabled_features().compilation_hints) {
// Find compilation hint.
CHECK_LE(native_module_->num_imported_functions(), func_index);
uint32_t hint_index =
func_index - native_module_->num_imported_functions();
const std::vector<WasmCompilationHint>& compilation_hints =
native_module_->module()->compilation_hints;
if (hint_index < compilation_hints.size()) {
WasmCompilationHint hint = compilation_hints[hint_index];
// Apply compilation hint.
first_tier =
apply_hint_to_execution_tier(hint.first_tier, first_tier);
second_tier =
apply_hint_to_execution_tier(hint.second_tier, second_tier);
}
}
// Correct top tier if necessary.
// Create compilation units and suppress duplicate compilation.
baseline_units_.emplace_back(
CreateUnit(func_index, std::move(first_tier)));
static_assert(ExecutionTier::kInterpreter < ExecutionTier::kBaseline &&
ExecutionTier::kBaseline < ExecutionTier::kOptimized,
"Assume an order on execution tiers");
if (result.baseline_tier > result.top_tier) {
result.top_tier = result.baseline_tier;
if (first_tier < second_tier) {
tiering_units_.emplace_back(
CreateUnit(func_index, std::move(second_tier)));
}
return result;
return;
}
UNREACHABLE();
}
// The {CompilationUnitBuilder} builds compilation units and stores them in an
// internal buffer. The buffer is moved into the working queue of the
// {CompilationStateImpl} when {Commit} is called.
class CompilationUnitBuilder {
public:
explicit CompilationUnitBuilder(NativeModule* native_module,
WasmEngine* wasm_engine)
: native_module_(native_module),
wasm_engine_(wasm_engine),
default_tier_(WasmCompilationUnit::GetDefaultExecutionTier(
native_module->module())) {}
void AddUnits(uint32_t func_index) {
ExecutionTierPair tiers = GetRequestedExecutionTiers(
native_module_->module(), compilation_state()->compile_mode(),
native_module_->enabled_features(), func_index);
baseline_units_.emplace_back(CreateUnit(func_index, tiers.baseline_tier));
if (tiers.baseline_tier != tiers.top_tier) {
tiering_units_.emplace_back(CreateUnit(func_index, tiers.top_tier));
}
}
bool Commit() {
......@@ -434,76 +449,11 @@ class CompilationUnitBuilder {
std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_units_;
};
} // namespace
void CompileLazy(Isolate* isolate, NativeModule* native_module,
uint32_t func_index) {
Counters* counters = isolate->counters();
HistogramTimerScope lazy_time_scope(counters->wasm_lazy_compilation_time());
DCHECK(!native_module->lazy_compile_frozen());
base::ElapsedTimer compilation_timer;
NativeModuleModificationScope native_module_modification_scope(native_module);
DCHECK(!native_module->has_code(static_cast<uint32_t>(func_index)));
compilation_timer.Start();
TRACE_LAZY("Compiling wasm-function#%d.\n", func_index);
const uint8_t* module_start = native_module->wire_bytes().start();
const WasmFunction* func = &native_module->module()->functions[func_index];
FunctionBody func_body{func->sig, func->code.offset(),
module_start + func->code.offset(),
module_start + func->code.end_offset()};
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
ExecutionTierPair tiers = GetRequestedExecutionTiers(
native_module->module(), compilation_state->compile_mode(),
native_module->enabled_features(), func_index);
WasmCompilationUnit baseline_unit(isolate->wasm_engine(), func_index,
tiers.baseline_tier);
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = baseline_unit.ExecuteCompilation(
&env, compilation_state->GetWireBytesStorage(), isolate->counters(),
compilation_state->detected_features());
WasmCodeUpdate update = native_module->AddCompiledCode(std::move(result));
WasmCode* code = update.code;
if (tiers.baseline_tier < tiers.top_tier) {
auto tiering_unit = base::make_unique<WasmCompilationUnit>(
isolate->wasm_engine(), func_index, tiers.top_tier);
compilation_state->AddTopTierCompilationUnit(std::move(tiering_unit));
}
// During lazy compilation, we should never get compilation errors. The module
// was verified before starting execution with lazy compilation.
// This might be OOM, but then we cannot continue execution anyway.
// TODO(clemensh): According to the spec, we can actually skip validation at
// module creation time, and return a function that always traps here.
CHECK(!compilation_state->failed());
// The code we just produced should be the one that was requested.
DCHECK_EQ(func_index, code->index());
if (WasmCode::ShouldBeLogged(isolate)) code->LogCode(isolate);
double func_kb = 1e-3 * func->code.length();
double compilation_seconds = compilation_timer.Elapsed().InSecondsF();
counters->wasm_lazily_compiled_functions()->Increment();
int throughput_sample = static_cast<int>(func_kb / compilation_seconds);
counters->wasm_lazy_compilation_throughput()->AddSample(throughput_sample);
bool compile_lazy(const WasmModule* module) {
return FLAG_wasm_lazy_compilation ||
(FLAG_asm_wasm_lazy_compilation && module->origin == kAsmJsOrigin);
}
namespace {
void RecordStats(const Code code, Counters* counters) {
counters->wasm_generated_code_size()->Increment(code->body_size());
counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
......@@ -525,110 +475,32 @@ bool FetchAndExecuteCompilationUnit(CompilationEnv* env,
std::unique_ptr<WasmCompilationUnit> unit =
compilation_state->GetNextCompilationUnit();
if (unit == nullptr) return false;
WasmCompilationResult result = unit->ExecuteCompilation(
env, compilation_state->GetWireBytesStorage(), counters, detected);
if (result.succeeded()) {
compilation_state->FinishUnit(std::move(result));
WasmCode* code = native_module->AddCompiledCode(std::move(result));
compilation_state->OnFinishedUnit(result.requested_tier, code);
} else {
compilation_state->SetError();
}
return true;
}
void ValidateSequentially(Counters* counters, AccountingAllocator* allocator,
NativeModule* native_module, uint32_t func_index,
ErrorThrower* thrower) {
DCHECK(!thrower->error());
const WasmModule* module = native_module->module();
ModuleWireBytes wire_bytes{native_module->wire_bytes()};
const WasmFunction* func = &module->functions[func_index];
Vector<const uint8_t> code = wire_bytes.GetFunctionBytes(func);
FunctionBody body{func->sig, func->code.offset(), code.start(), code.end()};
DecodeResult result;
{
auto time_counter = SELECT_WASM_COUNTER(counters, module->origin,
wasm_decode, function_time);
TimedHistogramScope wasm_decode_function_time_scope(time_counter);
WasmFeatures detected;
result = VerifyWasmCode(allocator, native_module->enabled_features(),
module, &detected, body);
}
if (result.failed()) {
WasmName name = wire_bytes.GetNameOrNull(func, module);
if (name.start() == nullptr) {
thrower->CompileError("Compiling function #%d failed: %s @+%u",
func_index, result.error().message().c_str(),
result.error().offset());
} else {
TruncatedUserString<> name(wire_bytes.GetNameOrNull(func, module));
thrower->CompileError("Compiling function #%d:\"%.*s\" failed: %s @+%u",
func_index, name.length(), name.start(),
result.error().message().c_str(),
result.error().offset());
}
}
}
void ValidateSequentially(Counters* counters, AccountingAllocator* allocator,
NativeModule* native_module, ErrorThrower* thrower) {
DCHECK(!thrower->error());
uint32_t start = native_module->module()->num_imported_functions;
uint32_t end = start + native_module->module()->num_declared_functions;
for (uint32_t func_index = start; func_index < end; func_index++) {
ValidateSequentially(counters, allocator, native_module, func_index,
thrower);
if (thrower->error()) break;
}
return true;
}
// TODO(wasm): This function should not depend on an isolate. Internally, it is
// used for the ErrorThrower only.
bool InitializeCompilationUnits(Isolate* isolate, NativeModule* native_module,
void InitializeCompilationUnits(NativeModule* native_module,
WasmEngine* wasm_engine) {
// Set number of functions that must be compiled to consider the module fully
// compiled.
auto wasm_module = native_module->module();
int num_functions = wasm_module->num_declared_functions;
DCHECK_IMPLIES(!native_module->enabled_features().compilation_hints,
wasm_module->num_lazy_compilation_hints == 0);
int num_lazy_functions = wasm_module->num_lazy_compilation_hints;
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
compilation_state->SetNumberOfFunctionsToCompile(num_functions,
num_lazy_functions);
ErrorThrower thrower(isolate, "WebAssembly.compile()");
ModuleWireBytes wire_bytes(native_module->wire_bytes());
const WasmModule* module = native_module->module();
CompilationUnitBuilder builder(native_module, wasm_engine);
uint32_t start = module->num_imported_functions;
uint32_t end = start + module->num_declared_functions;
for (uint32_t func_index = start; func_index < end; func_index++) {
if (IsLazyCompilation(module, native_module,
native_module->enabled_features(), func_index)) {
ValidateSequentially(isolate->counters(), isolate->allocator(),
native_module, func_index, &thrower);
native_module->UseLazyStub(func_index);
} else {
builder.AddUnits(func_index);
}
for (uint32_t i = start; i < end; ++i) {
builder.AddUnit(i);
}
builder.Commit();
// Handle potential errors internally.
if (thrower.error()) {
thrower.Reset();
return false;
}
return true;
}
void CompileInParallel(Isolate* isolate, NativeModule* native_module) {
......@@ -651,19 +523,16 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module) {
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
DCHECK_GE(kMaxInt, native_module->module()->num_declared_functions);
int num_wasm_functions =
static_cast<int>(native_module->module()->num_declared_functions);
compilation_state->SetNumberOfFunctionsToCompile(num_wasm_functions);
// 1) The main thread allocates a compilation unit for each wasm function
// and stores them in the vector {compilation_units} within the
// {compilation_state}. By adding units to the {compilation_state}, new
// {BackgroundCompileTask} instances are spawned which run on
// background threads.
bool success = InitializeCompilationUnits(isolate, native_module,
isolate->wasm_engine());
if (!success) {
// TODO(frgossen): Add test coverage for this path.
DCHECK(native_module->enabled_features().compilation_hints);
compilation_state->SetError();
}
InitializeCompilationUnits(native_module, isolate->wasm_engine());
// 2) The background threads and the main thread pick one compilation unit at
// a time and execute the parallel phase of the compilation unit.
......@@ -702,13 +571,54 @@ void CompileSequentially(Isolate* isolate, NativeModule* native_module) {
UpdateFeatureUseCounts(isolate, detected);
}
void ValidateSequentially(Isolate* isolate, NativeModule* native_module,
ErrorThrower* thrower) {
DCHECK(!thrower->error());
ModuleWireBytes wire_bytes{native_module->wire_bytes()};
const WasmModule* module = native_module->module();
uint32_t start = module->num_imported_functions;
uint32_t end = start + module->num_declared_functions;
for (uint32_t i = start; i < end; ++i) {
const WasmFunction* func = &module->functions[i];
Vector<const uint8_t> code = wire_bytes.GetFunctionBytes(func);
FunctionBody body{func->sig, func->code.offset(), code.start(), code.end()};
DecodeResult result;
{
auto time_counter = SELECT_WASM_COUNTER(
isolate->counters(), module->origin, wasm_decode, function_time);
TimedHistogramScope wasm_decode_function_time_scope(time_counter);
WasmFeatures detected;
result = VerifyWasmCode(isolate->allocator(),
native_module->enabled_features(), module,
&detected, body);
}
if (result.failed()) {
WasmName name = wire_bytes.GetNameOrNull(func, module);
if (name.start() == nullptr) {
thrower->CompileError("Compiling function #%d failed: %s @+%u", i,
result.error().message().c_str(),
result.error().offset());
} else {
TruncatedUserString<> name(wire_bytes.GetNameOrNull(func, module));
thrower->CompileError("Compiling function #%d:\"%.*s\" failed: %s @+%u",
i, name.length(), name.start(),
result.error().message().c_str(),
result.error().offset());
}
break;
}
}
}
void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
const WasmModule* wasm_module,
NativeModule* native_module) {
ModuleWireBytes wire_bytes(native_module->wire_bytes());
if (FLAG_wasm_lazy_compilation ||
(FLAG_asm_wasm_lazy_compilation && wasm_module->origin == kAsmJsOrigin)) {
if (compile_lazy(wasm_module)) {
if (wasm_module->origin == kWasmOrigin) {
// Validate wasm modules for lazy compilation. Don't validate asm.js
// modules, they are valid by construction (otherwise a CHECK will fail
......@@ -716,12 +626,11 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
// TODO(clemensh): According to the spec, we can actually skip validation
// at module creation time, and return a function that always traps at
// (lazy) compilation time.
ValidateSequentially(isolate->counters(), isolate->allocator(),
native_module, thrower);
ValidateSequentially(isolate, native_module, thrower);
if (thrower->error()) return;
}
native_module->set_lazy_compilation(true);
native_module->UseLazyStubs();
native_module->SetLazyBuiltin();
} else {
size_t funcs_to_compile =
wasm_module->functions.size() - wasm_module->num_imported_functions;
......@@ -737,8 +646,7 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
}
auto* compilation_state = Impl(native_module->compilation_state());
if (compilation_state->failed()) {
ValidateSequentially(isolate->counters(), isolate->allocator(),
native_module, thrower);
ValidateSequentially(isolate, native_module, thrower);
CHECK(thrower->error());
}
}
......@@ -790,9 +698,26 @@ class BackgroundCompileTask : public CancelableTask {
auto publish_results =
[&results_to_publish](BackgroundCompileScope* compile_scope) {
if (results_to_publish.empty()) return;
compile_scope->compilation_state()->FinishUnits(
// TODO(clemensh): Refactor {OnFinishedUnit} and remove this.
std::vector<ExecutionTier> requested_tiers;
requested_tiers.reserve(results_to_publish.size());
for (auto& result : results_to_publish) {
requested_tiers.push_back(result.requested_tier);
}
std::vector<WasmCode*> generated_code =
compile_scope->native_module()->AddCompiledCode(
VectorOf(results_to_publish));
results_to_publish.clear();
// Account for the finished compilation units.
// TODO(clemensh): This takes a lock on each invokation. Only do this
// once and pass accumulated counts.
DCHECK_EQ(generated_code.size(), requested_tiers.size());
for (size_t i = 0; i < generated_code.size(); ++i) {
compile_scope->compilation_state()->OnFinishedUnit(
requested_tiers[i], generated_code[i]);
}
};
bool compilation_failed = false;
......@@ -1085,8 +1010,7 @@ void AsyncCompileJob::DecodeFailed(const WasmError& error) {
void AsyncCompileJob::AsyncCompileFailed() {
ErrorThrower thrower(isolate_, "WebAssembly.compile()");
ValidateSequentially(isolate_->counters(), isolate_->allocator(),
native_module_.get(), &thrower);
ValidateSequentially(isolate_, native_module_.get(), &thrower);
DCHECK(thrower.error());
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
......@@ -1352,15 +1276,11 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
// InitializeCompilationUnits always returns 0 for streaming compilation,
// then DoAsync would do the same as NextStep already.
compilation_state->SetNumberOfFunctionsToCompile(
module_->num_declared_functions);
// Add compilation units and kick off compilation.
auto isolate = job->isolate();
bool success = InitializeCompilationUnits(
isolate, job->native_module_.get(), isolate->wasm_engine());
if (!success) {
// TODO(frgossen): Add test coverage for this path.
DCHECK(job->native_module_->enabled_features().compilation_hints);
job->DoSync<CompileFailed>();
}
InitializeCompilationUnits(job->native_module_.get(),
job->isolate()->wasm_engine());
}
}
};
......@@ -1526,18 +1446,11 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
// task.
job_->DoImmediately<AsyncCompileJob::PrepareAndStartCompile>(
decoder_.shared_module(), false);
job_->native_module_->compilation_state()->SetWireBytesStorage(
std::move(wire_bytes_storage));
auto* compilation_state = Impl(job_->native_module_->compilation_state());
compilation_state->SetWireBytesStorage(std::move(wire_bytes_storage));
// Set number of functions that must be compiled to consider the module fully
// compiled.
auto wasm_module = job_->native_module_->module();
int num_functions = wasm_module->num_declared_functions;
DCHECK_IMPLIES(!job_->native_module_->enabled_features().compilation_hints,
wasm_module->num_lazy_compilation_hints == 0);
int num_lazy_functions = wasm_module->num_lazy_compilation_hints;
compilation_state->SetNumberOfFunctionsToCompile(num_functions,
num_lazy_functions);
compilation_state->SetNumberOfFunctionsToCompile(functions_count);
// Set outstanding_finishers_ to 2, because both the AsyncCompileJob and the
// AsyncStreamingProcessor have to finish.
......@@ -1555,30 +1468,11 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
decoder_.DecodeFunctionBody(
num_functions_, static_cast<uint32_t>(bytes.length()), offset, false);
uint32_t func_index =
num_functions_ + decoder_.module()->num_imported_functions;
NativeModule* native_module = job_->native_module_.get();
if (IsLazyCompilation(native_module->module(), native_module,
native_module->enabled_features(), func_index)) {
ErrorThrower thrower(job_->isolate(), "WebAssembly.compile()");
auto counters = job_->isolate()->counters();
auto allocator = job_->isolate()->allocator();
ValidateSequentially(counters, allocator, native_module, func_index,
&thrower);
native_module->UseLazyStub(func_index);
if (thrower.error()) {
// TODO(frgossen): Add test coverage for this path.
DCHECK(native_module->enabled_features().compilation_hints);
thrower.Reset();
return false;
}
} else {
compilation_unit_builder_->AddUnits(func_index);
}
int index = num_functions_ + decoder_.module()->num_imported_functions;
compilation_unit_builder_->AddUnit(index);
++num_functions_;
// This method always succeeds. The return value is necessary to comply with
// the StreamingProcessor interface.
return true;
}
......@@ -1678,14 +1572,14 @@ void CompilationStateImpl::AbortCompilation() {
callbacks_.clear();
}
void CompilationStateImpl::SetNumberOfFunctionsToCompile(
int num_functions, int num_lazy_functions) {
void CompilationStateImpl::SetNumberOfFunctionsToCompile(int num_functions) {
DCHECK(!failed());
base::MutexGuard guard(&callbacks_mutex_);
outstanding_baseline_units_ = num_functions;
int num_functions_to_compile = num_functions - num_lazy_functions;
outstanding_baseline_functions_ = num_functions_to_compile;
outstanding_top_tier_functions_ = num_functions_to_compile;
if (compile_mode_ == CompileMode::kTiering) {
outstanding_tiering_units_ = num_functions;
}
}
void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
......@@ -1695,38 +1589,30 @@ void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
void CompilationStateImpl::AddCompilationUnits(
std::vector<std::unique_ptr<WasmCompilationUnit>>& baseline_units,
std::vector<std::unique_ptr<WasmCompilationUnit>>& top_tier_units) {
std::vector<std::unique_ptr<WasmCompilationUnit>>& tiering_units) {
{
base::MutexGuard guard(&mutex_);
DCHECK_LE(tiering_units.size(), baseline_units.size());
DCHECK_IMPLIES(compile_mode_ == CompileMode::kTiering &&
!native_module_->enabled_features().compilation_hints,
tiering_units.size() == baseline_units.size());
DCHECK_IMPLIES(compile_mode_ == CompileMode::kTiering &&
!native_module_->enabled_features().compilation_hints,
tiering_units.back()->tier() == ExecutionTier::kOptimized);
DCHECK_IMPLIES(compile_mode_ == CompileMode::kRegular,
top_tier_compilation_units_.empty());
tiering_compilation_units_.empty());
baseline_compilation_units_.insert(
baseline_compilation_units_.end(),
std::make_move_iterator(baseline_units.begin()),
std::make_move_iterator(baseline_units.end()));
if (!top_tier_units.empty()) {
top_tier_compilation_units_.insert(
top_tier_compilation_units_.end(),
std::make_move_iterator(top_tier_units.begin()),
std::make_move_iterator(top_tier_units.end()));
}
if (!tiering_units.empty()) {
tiering_compilation_units_.insert(
tiering_compilation_units_.end(),
std::make_move_iterator(tiering_units.begin()),
std::make_move_iterator(tiering_units.end()));
}
RestartBackgroundTasks();
}
void CompilationStateImpl::AddTopTierCompilationUnit(
std::unique_ptr<WasmCompilationUnit> unit) {
{
base::MutexGuard guard(&mutex_);
DCHECK_EQ(compile_mode_, CompileMode::kTiering);
DCHECK(FLAG_wasm_lazy_compilation || FLAG_asm_wasm_lazy_compilation ||
native_module_->enabled_features().compilation_hints);
top_tier_compilation_units_.emplace_back(std::move(unit));
}
RestartBackgroundTasks();
......@@ -1736,105 +1622,65 @@ std::unique_ptr<WasmCompilationUnit>
CompilationStateImpl::GetNextCompilationUnit() {
base::MutexGuard guard(&mutex_);
std::vector<std::unique_ptr<WasmCompilationUnit>>* units = nullptr;
if (!baseline_compilation_units_.empty()) {
units = &baseline_compilation_units_;
} else if (!top_tier_compilation_units_.empty()) {
units = &top_tier_compilation_units_;
} else {
return std::unique_ptr<WasmCompilationUnit>();
}
DCHECK_NOT_NULL(units);
DCHECK(!units->empty());
std::vector<std::unique_ptr<WasmCompilationUnit>>& units =
baseline_compilation_units_.empty() ? tiering_compilation_units_
: baseline_compilation_units_;
std::unique_ptr<WasmCompilationUnit> unit = std::move(units->back());
units->pop_back();
if (!units.empty()) {
std::unique_ptr<WasmCompilationUnit> unit = std::move(units.back());
units.pop_back();
return unit;
}
}
void CompilationStateImpl::FinishUnit(WasmCompilationResult result) {
FinishUnits({&result, 1});
return std::unique_ptr<WasmCompilationUnit>();
}
void CompilationStateImpl::FinishUnits(
Vector<WasmCompilationResult> compilation_results) {
void CompilationStateImpl::OnFinishedUnit(ExecutionTier tier, WasmCode* code) {
// This mutex guarantees that events happen in the right order.
base::MutexGuard guard(&callbacks_mutex_);
// Assume an order of execution tiers that represents the quality of their
// generated code.
static_assert(ExecutionTier::kInterpreter < ExecutionTier::kBaseline &&
ExecutionTier::kBaseline < ExecutionTier::kOptimized,
"Assume an order on execution tiers");
auto module = native_module_->module();
auto enabled_features = native_module_->enabled_features();
std::vector<WasmCodeUpdate> code_update_vector =
native_module_->AddCompiledCode(compilation_results);
for (WasmCodeUpdate& code_update : code_update_vector) {
DCHECK_NOT_NULL(code_update.code);
DCHECK(code_update.tier.has_value());
native_module_->engine()->LogCode(code_update.code);
uint32_t func_index = code_update.code->index();
ExecutionTierPair requested_tiers = GetRequestedExecutionTiers(
module, compile_mode(), enabled_features, func_index);
// Reconstruct state before code update.
bool had_reached_baseline = code_update.prior_tier.has_value();
bool had_reached_top_tier =
code_update.prior_tier.has_value() &&
code_update.prior_tier.value() >= requested_tiers.top_tier;
DCHECK_IMPLIES(had_reached_baseline, code_update.prior_tier.has_value() &&
code_update.prior_tier.value() >=
requested_tiers.baseline_tier);
// Conclude whether we are reaching baseline or top tier.
bool reaches_baseline = !had_reached_baseline;
bool reaches_top_tier =
!had_reached_top_tier &&
code_update.tier.value() >= requested_tiers.top_tier;
DCHECK_IMPLIES(reaches_baseline,
code_update.tier.value() >= requested_tiers.baseline_tier);
DCHECK_IMPLIES(reaches_top_tier, had_reached_baseline || reaches_baseline);
// Remember state before update.
bool had_completed_baseline_compilation =
outstanding_baseline_functions_ == 0;
bool had_completed_top_tier_compilation =
outstanding_top_tier_functions_ == 0;
// Update state.
if (!IsLazyCompilation(module, native_module_, enabled_features,
func_index)) {
if (reaches_baseline) outstanding_baseline_functions_--;
if (reaches_top_tier) outstanding_top_tier_functions_--;
}
DCHECK_LE(0, outstanding_baseline_functions_);
DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
// Conclude if we are completing baseline or top tier compilation.
bool completes_baseline_compilation = !had_completed_baseline_compilation &&
outstanding_baseline_functions_ == 0;
bool completes_top_tier_compilation = !had_completed_top_tier_compilation &&
outstanding_top_tier_functions_ == 0;
DCHECK_IMPLIES(
completes_top_tier_compilation,
had_completed_baseline_compilation || completes_baseline_compilation);
// Trigger callbacks.
if (completes_baseline_compilation) {
// If we are *not* compiling in tiering mode, then all units are counted as
// baseline units.
bool is_tiering_mode = compile_mode_ == CompileMode::kTiering;
bool is_tiering_unit = is_tiering_mode && tier == ExecutionTier::kOptimized;
// Sanity check: If we are not in tiering mode, there cannot be outstanding
// tiering units.
DCHECK_IMPLIES(!is_tiering_mode, outstanding_tiering_units_ == 0);
bool baseline_finished = false;
bool tiering_finished = false;
if (is_tiering_unit) {
DCHECK_LT(0, outstanding_tiering_units_);
--outstanding_tiering_units_;
tiering_finished = outstanding_tiering_units_ == 0;
// If baseline compilation has not finished yet, then also trigger
// {kFinishedBaselineCompilation}.
baseline_finished = tiering_finished && outstanding_baseline_units_ > 0;
} else {
DCHECK_LT(0, outstanding_baseline_units_);
--outstanding_baseline_units_;
// If we are in tiering mode and tiering finished before, then do not
// trigger baseline finished.
baseline_finished = outstanding_baseline_units_ == 0 &&
(!is_tiering_mode || outstanding_tiering_units_ > 0);
// If we are not tiering, then we also trigger the "top tier finished"
// event when baseline compilation is finished.
tiering_finished = baseline_finished && !is_tiering_mode;
}
if (baseline_finished) {
for (auto& callback : callbacks_)
callback(CompilationEvent::kFinishedBaselineCompilation);
}
if (completes_top_tier_compilation) {
if (tiering_finished) {
for (auto& callback : callbacks_)
callback(CompilationEvent::kFinishedTopTierCompilation);
// Clear the callbacks because no more events will be delivered.
callbacks_.clear();
}
}
if (code != nullptr) native_module_->engine()->LogCode(code);
}
void CompilationStateImpl::RestartBackgroundCompileTask() {
......@@ -1884,7 +1730,7 @@ void CompilationStateImpl::RestartBackgroundTasks() {
DCHECK_LE(num_background_tasks_, max_background_tasks_);
if (num_background_tasks_ == max_background_tasks_) return;
size_t num_compilation_units =
baseline_compilation_units_.size() + top_tier_compilation_units_.size();
baseline_compilation_units_.size() + tiering_compilation_units_.size();
num_restart = max_background_tasks_ - num_background_tasks_;
DCHECK_LE(0, num_restart);
if (num_compilation_units < static_cast<size_t>(num_restart)) {
......
......@@ -1008,10 +1008,7 @@ class ModuleDecoderImpl : public Decoder {
}
// Decode sequence of compilation hints.
if (decoder.ok()) {
module_->compilation_hints.reserve(hint_count);
module_->num_lazy_compilation_hints = 0;
}
if (decoder.ok()) module_->compilation_hints.reserve(hint_count);
for (uint32_t i = 0; decoder.ok() && i < hint_count; i++) {
TRACE("DecodeCompilationHints[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
......@@ -1050,18 +1047,12 @@ class ModuleDecoderImpl : public Decoder {
}
// Happily accept compilation hint.
if (decoder.ok()) {
if (hint.strategy == WasmCompilationHintStrategy::kLazy) {
module_->num_lazy_compilation_hints++;
}
module_->compilation_hints.push_back(std::move(hint));
}
if (decoder.ok()) module_->compilation_hints.push_back(std::move(hint));
}
// If section was invalid reset compilation hints.
if (decoder.failed()) {
module_->compilation_hints.clear();
module_->num_lazy_compilation_hints = 0;
}
// @TODO(frgossen) Skip the whole compilation hints section in the outer
......
......@@ -433,25 +433,19 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
return AddAndPublishAnonymousCode(code, WasmCode::kFunction);
}
// Sets up lazy compilation for every declared (non-imported) function of the
// module by installing a lazy-compile stub per function via {UseLazyStub}.
void NativeModule::UseLazyStubs() {
// Function indices are module-wide; declared functions follow the imports.
uint32_t start = module_->num_imported_functions;
uint32_t end = start + module_->num_declared_functions;
for (uint32_t func_index = start; func_index < end; func_index++) {
UseLazyStub(func_index);
}
}
void NativeModule::UseLazyStub(uint32_t func_index) {
DCHECK_LE(module_->num_imported_functions, func_index);
DCHECK_LT(func_index,
module_->num_imported_functions + module_->num_declared_functions);
// Add jump table entry for jump to the lazy compile stub.
uint32_t slot_index = func_index - module_->num_imported_functions;
DCHECK_NE(runtime_stub_entry(WasmCode::kWasmCompileLazy), kNullAddress);
void NativeModule::SetLazyBuiltin() {
uint32_t num_wasm_functions = module_->num_declared_functions;
if (num_wasm_functions == 0) return;
// Fill the jump table with jumps to the lazy compile stub.
Address lazy_compile_target = runtime_stub_entry(WasmCode::kWasmCompileLazy);
for (uint32_t i = 0; i < num_wasm_functions; ++i) {
JumpTableAssembler::EmitLazyCompileJumpSlot(
jump_table_->instruction_start(), slot_index, func_index,
runtime_stub_entry(WasmCode::kWasmCompileLazy), WasmCode::kFlushICache);
jump_table_->instruction_start(), i,
i + module_->num_imported_functions, lazy_compile_target,
WasmCode::kNoFlushICache);
}
FlushInstructionCache(jump_table_->instructions().start(),
jump_table_->instructions().size());
}
// TODO(mstarzinger): Remove {Isolate} parameter once {V8_EMBEDDED_BUILTINS}
......@@ -587,7 +581,7 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
new_code->MaybePrint(name);
new_code->Validate();
return PublishCode(std::move(new_code)).code;
return PublishCode(std::move(new_code));
}
std::unique_ptr<WasmCode> NativeModule::AddCode(
......@@ -673,98 +667,39 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
return code;
}
WasmCodeUpdate NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
base::MutexGuard lock(&allocation_mutex_);
return PublishCodeLocked(std::move(code));
}
namespace {
// Maps an {ExecutionTier} to the {WasmCode::Tier} recorded on generated code.
WasmCode::Tier GetCodeTierForExecutionTier(ExecutionTier tier) {
switch (tier) {
case ExecutionTier::kInterpreter:
// Interpreter entries are not Liftoff/TurboFan code.
return WasmCode::Tier::kOther;
case ExecutionTier::kBaseline:
return WasmCode::Tier::kLiftoff;
case ExecutionTier::kOptimized:
return WasmCode::Tier::kTurbofan;
}
}
// Maps an {ExecutionTier} to the {WasmCode::Kind} of the code object that
// tier produces (interpreter entry vs. regular function code).
WasmCode::Kind GetCodeKindForExecutionTier(ExecutionTier tier) {
switch (tier) {
case ExecutionTier::kInterpreter:
return WasmCode::Kind::kInterpreterEntry;
case ExecutionTier::kBaseline:
case ExecutionTier::kOptimized:
return WasmCode::Kind::kFunction;
}
}
// TODO(frgossen): We should merge ExecutionTier with WasmCode::Tier.
// Inverse mapping: derives the {ExecutionTier} from a published {WasmCode}.
// Returns an empty optional for a null code object or for {kOther}-tier code
// that is not an interpreter entry (e.g. wrappers/stubs).
base::Optional<ExecutionTier> GetExecutionTier(WasmCode* code) {
if (code == nullptr) return {};
switch (code->tier()) {
case WasmCode::Tier::kLiftoff:
DCHECK_EQ(code->kind(), WasmCode::Kind::kFunction);
return ExecutionTier::kBaseline;
case WasmCode::Tier::kTurbofan:
DCHECK_EQ(code->kind(), WasmCode::Kind::kFunction);
return ExecutionTier::kOptimized;
case WasmCode::Tier::kOther:
if (code->kind() == WasmCode::Kind::kInterpreterEntry)
return ExecutionTier::kInterpreter;
return {};
}
UNREACHABLE();
}
} // namespace
WasmCodeUpdate NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
// The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
DCHECK(!allocation_mutex_.TryLock());
base::Optional<ExecutionTier> prior_tier;
base::Optional<ExecutionTier> tier;
// Skip publishing code if there is an active redirection to the interpreter
// for the given function index, in order to preserve the redirection.
if (!code->IsAnonymous() && !has_interpreter_redirection(code->index())) {
DCHECK_LT(code->index(), num_functions());
DCHECK_LE(module_->num_imported_functions, code->index());
// Assume an order of execution tiers that represents the quality of their
// generated code.
static_assert(ExecutionTier::kInterpreter < ExecutionTier::kBaseline &&
ExecutionTier::kBaseline < ExecutionTier::kOptimized,
"Assume an order on execution tiers");
// Update code table but avoid to fall back to less optimized code. We use
// the new code if it was compiled with a higher tier and also if we cannot
// determine the tier.
// Update code table, except for interpreter entries that would overwrite
// existing code.
uint32_t slot_idx = code->index() - module_->num_imported_functions;
WasmCode* prior_code = code_table_[slot_idx];
prior_tier = GetExecutionTier(prior_code);
tier = GetExecutionTier(code.get());
bool code_upgrade = !prior_tier.has_value() || !tier.has_value() ||
prior_tier.value() < tier.value();
if (code_upgrade) {
if (code->kind() != WasmCode::kInterpreterEntry ||
code_table_[slot_idx] == nullptr) {
code_table_[slot_idx] = code.get();
}
// Patch jump table. Ensure to use optimized code and interpreter entries.
if (code_upgrade || code->kind_ == WasmCode::Kind::kInterpreterEntry) {
JumpTableAssembler::PatchJumpTableSlot(
jump_table_->instruction_start(), slot_idx, code->instruction_start(),
// Patch jump table.
JumpTableAssembler::PatchJumpTableSlot(jump_table_->instruction_start(),
slot_idx, code->instruction_start(),
WasmCode::kFlushICache);
}
}
if (code->kind_ == WasmCode::Kind::kInterpreterEntry) {
SetInterpreterRedirection(code->index());
}
WasmCodeUpdate update;
update.code = code.get();
update.tier = tier;
update.prior_tier = prior_tier;
WasmCode* ret = code.get();
owned_code_.emplace_back(std::move(code));
return update;
return ret;
}
WasmCode* NativeModule::AddDeserializedCode(
......@@ -791,7 +726,7 @@ WasmCode* NativeModule::AddDeserializedCode(
// Note: we do not flush the i-cache here, since the code needs to be
// relocated anyway. The caller is responsible for flushing the i-cache later.
return PublishCode(std::move(code)).code;
return PublishCode(std::move(code));
}
std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
......@@ -823,7 +758,7 @@ WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
OwnedVector<const uint8_t>{}, // source_pos
WasmCode::kJumpTable, // kind
WasmCode::kOther}}; // tier
return PublishCode(std::move(code)).code;
return PublishCode(std::move(code));
}
Vector<byte> NativeModule::AllocateForCode(size_t size) {
......@@ -1267,11 +1202,34 @@ void NativeModule::SampleCodeSize(
histogram->AddSample(code_size_mb);
}
WasmCodeUpdate NativeModule::AddCompiledCode(WasmCompilationResult result) {
namespace {
// Maps an {ExecutionTier} to the {WasmCode::Tier} recorded on generated code.
WasmCode::Tier GetCodeTierForExecutionTier(ExecutionTier tier) {
switch (tier) {
case ExecutionTier::kInterpreter:
// Interpreter entries are neither Liftoff nor TurboFan code.
return WasmCode::Tier::kOther;
case ExecutionTier::kBaseline:
return WasmCode::Tier::kLiftoff;
case ExecutionTier::kOptimized:
return WasmCode::Tier::kTurbofan;
}
}
// Maps an {ExecutionTier} to the {WasmCode::Kind} of the produced code object
// (interpreter entry vs. regular function code).
WasmCode::Kind GetCodeKindForExecutionTier(ExecutionTier tier) {
switch (tier) {
case ExecutionTier::kInterpreter:
return WasmCode::Kind::kInterpreterEntry;
case ExecutionTier::kBaseline:
case ExecutionTier::kOptimized:
return WasmCode::Kind::kFunction;
}
}
} // namespace
// Convenience overload: publishes a single compilation result by delegating
// to the vector overload and returning the sole published {WasmCode}.
WasmCode* NativeModule::AddCompiledCode(WasmCompilationResult result) {
return AddCompiledCode({&result, 1})[0];
}
std::vector<WasmCodeUpdate> NativeModule::AddCompiledCode(
std::vector<WasmCode*> NativeModule::AddCompiledCode(
Vector<WasmCompilationResult> results) {
DCHECK(!results.is_empty());
// First, allocate code space for all the results.
......@@ -1301,15 +1259,16 @@ std::vector<WasmCodeUpdate> NativeModule::AddCompiledCode(
DCHECK_EQ(0, code_space.size());
// Under the {allocation_mutex_}, publish the code.
std::vector<WasmCodeUpdate> code_updates;
code_updates.reserve(results.size());
std::vector<WasmCode*> returned_code;
returned_code.reserve(results.size());
{
base::MutexGuard lock(&allocation_mutex_);
for (auto& result : generated_code)
code_updates.push_back(PublishCodeLocked(std::move(result)));
for (auto& result : generated_code) {
returned_code.push_back(PublishCodeLocked(std::move(result)));
}
}
return code_updates;
return returned_code;
}
void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
......
......@@ -14,7 +14,6 @@
#include <vector>
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/builtins/builtins-definitions.h"
#include "src/handles.h"
#include "src/trap-handler/trap-handler.h"
......@@ -73,12 +72,6 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool);
};
// Result of publishing a code object: the published code plus the execution
// tier of the new code and of the code it replaced (if determinable).
struct WasmCodeUpdate {
// Raw, non-owning pointer to the published code (ownership stays with the
// NativeModule's {owned_code_}; see {PublishCodeLocked}).
WasmCode* code = nullptr;
// Tier of {code}; empty when the tier cannot be determined.
base::Optional<ExecutionTier> tier;
// Tier of the previously installed code for the same slot, if any.
base::Optional<ExecutionTier> prior_tier;
};
class V8_EXPORT_PRIVATE WasmCode final {
public:
enum Kind {
......@@ -277,9 +270,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// {PublishCode} makes the code available to the system by entering it into
// the code table and patching the jump table. It returns a raw pointer to the
// given {WasmCode} object.
WasmCodeUpdate PublishCode(std::unique_ptr<WasmCode>);
WasmCode* PublishCode(std::unique_ptr<WasmCode>);
// Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
WasmCodeUpdate PublishCodeLocked(std::unique_ptr<WasmCode>);
WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>);
WasmCode* AddDeserializedCode(
uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
......@@ -295,12 +288,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Adds anonymous code for testing purposes.
WasmCode* AddCodeForTesting(Handle<Code> code);
// Use this to setup lazy compilation for the entire module ({UseLazyStubs})
// or for individual functions ({UseLazyStub}). It will use the existing
// {WasmCode::kWasmCompileLazy} runtime stub and populate the jump table with
// trampolines to that runtime stub.
void UseLazyStubs();
void UseLazyStub(uint32_t func_index);
// Use this to start lazy compilation for the entire module. It will use the
// existing {WasmCode::kWasmCompileLazy} runtime stub and populate the jump
// table with trampolines to that runtime stub.
void SetLazyBuiltin();
// Initializes all runtime stubs by setting up entry addresses in the runtime
// stub table. It must be called exactly once per native module before adding
......@@ -379,8 +370,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
UseTrapHandler use_trap_handler() const { return use_trap_handler_; }
void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
void set_lazy_compilation(bool lazy) { lazy_compilation_ = lazy; }
bool lazy_compilation() const { return lazy_compilation_; }
Vector<const uint8_t> wire_bytes() const { return wire_bytes_->as_vector(); }
const WasmModule* module() const { return module_.get(); }
std::shared_ptr<const WasmModule> shared_module() const { return module_; }
......@@ -405,8 +394,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
enum CodeSamplingTime : int8_t { kAfterBaseline, kAfterTopTier, kSampling };
void SampleCodeSize(Counters*, CodeSamplingTime) const;
WasmCodeUpdate AddCompiledCode(WasmCompilationResult);
std::vector<WasmCodeUpdate> AddCompiledCode(Vector<WasmCompilationResult>);
WasmCode* AddCompiledCode(WasmCompilationResult);
std::vector<WasmCode*> AddCompiledCode(Vector<WasmCompilationResult>);
// Free a set of functions of this module. Uncommits whole pages if possible.
// The given vector must be ordered by the instruction start address, and all
......@@ -535,7 +524,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
UseTrapHandler use_trap_handler_ = kNoTrapHandler;
bool is_executable_ = false;
bool lazy_compile_frozen_ = false;
bool lazy_compilation_ = false;
DISALLOW_COPY_AND_ASSIGN(NativeModule);
};
......
......@@ -195,7 +195,6 @@ struct V8_EXPORT_PRIVATE WasmModule {
uint32_t num_declared_functions = 0; // excluding imported
uint32_t num_exported_functions = 0;
uint32_t num_declared_data_segments = 0; // From the DataCount section.
uint32_t num_lazy_compilation_hints = 0; // From compilation hints section.
WireBytesRef name = {0, 0};
std::vector<FunctionSig*> signatures; // by signature index
std::vector<uint32_t> signature_ids; // by signature index
......
......@@ -503,12 +503,7 @@ bool NativeModuleDeserializer::ReadHeader(Reader* reader) {
bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
size_t code_section_size = reader->Read<size_t>();
if (code_section_size == 0) {
DCHECK(FLAG_wasm_lazy_compilation ||
native_module_->enabled_features().compilation_hints);
native_module_->UseLazyStub(fn_index);
return true;
}
if (code_section_size == 0) return true;
size_t constant_pool_offset = reader->Read<size_t>();
size_t safepoint_table_offset = reader->Read<size_t>();
size_t handler_table_offset = reader->Read<size_t>();
......@@ -630,8 +625,9 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
std::move(wire_bytes_copy), script, Handle<ByteArray>::null());
NativeModule* native_module = module_object->native_module();
native_module->set_lazy_compilation(FLAG_wasm_lazy_compilation);
if (FLAG_wasm_lazy_compilation) {
native_module->SetLazyBuiltin();
}
NativeModuleDeserializer deserializer(native_module);
Reader reader(data + kVersionSize);
......
......@@ -501,9 +501,7 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
WasmCompilationResult result = unit.ExecuteCompilation(
&env, native_module->compilation_state()->GetWireBytesStorage(),
isolate()->counters(), &unused_detected_features);
WasmCodeUpdate code_update =
native_module->AddCompiledCode(std::move(result));
WasmCode* code = code_update.code;
WasmCode* code = native_module->AddCompiledCode(std::move(result));
DCHECK_NOT_NULL(code);
if (WasmCode::ShouldBeLogged(isolate())) code->LogCode(isolate());
}
......
......@@ -177,7 +177,7 @@ class WasmCodeManagerTest : public TestWithContext,
desc.instr_size = static_cast<int>(size);
std::unique_ptr<WasmCode> code = native_module->AddCode(
index, desc, 0, 0, {}, {}, WasmCode::kFunction, WasmCode::kOther);
return native_module->PublishCode(std::move(code)).code;
return native_module->PublishCode(std::move(code));
}
size_t page() const { return AllocatePageSize(); }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment