Commit 09fa63a9 authored by Frederik Gossen, committed by Commit Bot

[wasm-hints] Enabled Lazy Compilation by Hint

Hints for lazy compilation are now taken into consideration. If the
custom hints section suggests lazy compilation, we do so unless the
module consists of a single function.
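
For illustration, the intended per-function decision can be sketched as
follows; the helper's name and shape are assumptions (the actual wiring
lives in the collapsed compiler diff below), not the literal implementation:

    // Sketch: compile a function lazily when its hint asks for it, except in
    // single-function modules, which are compiled eagerly.
    bool ShouldCompileLazily(const WasmModule* module,
                             const WasmCompilationHint& hint) {
      if (module->num_declared_functions <= 1) return false;
      return hint.strategy == WasmCompilationHintStrategy::kLazy;
    }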

Bug: v8:9003
Change-Id: Ibdc400453cee20d4d5c814733887b38fb675b220
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1535827
Commit-Queue: Frederik Gossen <frgossen@google.com>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60557}
parent c63350a9
@@ -5948,7 +5948,7 @@ wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
std::move(result.source_positions), wasm::WasmCode::kFunction,
wasm::WasmCode::kOther);
// TODO(titzer): add counters for math intrinsic code size / allocation
return native_module->PublishCode(std::move(wasm_code));
return native_module->PublishCode(std::move(wasm_code)).code;
}
wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
@@ -6012,7 +6012,7 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
std::move(result.protected_instructions),
std::move(result.source_positions), wasm::WasmCode::kWasmToJsWrapper,
wasm::WasmCode::kOther);
return native_module->PublishCode(std::move(wasm_code));
return native_module->PublishCode(std::move(wasm_code)).code;
}
wasm::WasmCompilationResult CompileWasmInterpreterEntry(
@@ -6243,7 +6243,9 @@ wasm::WasmCompilationResult TurbofanWasmCompilationUnit::ExecuteCompilation(
// TODO(bradnelson): Improve histogram handling of size_t.
counters->wasm_compile_function_peak_memory_bytes()->AddSample(
static_cast<int>(mcgraph->graph()->zone()->allocation_size()));
return std::move(*info.ReleaseWasmCompilationResult());
auto result = info.ReleaseWasmCompilationResult();
DCHECK_EQ(wasm::ExecutionTier::kOptimized, result->result_tier);
return std::move(*result);
}
wasm::WasmCompilationResult InterpreterCompilationUnit::ExecuteCompilation(
@@ -6260,6 +6262,7 @@ wasm::WasmCompilationResult InterpreterCompilationUnit::ExecuteCompilation(
wasm_unit_->wasm_engine_, env->enabled_features, wasm_unit_->func_index_,
func_body.sig);
DCHECK(result.succeeded());
DCHECK_EQ(wasm::ExecutionTier::kInterpreter, result.result_tier);
return result;
}
......
@@ -21,6 +21,7 @@ namespace wasm {
class NativeModule;
class WasmCode;
struct WasmCompilationResult;
class WasmError;
enum RuntimeExceptionSupport : bool {
@@ -120,7 +121,8 @@ class CompilationState {
bool failed() const;
void OnFinishedUnit(ExecutionTier, WasmCode*);
void FinishUnit(WasmCompilationResult);
void FinishUnits(Vector<WasmCompilationResult>);
private:
friend class NativeModule;
......
@@ -52,6 +52,7 @@ struct WasmCompilationResult {
MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(WasmCompilationResult);
bool succeeded() const { return code_desc.buffer != nullptr; }
bool failed() const { return !succeeded(); }
operator bool() const { return succeeded(); }
CodeDesc code_desc;
......
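// Illustrative use of the new boolean conversion (a sketch; the surrounding
// calls are abbreviated from the test changes at the end of this diff):
//
//   WasmCompilationResult result = unit.ExecuteCompilation(
//       &env, wire_bytes_storage, counters, &detected_features);
//   if (result) {  // equivalent to result.succeeded()
//     native_module->AddCompiledCode(std::move(result));
//   }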
This diff is collapsed.
@@ -1008,7 +1008,10 @@ class ModuleDecoderImpl : public Decoder {
}
// Decode sequence of compilation hints.
if (decoder.ok()) module_->compilation_hints.reserve(hint_count);
if (decoder.ok()) {
module_->compilation_hints.reserve(hint_count);
module_->num_lazy_compilation_hints = 0;
}
for (uint32_t i = 0; decoder.ok() && i < hint_count; i++) {
TRACE("DecodeCompilationHints[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
@@ -1047,12 +1050,18 @@
}
// Happily accept compilation hint.
if (decoder.ok()) module_->compilation_hints.push_back(std::move(hint));
if (decoder.ok()) {
if (hint.strategy == WasmCompilationHintStrategy::kLazy) {
module_->num_lazy_compilation_hints++;
}
module_->compilation_hints.push_back(std::move(hint));
}
}
// If the section was invalid, reset the compilation hints.
if (decoder.failed()) {
module_->compilation_hints.clear();
module_->num_lazy_compilation_hints = 0;
}
// TODO(frgossen) Skip the whole compilation hints section in the outer
......
@@ -433,19 +433,25 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
return AddAndPublishAnonymousCode(code, WasmCode::kFunction);
}
void NativeModule::SetLazyBuiltin() {
uint32_t num_wasm_functions = module_->num_declared_functions;
if (num_wasm_functions == 0) return;
// Fill the jump table with jumps to the lazy compile stub.
Address lazy_compile_target = runtime_stub_entry(WasmCode::kWasmCompileLazy);
for (uint32_t i = 0; i < num_wasm_functions; ++i) {
JumpTableAssembler::EmitLazyCompileJumpSlot(
jump_table_->instruction_start(), i,
i + module_->num_imported_functions, lazy_compile_target,
WasmCode::kNoFlushICache);
void NativeModule::UseLazyStubs() {
uint32_t start = module_->num_imported_functions;
uint32_t end = start + module_->num_declared_functions;
for (uint32_t func_index = start; func_index < end; func_index++) {
UseLazyStub(func_index);
}
FlushInstructionCache(jump_table_->instructions().start(),
jump_table_->instructions().size());
}
void NativeModule::UseLazyStub(uint32_t func_index) {
DCHECK_LE(module_->num_imported_functions, func_index);
DCHECK_LT(func_index,
module_->num_imported_functions + module_->num_declared_functions);
// Add jump table entry for jump to the lazy compile stub.
uint32_t slot_index = func_index - module_->num_imported_functions;
DCHECK_NE(runtime_stub_entry(WasmCode::kWasmCompileLazy), kNullAddress);
JumpTableAssembler::EmitLazyCompileJumpSlot(
jump_table_->instruction_start(), slot_index, func_index,
runtime_stub_entry(WasmCode::kWasmCompileLazy), WasmCode::kFlushICache);
}
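// Illustrative caller of the per-function variant (a sketch, assuming a
// hypothetical helper {HasLazyHint} that consults the decoded hints):
void UseLazyStubsForHintedFunctions(NativeModule* native_module,
                                    const WasmModule* module) {
  uint32_t start = module->num_imported_functions;
  uint32_t end = start + module->num_declared_functions;
  for (uint32_t func_index = start; func_index < end; ++func_index) {
    // Install a lazy-compile stub only where the hint requests laziness.
    if (HasLazyHint(module, func_index)) {
      native_module->UseLazyStub(func_index);
    }
  }
}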
// TODO(mstarzinger): Remove {Isolate} parameter once {V8_EMBEDDED_BUILTINS}
@@ -581,7 +587,7 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
new_code->MaybePrint(name);
new_code->Validate();
return PublishCode(std::move(new_code));
return PublishCode(std::move(new_code)).code;
}
std::unique_ptr<WasmCode> NativeModule::AddCode(
@@ -667,39 +673,98 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
return code;
}
WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
WasmCodeUpdate NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
base::MutexGuard lock(&allocation_mutex_);
return PublishCodeLocked(std::move(code));
}
WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
namespace {
WasmCode::Tier GetCodeTierForExecutionTier(ExecutionTier tier) {
switch (tier) {
case ExecutionTier::kInterpreter:
return WasmCode::Tier::kOther;
case ExecutionTier::kBaseline:
return WasmCode::Tier::kLiftoff;
case ExecutionTier::kOptimized:
return WasmCode::Tier::kTurbofan;
}
}
WasmCode::Kind GetCodeKindForExecutionTier(ExecutionTier tier) {
switch (tier) {
case ExecutionTier::kInterpreter:
return WasmCode::Kind::kInterpreterEntry;
case ExecutionTier::kBaseline:
case ExecutionTier::kOptimized:
return WasmCode::Kind::kFunction;
}
}
// TODO(frgossen): We should merge ExecutionTier with WasmCode::Tier.
base::Optional<ExecutionTier> GetExecutionTier(WasmCode* code) {
if (code == nullptr) return {};
switch (code->tier()) {
case WasmCode::Tier::kLiftoff:
DCHECK_EQ(code->kind(), WasmCode::Kind::kFunction);
return ExecutionTier::kBaseline;
case WasmCode::Tier::kTurbofan:
DCHECK_EQ(code->kind(), WasmCode::Kind::kFunction);
return ExecutionTier::kOptimized;
case WasmCode::Tier::kOther:
if (code->kind() == WasmCode::Kind::kInterpreterEntry)
return ExecutionTier::kInterpreter;
return {};
}
UNREACHABLE();
}
} // namespace
WasmCodeUpdate NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
// The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
DCHECK(!allocation_mutex_.TryLock());
base::Optional<ExecutionTier> prior_tier;
base::Optional<ExecutionTier> tier;
// Skip publishing code if there is an active redirection to the interpreter
// for the given function index, in order to preserve the redirection.
if (!code->IsAnonymous() && !has_interpreter_redirection(code->index())) {
DCHECK_LT(code->index(), num_functions());
DCHECK_LE(module_->num_imported_functions, code->index());
// Update code table, except for interpreter entries that would overwrite
// existing code.
// Assume an order of execution tiers that represents the quality of their
// generated code.
static_assert(ExecutionTier::kInterpreter < ExecutionTier::kBaseline &&
ExecutionTier::kBaseline < ExecutionTier::kOptimized,
"Assume an order on execution tiers");
// Update the code table, but avoid falling back to less optimized code. We
// use the new code if it was compiled with a higher tier and also if we
// cannot determine the tier.
uint32_t slot_idx = code->index() - module_->num_imported_functions;
if (code->kind() != WasmCode::kInterpreterEntry ||
code_table_[slot_idx] == nullptr) {
WasmCode* prior_code = code_table_[slot_idx];
prior_tier = GetExecutionTier(prior_code);
tier = GetExecutionTier(code.get());
bool code_upgrade = !prior_tier.has_value() || !tier.has_value() ||
prior_tier.value() < tier.value();
if (code_upgrade) {
code_table_[slot_idx] = code.get();
}
// Patch jump table.
JumpTableAssembler::PatchJumpTableSlot(jump_table_->instruction_start(),
slot_idx, code->instruction_start(),
WasmCode::kFlushICache);
// Patch the jump table, preferring the most optimized code and always
// honoring interpreter entries.
if (code_upgrade || code->kind_ == WasmCode::Kind::kInterpreterEntry) {
JumpTableAssembler::PatchJumpTableSlot(
jump_table_->instruction_start(), slot_idx, code->instruction_start(),
WasmCode::kFlushICache);
}
}
if (code->kind_ == WasmCode::Kind::kInterpreterEntry) {
SetInterpreterRedirection(code->index());
}
WasmCode* ret = code.get();
WasmCodeUpdate update;
update.code = code.get();
update.tier = tier;
update.prior_tier = prior_tier;
owned_code_.emplace_back(std::move(code));
return ret;
return update;
}
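// For example, the upgrade rule above resolves as:
//   prior_tier = kBaseline,  tier = kOptimized -> code_upgrade = true
//   prior_tier = kOptimized, tier = kBaseline  -> code_upgrade = false
//   prior_tier or tier undetermined (e.g. the slot
//   held no prior code)                        -> code_upgrade = true
// Interpreter entries patch the jump table even when they are not an
// upgrade, so a requested redirection to the interpreter always takes
// effect.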
WasmCode* NativeModule::AddDeserializedCode(
@@ -726,7 +791,7 @@ WasmCode* NativeModule::AddDeserializedCode(
// Note: we do not flush the i-cache here, since the code needs to be
// relocated anyway. The caller is responsible for flushing the i-cache later.
return PublishCode(std::move(code));
return PublishCode(std::move(code)).code;
}
std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
@@ -758,7 +823,7 @@ WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
OwnedVector<const uint8_t>{}, // source_pos
WasmCode::kJumpTable, // kind
WasmCode::kOther}}; // tier
return PublishCode(std::move(code));
return PublishCode(std::move(code)).code;
}
Vector<byte> NativeModule::AllocateForCode(size_t size) {
@@ -1202,34 +1267,11 @@ void NativeModule::SampleCodeSize(
histogram->AddSample(code_size_mb);
}
namespace {
WasmCode::Tier GetCodeTierForExecutionTier(ExecutionTier tier) {
switch (tier) {
case ExecutionTier::kInterpreter:
return WasmCode::Tier::kOther;
case ExecutionTier::kBaseline:
return WasmCode::Tier::kLiftoff;
case ExecutionTier::kOptimized:
return WasmCode::Tier::kTurbofan;
}
}
WasmCode::Kind GetCodeKindForExecutionTier(ExecutionTier tier) {
switch (tier) {
case ExecutionTier::kInterpreter:
return WasmCode::Kind::kInterpreterEntry;
case ExecutionTier::kBaseline:
case ExecutionTier::kOptimized:
return WasmCode::Kind::kFunction;
}
}
} // namespace
WasmCode* NativeModule::AddCompiledCode(WasmCompilationResult result) {
WasmCodeUpdate NativeModule::AddCompiledCode(WasmCompilationResult result) {
return AddCompiledCode({&result, 1})[0];
}
std::vector<WasmCode*> NativeModule::AddCompiledCode(
std::vector<WasmCodeUpdate> NativeModule::AddCompiledCode(
Vector<WasmCompilationResult> results) {
DCHECK(!results.is_empty());
// First, allocate code space for all the results.
@@ -1259,16 +1301,15 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode(
DCHECK_EQ(0, code_space.size());
// Under the {allocation_mutex_}, publish the code.
std::vector<WasmCode*> returned_code;
returned_code.reserve(results.size());
std::vector<WasmCodeUpdate> code_updates;
code_updates.reserve(results.size());
{
base::MutexGuard lock(&allocation_mutex_);
for (auto& result : generated_code) {
returned_code.push_back(PublishCodeLocked(std::move(result)));
}
for (auto& result : generated_code)
code_updates.push_back(PublishCodeLocked(std::move(result)));
}
return returned_code;
return code_updates;
}
void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
......
@@ -14,6 +14,7 @@
#include <vector>
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/builtins/builtins-definitions.h"
#include "src/handles.h"
#include "src/trap-handler/trap-handler.h"
@@ -72,6 +73,12 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool);
};
struct WasmCodeUpdate {
WasmCode* code = nullptr;
base::Optional<ExecutionTier> tier;
base::Optional<ExecutionTier> prior_tier;
};
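// Illustrative consumer (a sketch): the two tiers can be compared to detect
// tier-up, relying on the ordering of {ExecutionTier} that
// {PublishCodeLocked} asserts:
//
//   WasmCodeUpdate update =
//       native_module->AddCompiledCode(std::move(result));
//   if (update.tier.has_value() && update.prior_tier.has_value() &&
//       update.prior_tier.value() < update.tier.value()) {
//     // Tier-up, e.g. Liftoff code was replaced by TurboFan code.
//   }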
class V8_EXPORT_PRIVATE WasmCode final {
public:
enum Kind {
@@ -270,9 +277,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// {PublishCode} makes the code available to the system by entering it into
// the code table and patching the jump table. It returns a {WasmCodeUpdate}
// describing the published code and its tier transition.
WasmCode* PublishCode(std::unique_ptr<WasmCode>);
WasmCodeUpdate PublishCode(std::unique_ptr<WasmCode>);
// Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>);
WasmCodeUpdate PublishCodeLocked(std::unique_ptr<WasmCode>);
WasmCode* AddDeserializedCode(
uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
@@ -288,10 +295,12 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Adds anonymous code for testing purposes.
WasmCode* AddCodeForTesting(Handle<Code> code);
// Use this to start lazy compilation for the entire module. It will use the
// existing {WasmCode::kWasmCompileLazy} runtime stub and populate the jump
// table with trampolines to that runtime stub.
void SetLazyBuiltin();
// Use this to setup lazy compilation for the entire module ({UseLazyStubs})
// or for individual functions ({UseLazyStub}). It will use the existing
// {WasmCode::kWasmCompileLazy} runtime stub and populate the jump table with
// trampolines to that runtime stub.
void UseLazyStubs();
void UseLazyStub(uint32_t func_index);
// Initializes all runtime stubs by setting up entry addresses in the runtime
// stub table. It must be called exactly once per native module before adding
@@ -370,6 +379,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
UseTrapHandler use_trap_handler() const { return use_trap_handler_; }
void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
void set_lazy_compilation(bool lazy) { lazy_compilation_ = lazy; }
bool lazy_compilation() const { return lazy_compilation_; }
Vector<const uint8_t> wire_bytes() const { return wire_bytes_->as_vector(); }
const WasmModule* module() const { return module_.get(); }
std::shared_ptr<const WasmModule> shared_module() const { return module_; }
@@ -394,8 +405,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
enum CodeSamplingTime : int8_t { kAfterBaseline, kAfterTopTier, kSampling };
void SampleCodeSize(Counters*, CodeSamplingTime) const;
WasmCode* AddCompiledCode(WasmCompilationResult);
std::vector<WasmCode*> AddCompiledCode(Vector<WasmCompilationResult>);
WasmCodeUpdate AddCompiledCode(WasmCompilationResult);
std::vector<WasmCodeUpdate> AddCompiledCode(Vector<WasmCompilationResult>);
// Free a set of functions of this module. Uncommits whole pages if possible.
// The given vector must be ordered by the instruction start address, and all
@@ -524,6 +535,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
UseTrapHandler use_trap_handler_ = kNoTrapHandler;
bool is_executable_ = false;
bool lazy_compile_frozen_ = false;
bool lazy_compilation_ = false;
DISALLOW_COPY_AND_ASSIGN(NativeModule);
};
......
@@ -195,6 +195,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
uint32_t num_declared_functions = 0; // excluding imported
uint32_t num_exported_functions = 0;
uint32_t num_declared_data_segments = 0; // From the DataCount section.
uint32_t num_lazy_compilation_hints = 0; // From compilation hints section.
WireBytesRef name = {0, 0};
std::vector<FunctionSig*> signatures; // by signature index
std::vector<uint32_t> signature_ids; // by signature index
......
@@ -503,7 +503,12 @@ bool NativeModuleDeserializer::ReadHeader(Reader* reader) {
bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
size_t code_section_size = reader->Read<size_t>();
if (code_section_size == 0) return true;
if (code_section_size == 0) {
DCHECK(FLAG_wasm_lazy_compilation ||
native_module_->enabled_features().compilation_hints);
native_module_->UseLazyStub(fn_index);
return true;
}
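// Note: this presumably pairs with the serializer writing a zero code size
// for functions that were never compiled, so the round trip is: no WasmCode
// at serialization time -> code_section_size == 0 -> reinstall the lazy
// stub here instead of materializing code.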
size_t constant_pool_offset = reader->Read<size_t>();
size_t safepoint_table_offset = reader->Read<size_t>();
size_t handler_table_offset = reader->Read<size_t>();
@@ -625,9 +630,8 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
std::move(wire_bytes_copy), script, Handle<ByteArray>::null());
NativeModule* native_module = module_object->native_module();
if (FLAG_wasm_lazy_compilation) {
native_module->SetLazyBuiltin();
}
native_module->set_lazy_compilation(FLAG_wasm_lazy_compilation);
NativeModuleDeserializer deserializer(native_module);
Reader reader(data + kVersionSize);
......
@@ -501,7 +501,9 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
WasmCompilationResult result = unit.ExecuteCompilation(
&env, native_module->compilation_state()->GetWireBytesStorage(),
isolate()->counters(), &unused_detected_features);
WasmCode* code = native_module->AddCompiledCode(std::move(result));
WasmCodeUpdate code_update =
native_module->AddCompiledCode(std::move(result));
WasmCode* code = code_update.code;
DCHECK_NOT_NULL(code);
if (WasmCode::ShouldBeLogged(isolate())) code->LogCode(isolate());
}
......
@@ -177,7 +177,7 @@ class WasmCodeManagerTest : public TestWithContext,
desc.instr_size = static_cast<int>(size);
std::unique_ptr<WasmCode> code = native_module->AddCode(
index, desc, 0, 0, {}, {}, WasmCode::kFunction, WasmCode::kOther);
return native_module->PublishCode(std::move(code));
return native_module->PublishCode(std::move(code)).code;
}
size_t page() const { return AllocatePageSize(); }
......