Commit 9ac3ec56 authored by Frederik Gossen, committed by Commit Bot

Reland "[wasm-hints] Enabled Lazy Compilation by Hint"

This is a reland of 09fa63a9

Original change's description:
> [wasm-hints] Enabled Lazy Compilation by Hint
>
> Hints for lazy compilation are now taken into consideration. If the
> custom hints section suggests lazy compilation we do so unless the module
> consists of a single function.
>
> Bug: v8:9003
> Change-Id: Ibdc400453cee20d4d5c814733887b38fb675b220
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1535827
> Commit-Queue: Frederik Gossen <frgossen@google.com>
> Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
> Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#60557}

Bug: v8:9003
No-Try: true
Change-Id: I8d6f4518aa548c815fba4e6e62d2206129336cc6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1547851
Commit-Queue: Frederik Gossen <frgossen@google.com>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60564}
parent 37cdcbcf
...@@ -5948,7 +5948,7 @@ wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine, ...@@ -5948,7 +5948,7 @@ wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
std::move(result.source_positions), wasm::WasmCode::kFunction, std::move(result.source_positions), wasm::WasmCode::kFunction,
wasm::WasmCode::kOther); wasm::WasmCode::kOther);
// TODO(titzer): add counters for math intrinsic code size / allocation // TODO(titzer): add counters for math intrinsic code size / allocation
return native_module->PublishCode(std::move(wasm_code)); return native_module->PublishCode(std::move(wasm_code)).code;
} }
wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine, wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
...@@ -6012,7 +6012,7 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine, ...@@ -6012,7 +6012,7 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
std::move(result.protected_instructions), std::move(result.protected_instructions),
std::move(result.source_positions), wasm::WasmCode::kWasmToJsWrapper, std::move(result.source_positions), wasm::WasmCode::kWasmToJsWrapper,
wasm::WasmCode::kOther); wasm::WasmCode::kOther);
return native_module->PublishCode(std::move(wasm_code)); return native_module->PublishCode(std::move(wasm_code)).code;
} }
wasm::WasmCompilationResult CompileWasmInterpreterEntry( wasm::WasmCompilationResult CompileWasmInterpreterEntry(
...@@ -6243,7 +6243,9 @@ wasm::WasmCompilationResult TurbofanWasmCompilationUnit::ExecuteCompilation( ...@@ -6243,7 +6243,9 @@ wasm::WasmCompilationResult TurbofanWasmCompilationUnit::ExecuteCompilation(
// TODO(bradnelson): Improve histogram handling of size_t. // TODO(bradnelson): Improve histogram handling of size_t.
counters->wasm_compile_function_peak_memory_bytes()->AddSample( counters->wasm_compile_function_peak_memory_bytes()->AddSample(
static_cast<int>(mcgraph->graph()->zone()->allocation_size())); static_cast<int>(mcgraph->graph()->zone()->allocation_size()));
return std::move(*info.ReleaseWasmCompilationResult()); auto result = info.ReleaseWasmCompilationResult();
DCHECK_EQ(wasm::ExecutionTier::kOptimized, result->result_tier);
return std::move(*result);
} }
wasm::WasmCompilationResult InterpreterCompilationUnit::ExecuteCompilation( wasm::WasmCompilationResult InterpreterCompilationUnit::ExecuteCompilation(
...@@ -6260,6 +6262,7 @@ wasm::WasmCompilationResult InterpreterCompilationUnit::ExecuteCompilation( ...@@ -6260,6 +6262,7 @@ wasm::WasmCompilationResult InterpreterCompilationUnit::ExecuteCompilation(
wasm_unit_->wasm_engine_, env->enabled_features, wasm_unit_->func_index_, wasm_unit_->wasm_engine_, env->enabled_features, wasm_unit_->func_index_,
func_body.sig); func_body.sig);
DCHECK(result.succeeded()); DCHECK(result.succeeded());
DCHECK_EQ(wasm::ExecutionTier::kInterpreter, result.result_tier);
return result; return result;
} }
......
...@@ -21,6 +21,7 @@ namespace wasm { ...@@ -21,6 +21,7 @@ namespace wasm {
class NativeModule; class NativeModule;
class WasmCode; class WasmCode;
struct WasmCompilationResult;
class WasmError; class WasmError;
enum RuntimeExceptionSupport : bool { enum RuntimeExceptionSupport : bool {
...@@ -120,7 +121,8 @@ class CompilationState { ...@@ -120,7 +121,8 @@ class CompilationState {
bool failed() const; bool failed() const;
void OnFinishedUnit(ExecutionTier, WasmCode*); void FinishUnit(WasmCompilationResult);
void FinishUnits(Vector<WasmCompilationResult>);
private: private:
friend class NativeModule; friend class NativeModule;
......
...@@ -52,6 +52,7 @@ struct WasmCompilationResult { ...@@ -52,6 +52,7 @@ struct WasmCompilationResult {
MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(WasmCompilationResult); MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(WasmCompilationResult);
bool succeeded() const { return code_desc.buffer != nullptr; } bool succeeded() const { return code_desc.buffer != nullptr; }
bool failed() const { return !succeeded(); }
operator bool() const { return succeeded(); } operator bool() const { return succeeded(); }
CodeDesc code_desc; CodeDesc code_desc;
......
This diff is collapsed.
...@@ -1008,7 +1008,10 @@ class ModuleDecoderImpl : public Decoder { ...@@ -1008,7 +1008,10 @@ class ModuleDecoderImpl : public Decoder {
} }
// Decode sequence of compilation hints. // Decode sequence of compilation hints.
if (decoder.ok()) module_->compilation_hints.reserve(hint_count); if (decoder.ok()) {
module_->compilation_hints.reserve(hint_count);
module_->num_lazy_compilation_hints = 0;
}
for (uint32_t i = 0; decoder.ok() && i < hint_count; i++) { for (uint32_t i = 0; decoder.ok() && i < hint_count; i++) {
TRACE("DecodeCompilationHints[%d] module+%d\n", i, TRACE("DecodeCompilationHints[%d] module+%d\n", i,
static_cast<int>(pc_ - start_)); static_cast<int>(pc_ - start_));
...@@ -1047,12 +1050,18 @@ class ModuleDecoderImpl : public Decoder { ...@@ -1047,12 +1050,18 @@ class ModuleDecoderImpl : public Decoder {
} }
// Happily accept compilation hint. // Happily accept compilation hint.
if (decoder.ok()) module_->compilation_hints.push_back(std::move(hint)); if (decoder.ok()) {
if (hint.strategy == WasmCompilationHintStrategy::kLazy) {
module_->num_lazy_compilation_hints++;
}
module_->compilation_hints.push_back(std::move(hint));
}
} }
// If section was invalid reset compilation hints. // If section was invalid reset compilation hints.
if (decoder.failed()) { if (decoder.failed()) {
module_->compilation_hints.clear(); module_->compilation_hints.clear();
module_->num_lazy_compilation_hints = 0;
} }
// @TODO(frgossen) Skip the whole compilation hints section in the outer // @TODO(frgossen) Skip the whole compilation hints section in the outer
......
...@@ -433,19 +433,25 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) { ...@@ -433,19 +433,25 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
return AddAndPublishAnonymousCode(code, WasmCode::kFunction); return AddAndPublishAnonymousCode(code, WasmCode::kFunction);
} }
void NativeModule::SetLazyBuiltin() { void NativeModule::UseLazyStubs() {
uint32_t num_wasm_functions = module_->num_declared_functions; uint32_t start = module_->num_imported_functions;
if (num_wasm_functions == 0) return; uint32_t end = start + module_->num_declared_functions;
// Fill the jump table with jumps to the lazy compile stub. for (uint32_t func_index = start; func_index < end; func_index++) {
Address lazy_compile_target = runtime_stub_entry(WasmCode::kWasmCompileLazy); UseLazyStub(func_index);
for (uint32_t i = 0; i < num_wasm_functions; ++i) {
JumpTableAssembler::EmitLazyCompileJumpSlot(
jump_table_->instruction_start(), i,
i + module_->num_imported_functions, lazy_compile_target,
WasmCode::kNoFlushICache);
} }
FlushInstructionCache(jump_table_->instructions().start(), }
jump_table_->instructions().size());
void NativeModule::UseLazyStub(uint32_t func_index) {
DCHECK_LE(module_->num_imported_functions, func_index);
DCHECK_LT(func_index,
module_->num_imported_functions + module_->num_declared_functions);
// Add jump table entry for jump to the lazy compile stub.
uint32_t slot_index = func_index - module_->num_imported_functions;
DCHECK_NE(runtime_stub_entry(WasmCode::kWasmCompileLazy), kNullAddress);
JumpTableAssembler::EmitLazyCompileJumpSlot(
jump_table_->instruction_start(), slot_index, func_index,
runtime_stub_entry(WasmCode::kWasmCompileLazy), WasmCode::kFlushICache);
} }
// TODO(mstarzinger): Remove {Isolate} parameter once {V8_EMBEDDED_BUILTINS} // TODO(mstarzinger): Remove {Isolate} parameter once {V8_EMBEDDED_BUILTINS}
...@@ -581,7 +587,7 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code, ...@@ -581,7 +587,7 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
new_code->MaybePrint(name); new_code->MaybePrint(name);
new_code->Validate(); new_code->Validate();
return PublishCode(std::move(new_code)); return PublishCode(std::move(new_code)).code;
} }
std::unique_ptr<WasmCode> NativeModule::AddCode( std::unique_ptr<WasmCode> NativeModule::AddCode(
...@@ -667,39 +673,101 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace( ...@@ -667,39 +673,101 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
return code; return code;
} }
WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) { WasmCodeUpdate NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
base::MutexGuard lock(&allocation_mutex_); base::MutexGuard lock(&allocation_mutex_);
return PublishCodeLocked(std::move(code)); return PublishCodeLocked(std::move(code));
} }
WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) { namespace {
WasmCode::Tier GetCodeTierForExecutionTier(ExecutionTier tier) {
switch (tier) {
case ExecutionTier::kInterpreter:
return WasmCode::Tier::kOther;
case ExecutionTier::kBaseline:
return WasmCode::Tier::kLiftoff;
case ExecutionTier::kOptimized:
return WasmCode::Tier::kTurbofan;
}
}
WasmCode::Kind GetCodeKindForExecutionTier(ExecutionTier tier) {
switch (tier) {
case ExecutionTier::kInterpreter:
return WasmCode::Kind::kInterpreterEntry;
case ExecutionTier::kBaseline:
case ExecutionTier::kOptimized:
return WasmCode::Kind::kFunction;
}
}
// TODO(frgossen): We should merge ExecutionTier with WasmCode::Tier.
base::Optional<ExecutionTier> GetExecutionTier(WasmCode* code) {
if (code == nullptr) return {};
switch (code->tier()) {
case WasmCode::Tier::kLiftoff:
DCHECK_EQ(code->kind(), WasmCode::Kind::kFunction);
return ExecutionTier::kBaseline;
case WasmCode::Tier::kTurbofan:
DCHECK_EQ(code->kind(), WasmCode::Kind::kFunction);
return ExecutionTier::kOptimized;
case WasmCode::Tier::kOther:
if (code->kind() == WasmCode::Kind::kInterpreterEntry)
return ExecutionTier::kInterpreter;
return {};
}
UNREACHABLE();
}
} // namespace
WasmCodeUpdate NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
// The caller must hold the {allocation_mutex_}, thus we fail to lock it here. // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
DCHECK(!allocation_mutex_.TryLock()); DCHECK(!allocation_mutex_.TryLock());
// Skip publishing code if there is an active redirection to the interpreter WasmCodeUpdate update;
// for the given function index, in order to preserve the redirection.
if (!code->IsAnonymous() && !has_interpreter_redirection(code->index())) { if (!code->IsAnonymous()) {
DCHECK_LT(code->index(), num_functions()); DCHECK_LT(code->index(), num_functions());
DCHECK_LE(module_->num_imported_functions, code->index()); DCHECK_LE(module_->num_imported_functions, code->index());
// Update code table, except for interpreter entries that would overwrite // Assume an order of execution tiers that represents the quality of their
// existing code. // generated code.
static_assert(ExecutionTier::kInterpreter < ExecutionTier::kBaseline &&
ExecutionTier::kBaseline < ExecutionTier::kOptimized,
"Assume an order on execution tiers");
// Update code table but avoid to fall back to less optimized code. We use
// the new code if it was compiled with a higher tier and also if we cannot
// determine the tier.
uint32_t slot_idx = code->index() - module_->num_imported_functions; uint32_t slot_idx = code->index() - module_->num_imported_functions;
if (code->kind() != WasmCode::kInterpreterEntry || WasmCode* prior_code = code_table_[slot_idx];
code_table_[slot_idx] == nullptr) { update.prior_tier = GetExecutionTier(prior_code);
update.tier = GetExecutionTier(code.get());
bool update_code_table = !update.prior_tier.has_value() ||
!update.tier.has_value() ||
update.prior_tier.value() < update.tier.value();
if (update_code_table) {
code_table_[slot_idx] = code.get(); code_table_[slot_idx] = code.get();
} }
// Patch jump table. // Populate optimized code to the jump table unless there is an active
JumpTableAssembler::PatchJumpTableSlot(jump_table_->instruction_start(), // redirection to the interpreter that should be preserved.
slot_idx, code->instruction_start(), bool update_jump_table =
WasmCode::kFlushICache); update_code_table && !has_interpreter_redirection(code->index());
}
if (code->kind_ == WasmCode::Kind::kInterpreterEntry) { // Ensure that interpreter entries always populate to the jump table.
SetInterpreterRedirection(code->index()); if (code->kind_ == WasmCode::Kind::kInterpreterEntry) {
SetInterpreterRedirection(code->index());
update_jump_table = true;
}
if (update_jump_table) {
JumpTableAssembler::PatchJumpTableSlot(
jump_table_->instruction_start(), slot_idx, code->instruction_start(),
WasmCode::kFlushICache);
}
} }
WasmCode* ret = code.get(); update.code = code.get();
owned_code_.emplace_back(std::move(code)); owned_code_.emplace_back(std::move(code));
return ret; return update;
} }
WasmCode* NativeModule::AddDeserializedCode( WasmCode* NativeModule::AddDeserializedCode(
...@@ -726,7 +794,7 @@ WasmCode* NativeModule::AddDeserializedCode( ...@@ -726,7 +794,7 @@ WasmCode* NativeModule::AddDeserializedCode(
// Note: we do not flush the i-cache here, since the code needs to be // Note: we do not flush the i-cache here, since the code needs to be
// relocated anyway. The caller is responsible for flushing the i-cache later. // relocated anyway. The caller is responsible for flushing the i-cache later.
return PublishCode(std::move(code)); return PublishCode(std::move(code)).code;
} }
std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const { std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
...@@ -758,7 +826,7 @@ WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) { ...@@ -758,7 +826,7 @@ WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
OwnedVector<const uint8_t>{}, // source_pos OwnedVector<const uint8_t>{}, // source_pos
WasmCode::kJumpTable, // kind WasmCode::kJumpTable, // kind
WasmCode::kOther}}; // tier WasmCode::kOther}}; // tier
return PublishCode(std::move(code)); return PublishCode(std::move(code)).code;
} }
Vector<byte> NativeModule::AllocateForCode(size_t size) { Vector<byte> NativeModule::AllocateForCode(size_t size) {
...@@ -1202,34 +1270,11 @@ void NativeModule::SampleCodeSize( ...@@ -1202,34 +1270,11 @@ void NativeModule::SampleCodeSize(
histogram->AddSample(code_size_mb); histogram->AddSample(code_size_mb);
} }
namespace { WasmCodeUpdate NativeModule::AddCompiledCode(WasmCompilationResult result) {
WasmCode::Tier GetCodeTierForExecutionTier(ExecutionTier tier) {
switch (tier) {
case ExecutionTier::kInterpreter:
return WasmCode::Tier::kOther;
case ExecutionTier::kBaseline:
return WasmCode::Tier::kLiftoff;
case ExecutionTier::kOptimized:
return WasmCode::Tier::kTurbofan;
}
}
WasmCode::Kind GetCodeKindForExecutionTier(ExecutionTier tier) {
switch (tier) {
case ExecutionTier::kInterpreter:
return WasmCode::Kind::kInterpreterEntry;
case ExecutionTier::kBaseline:
case ExecutionTier::kOptimized:
return WasmCode::Kind::kFunction;
}
}
} // namespace
WasmCode* NativeModule::AddCompiledCode(WasmCompilationResult result) {
return AddCompiledCode({&result, 1})[0]; return AddCompiledCode({&result, 1})[0];
} }
std::vector<WasmCode*> NativeModule::AddCompiledCode( std::vector<WasmCodeUpdate> NativeModule::AddCompiledCode(
Vector<WasmCompilationResult> results) { Vector<WasmCompilationResult> results) {
DCHECK(!results.is_empty()); DCHECK(!results.is_empty());
// First, allocate code space for all the results. // First, allocate code space for all the results.
...@@ -1259,16 +1304,15 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode( ...@@ -1259,16 +1304,15 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode(
DCHECK_EQ(0, code_space.size()); DCHECK_EQ(0, code_space.size());
// Under the {allocation_mutex_}, publish the code. // Under the {allocation_mutex_}, publish the code.
std::vector<WasmCode*> returned_code; std::vector<WasmCodeUpdate> code_updates;
returned_code.reserve(results.size()); code_updates.reserve(results.size());
{ {
base::MutexGuard lock(&allocation_mutex_); base::MutexGuard lock(&allocation_mutex_);
for (auto& result : generated_code) { for (auto& result : generated_code)
returned_code.push_back(PublishCodeLocked(std::move(result))); code_updates.push_back(PublishCodeLocked(std::move(result)));
}
} }
return returned_code; return code_updates;
} }
void NativeModule::FreeCode(Vector<WasmCode* const> codes) { void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <vector> #include <vector>
#include "src/base/macros.h" #include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/builtins/builtins-definitions.h" #include "src/builtins/builtins-definitions.h"
#include "src/handles.h" #include "src/handles.h"
#include "src/trap-handler/trap-handler.h" #include "src/trap-handler/trap-handler.h"
...@@ -72,6 +73,12 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final { ...@@ -72,6 +73,12 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool); DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool);
}; };
struct WasmCodeUpdate {
WasmCode* code = nullptr;
base::Optional<ExecutionTier> tier;
base::Optional<ExecutionTier> prior_tier;
};
class V8_EXPORT_PRIVATE WasmCode final { class V8_EXPORT_PRIVATE WasmCode final {
public: public:
enum Kind { enum Kind {
...@@ -270,9 +277,9 @@ class V8_EXPORT_PRIVATE NativeModule final { ...@@ -270,9 +277,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// {PublishCode} makes the code available to the system by entering it into // {PublishCode} makes the code available to the system by entering it into
// the code table and patching the jump table. It returns a raw pointer to the // the code table and patching the jump table. It returns a raw pointer to the
// given {WasmCode} object. // given {WasmCode} object.
WasmCode* PublishCode(std::unique_ptr<WasmCode>); WasmCodeUpdate PublishCode(std::unique_ptr<WasmCode>);
// Hold the {allocation_mutex_} when calling {PublishCodeLocked}. // Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>); WasmCodeUpdate PublishCodeLocked(std::unique_ptr<WasmCode>);
WasmCode* AddDeserializedCode( WasmCode* AddDeserializedCode(
uint32_t index, Vector<const byte> instructions, uint32_t stack_slots, uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
...@@ -288,10 +295,12 @@ class V8_EXPORT_PRIVATE NativeModule final { ...@@ -288,10 +295,12 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Adds anonymous code for testing purposes. // Adds anonymous code for testing purposes.
WasmCode* AddCodeForTesting(Handle<Code> code); WasmCode* AddCodeForTesting(Handle<Code> code);
// Use this to start lazy compilation for the entire module. It will use the // Use this to setup lazy compilation for the entire module ({UseLazyStubs})
// existing {WasmCode::kWasmCompileLazy} runtime stub and populate the jump // or for individual functions ({UseLazyStub}). It will use the existing
// table with trampolines to that runtime stub. // {WasmCode::kWasmCompileLazy} runtime stub and populate the jump table with
void SetLazyBuiltin(); // trampolines to that runtime stub.
void UseLazyStubs();
void UseLazyStub(uint32_t func_index);
// Initializes all runtime stubs by setting up entry addresses in the runtime // Initializes all runtime stubs by setting up entry addresses in the runtime
// stub table. It must be called exactly once per native module before adding // stub table. It must be called exactly once per native module before adding
...@@ -370,6 +379,8 @@ class V8_EXPORT_PRIVATE NativeModule final { ...@@ -370,6 +379,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
UseTrapHandler use_trap_handler() const { return use_trap_handler_; } UseTrapHandler use_trap_handler() const { return use_trap_handler_; }
void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; } void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
bool lazy_compile_frozen() const { return lazy_compile_frozen_; } bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
void set_lazy_compilation(bool lazy) { lazy_compilation_ = lazy; }
bool lazy_compilation() const { return lazy_compilation_; }
Vector<const uint8_t> wire_bytes() const { return wire_bytes_->as_vector(); } Vector<const uint8_t> wire_bytes() const { return wire_bytes_->as_vector(); }
const WasmModule* module() const { return module_.get(); } const WasmModule* module() const { return module_.get(); }
std::shared_ptr<const WasmModule> shared_module() const { return module_; } std::shared_ptr<const WasmModule> shared_module() const { return module_; }
...@@ -394,8 +405,8 @@ class V8_EXPORT_PRIVATE NativeModule final { ...@@ -394,8 +405,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
enum CodeSamplingTime : int8_t { kAfterBaseline, kAfterTopTier, kSampling }; enum CodeSamplingTime : int8_t { kAfterBaseline, kAfterTopTier, kSampling };
void SampleCodeSize(Counters*, CodeSamplingTime) const; void SampleCodeSize(Counters*, CodeSamplingTime) const;
WasmCode* AddCompiledCode(WasmCompilationResult); WasmCodeUpdate AddCompiledCode(WasmCompilationResult);
std::vector<WasmCode*> AddCompiledCode(Vector<WasmCompilationResult>); std::vector<WasmCodeUpdate> AddCompiledCode(Vector<WasmCompilationResult>);
// Free a set of functions of this module. Uncommits whole pages if possible. // Free a set of functions of this module. Uncommits whole pages if possible.
// The given vector must be ordered by the instruction start address, and all // The given vector must be ordered by the instruction start address, and all
...@@ -524,6 +535,7 @@ class V8_EXPORT_PRIVATE NativeModule final { ...@@ -524,6 +535,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
UseTrapHandler use_trap_handler_ = kNoTrapHandler; UseTrapHandler use_trap_handler_ = kNoTrapHandler;
bool is_executable_ = false; bool is_executable_ = false;
bool lazy_compile_frozen_ = false; bool lazy_compile_frozen_ = false;
bool lazy_compilation_ = false;
DISALLOW_COPY_AND_ASSIGN(NativeModule); DISALLOW_COPY_AND_ASSIGN(NativeModule);
}; };
......
...@@ -195,6 +195,7 @@ struct V8_EXPORT_PRIVATE WasmModule { ...@@ -195,6 +195,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
uint32_t num_declared_functions = 0; // excluding imported uint32_t num_declared_functions = 0; // excluding imported
uint32_t num_exported_functions = 0; uint32_t num_exported_functions = 0;
uint32_t num_declared_data_segments = 0; // From the DataCount section. uint32_t num_declared_data_segments = 0; // From the DataCount section.
uint32_t num_lazy_compilation_hints = 0; // From compilation hints section.
WireBytesRef name = {0, 0}; WireBytesRef name = {0, 0};
std::vector<FunctionSig*> signatures; // by signature index std::vector<FunctionSig*> signatures; // by signature index
std::vector<uint32_t> signature_ids; // by signature index std::vector<uint32_t> signature_ids; // by signature index
......
...@@ -503,7 +503,12 @@ bool NativeModuleDeserializer::ReadHeader(Reader* reader) { ...@@ -503,7 +503,12 @@ bool NativeModuleDeserializer::ReadHeader(Reader* reader) {
bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) { bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
size_t code_section_size = reader->Read<size_t>(); size_t code_section_size = reader->Read<size_t>();
if (code_section_size == 0) return true; if (code_section_size == 0) {
DCHECK(FLAG_wasm_lazy_compilation ||
native_module_->enabled_features().compilation_hints);
native_module_->UseLazyStub(fn_index);
return true;
}
size_t constant_pool_offset = reader->Read<size_t>(); size_t constant_pool_offset = reader->Read<size_t>();
size_t safepoint_table_offset = reader->Read<size_t>(); size_t safepoint_table_offset = reader->Read<size_t>();
size_t handler_table_offset = reader->Read<size_t>(); size_t handler_table_offset = reader->Read<size_t>();
...@@ -625,9 +630,8 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule( ...@@ -625,9 +630,8 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
std::move(wire_bytes_copy), script, Handle<ByteArray>::null()); std::move(wire_bytes_copy), script, Handle<ByteArray>::null());
NativeModule* native_module = module_object->native_module(); NativeModule* native_module = module_object->native_module();
if (FLAG_wasm_lazy_compilation) { native_module->set_lazy_compilation(FLAG_wasm_lazy_compilation);
native_module->SetLazyBuiltin();
}
NativeModuleDeserializer deserializer(native_module); NativeModuleDeserializer deserializer(native_module);
Reader reader(data + kVersionSize); Reader reader(data + kVersionSize);
......
...@@ -501,7 +501,9 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) { ...@@ -501,7 +501,9 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
WasmCompilationResult result = unit.ExecuteCompilation( WasmCompilationResult result = unit.ExecuteCompilation(
&env, native_module->compilation_state()->GetWireBytesStorage(), &env, native_module->compilation_state()->GetWireBytesStorage(),
isolate()->counters(), &unused_detected_features); isolate()->counters(), &unused_detected_features);
WasmCode* code = native_module->AddCompiledCode(std::move(result)); WasmCodeUpdate code_update =
native_module->AddCompiledCode(std::move(result));
WasmCode* code = code_update.code;
DCHECK_NOT_NULL(code); DCHECK_NOT_NULL(code);
if (WasmCode::ShouldBeLogged(isolate())) code->LogCode(isolate()); if (WasmCode::ShouldBeLogged(isolate())) code->LogCode(isolate());
} }
......
...@@ -177,7 +177,7 @@ class WasmCodeManagerTest : public TestWithContext, ...@@ -177,7 +177,7 @@ class WasmCodeManagerTest : public TestWithContext,
desc.instr_size = static_cast<int>(size); desc.instr_size = static_cast<int>(size);
std::unique_ptr<WasmCode> code = native_module->AddCode( std::unique_ptr<WasmCode> code = native_module->AddCode(
index, desc, 0, 0, {}, {}, WasmCode::kFunction, WasmCode::kOther); index, desc, 0, 0, {}, {}, WasmCode::kFunction, WasmCode::kOther);
return native_module->PublishCode(std::move(code)); return native_module->PublishCode(std::move(code)).code;
} }
size_t page() const { return AllocatePageSize(); } size_t page() const { return AllocatePageSize(); }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment