Commit be470c55 authored by Frederik Gossen, committed by Commit Bot

Revert "[wasm-hints] Enabled Lazy Compilation by Hint"

This reverts commit 09fa63a9.

Reason for revert: Flakes on https://ci.chromium.org/p/v8/builders/ci/V8%20Linux%20-%20shared/29942

Original change's description:
> [wasm-hints] Enabled Lazy Compilation by Hint
> 
> Hints for lazy compilation are now taken into consideration. If the
> custom hints section suggests lazy compilation, we do so unless the module
> consists of a single function.
> 
> Bug: v8:9003
> Change-Id: Ibdc400453cee20d4d5c814733887b38fb675b220
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1535827
> Commit-Queue: Frederik Gossen <frgossen@google.com>
> Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
> Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#60557}

TBR=mstarzinger@chromium.org,clemensh@chromium.org,frgossen@google.com

Change-Id: I18dd424fe8cf05f220f7498bb1ebe4b9fce7d240
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:9003
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1547668
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60558}
parent 09fa63a9
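For context before the diff: the reverted change decided to compile a function lazily when the module's compilation hints section asks for it, unless the module declares only a single function (or FLAG_wasm_lazy_compilation forces lazy compilation anyway). The following is an illustrative sketch of that decision only, not code from this CL; the names ModuleInfo and ChooseLazyCompilation are hypothetical and are not V8 APIs.

// Illustrative sketch; ModuleInfo and ChooseLazyCompilation are hypothetical.
#include <cstdint>

struct ModuleInfo {
  uint32_t num_declared_functions = 0;
  uint32_t num_lazy_compilation_hints = 0;  // from the compilation hints section
};

// Use lazy compilation when the flag forces it, or when at least one hint
// asks for it and the module has more than a single declared function.
bool ChooseLazyCompilation(const ModuleInfo& module, bool lazy_flag) {
  if (lazy_flag) return true;
  if (module.num_declared_functions <= 1) return false;
  return module.num_lazy_compilation_hints > 0;
}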
@@ -5948,7 +5948,7 @@ wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
       std::move(result.source_positions), wasm::WasmCode::kFunction,
       wasm::WasmCode::kOther);
   // TODO(titzer): add counters for math intrinsic code size / allocation
-  return native_module->PublishCode(std::move(wasm_code)).code;
+  return native_module->PublishCode(std::move(wasm_code));
 }
 
 wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
@@ -6012,7 +6012,7 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
       std::move(result.protected_instructions),
       std::move(result.source_positions), wasm::WasmCode::kWasmToJsWrapper,
       wasm::WasmCode::kOther);
-  return native_module->PublishCode(std::move(wasm_code)).code;
+  return native_module->PublishCode(std::move(wasm_code));
 }
 
 wasm::WasmCompilationResult CompileWasmInterpreterEntry(
@@ -6243,9 +6243,7 @@ wasm::WasmCompilationResult TurbofanWasmCompilationUnit::ExecuteCompilation(
   // TODO(bradnelson): Improve histogram handling of size_t.
   counters->wasm_compile_function_peak_memory_bytes()->AddSample(
       static_cast<int>(mcgraph->graph()->zone()->allocation_size()));
-  auto result = info.ReleaseWasmCompilationResult();
-  DCHECK_EQ(wasm::ExecutionTier::kOptimized, result->result_tier);
-  return std::move(*result);
+  return std::move(*info.ReleaseWasmCompilationResult());
 }
 
 wasm::WasmCompilationResult InterpreterCompilationUnit::ExecuteCompilation(
@@ -6262,7 +6260,6 @@ wasm::WasmCompilationResult InterpreterCompilationUnit::ExecuteCompilation(
       wasm_unit_->wasm_engine_, env->enabled_features, wasm_unit_->func_index_,
       func_body.sig);
   DCHECK(result.succeeded());
-  DCHECK_EQ(wasm::ExecutionTier::kInterpreter, result.result_tier);
   return result;
 }
...
@@ -21,7 +21,6 @@ namespace wasm {
 class NativeModule;
 class WasmCode;
-struct WasmCompilationResult;
 class WasmError;
 
 enum RuntimeExceptionSupport : bool {
@@ -121,8 +120,7 @@ class CompilationState {
   bool failed() const;
 
-  void FinishUnit(WasmCompilationResult);
-  void FinishUnits(Vector<WasmCompilationResult>);
+  void OnFinishedUnit(ExecutionTier, WasmCode*);
 
  private:
  friend class NativeModule;
...
@@ -52,7 +52,6 @@ struct WasmCompilationResult {
   MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(WasmCompilationResult);
 
   bool succeeded() const { return code_desc.buffer != nullptr; }
-  bool failed() const { return !succeeded(); }
   operator bool() const { return succeeded(); }
 
   CodeDesc code_desc;
...
[The diff of one file is collapsed and not shown.]
@@ -1008,10 +1008,7 @@ class ModuleDecoderImpl : public Decoder {
       }
 
       // Decode sequence of compilation hints.
-      if (decoder.ok()) {
-        module_->compilation_hints.reserve(hint_count);
-        module_->num_lazy_compilation_hints = 0;
-      }
+      if (decoder.ok()) module_->compilation_hints.reserve(hint_count);
       for (uint32_t i = 0; decoder.ok() && i < hint_count; i++) {
         TRACE("DecodeCompilationHints[%d] module+%d\n", i,
               static_cast<int>(pc_ - start_));
@@ -1050,18 +1047,12 @@ class ModuleDecoderImpl : public Decoder {
       }
 
       // Happily accept compilation hint.
-      if (decoder.ok()) {
-        if (hint.strategy == WasmCompilationHintStrategy::kLazy) {
-          module_->num_lazy_compilation_hints++;
-        }
-        module_->compilation_hints.push_back(std::move(hint));
-      }
+      if (decoder.ok()) module_->compilation_hints.push_back(std::move(hint));
     }
 
     // If section was invalid reset compilation hints.
     if (decoder.failed()) {
       module_->compilation_hints.clear();
-      module_->num_lazy_compilation_hints = 0;
     }
 
     // @TODO(frgossen) Skip the whole compilation hints section in the outer
...
@@ -433,25 +433,19 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
   return AddAndPublishAnonymousCode(code, WasmCode::kFunction);
 }
 
-void NativeModule::UseLazyStubs() {
-  uint32_t start = module_->num_imported_functions;
-  uint32_t end = start + module_->num_declared_functions;
-  for (uint32_t func_index = start; func_index < end; func_index++) {
-    UseLazyStub(func_index);
-  }
-}
-
-void NativeModule::UseLazyStub(uint32_t func_index) {
-  DCHECK_LE(module_->num_imported_functions, func_index);
-  DCHECK_LT(func_index,
-            module_->num_imported_functions + module_->num_declared_functions);
-  // Add jump table entry for jump to the lazy compile stub.
-  uint32_t slot_index = func_index - module_->num_imported_functions;
-  DCHECK_NE(runtime_stub_entry(WasmCode::kWasmCompileLazy), kNullAddress);
-  JumpTableAssembler::EmitLazyCompileJumpSlot(
-      jump_table_->instruction_start(), slot_index, func_index,
-      runtime_stub_entry(WasmCode::kWasmCompileLazy), WasmCode::kFlushICache);
+void NativeModule::SetLazyBuiltin() {
+  uint32_t num_wasm_functions = module_->num_declared_functions;
+  if (num_wasm_functions == 0) return;
+  // Fill the jump table with jumps to the lazy compile stub.
+  Address lazy_compile_target = runtime_stub_entry(WasmCode::kWasmCompileLazy);
+  for (uint32_t i = 0; i < num_wasm_functions; ++i) {
+    JumpTableAssembler::EmitLazyCompileJumpSlot(
+        jump_table_->instruction_start(), i,
+        i + module_->num_imported_functions, lazy_compile_target,
+        WasmCode::kNoFlushICache);
+  }
+  FlushInstructionCache(jump_table_->instructions().start(),
+                        jump_table_->instructions().size());
 }
 
 // TODO(mstarzinger): Remove {Isolate} parameter once {V8_EMBEDDED_BUILTINS}
@@ -587,7 +581,7 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
   new_code->MaybePrint(name);
   new_code->Validate();
 
-  return PublishCode(std::move(new_code)).code;
+  return PublishCode(std::move(new_code));
 }
 
 std::unique_ptr<WasmCode> NativeModule::AddCode(
@@ -673,98 +667,39 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
   return code;
 }
 
-WasmCodeUpdate NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
+WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
   base::MutexGuard lock(&allocation_mutex_);
   return PublishCodeLocked(std::move(code));
 }
 
-namespace {
-
-WasmCode::Tier GetCodeTierForExecutionTier(ExecutionTier tier) {
-  switch (tier) {
-    case ExecutionTier::kInterpreter:
-      return WasmCode::Tier::kOther;
-    case ExecutionTier::kBaseline:
-      return WasmCode::Tier::kLiftoff;
-    case ExecutionTier::kOptimized:
-      return WasmCode::Tier::kTurbofan;
-  }
-}
-
-WasmCode::Kind GetCodeKindForExecutionTier(ExecutionTier tier) {
-  switch (tier) {
-    case ExecutionTier::kInterpreter:
-      return WasmCode::Kind::kInterpreterEntry;
-    case ExecutionTier::kBaseline:
-    case ExecutionTier::kOptimized:
-      return WasmCode::Kind::kFunction;
-  }
-}
-
-// TODO(frgossen): We should merge ExecutionTier with WasmCode::Tier.
-base::Optional<ExecutionTier> GetExecutionTier(WasmCode* code) {
-  if (code == nullptr) return {};
-  switch (code->tier()) {
-    case WasmCode::Tier::kLiftoff:
-      DCHECK_EQ(code->kind(), WasmCode::Kind::kFunction);
-      return ExecutionTier::kBaseline;
-    case WasmCode::Tier::kTurbofan:
-      DCHECK_EQ(code->kind(), WasmCode::Kind::kFunction);
-      return ExecutionTier::kOptimized;
-    case WasmCode::Tier::kOther:
-      if (code->kind() == WasmCode::Kind::kInterpreterEntry)
-        return ExecutionTier::kInterpreter;
-      return {};
-  }
-  UNREACHABLE();
-}
-
-}  // namespace
-
-WasmCodeUpdate NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
+WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
   // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
   DCHECK(!allocation_mutex_.TryLock());
-  base::Optional<ExecutionTier> prior_tier;
-  base::Optional<ExecutionTier> tier;
   // Skip publishing code if there is an active redirection to the interpreter
   // for the given function index, in order to preserve the redirection.
   if (!code->IsAnonymous() && !has_interpreter_redirection(code->index())) {
     DCHECK_LT(code->index(), num_functions());
     DCHECK_LE(module_->num_imported_functions, code->index());
-    // Assume an order of execution tiers that represents the quality of their
-    // generated code.
-    static_assert(ExecutionTier::kInterpreter < ExecutionTier::kBaseline &&
-                      ExecutionTier::kBaseline < ExecutionTier::kOptimized,
-                  "Assume an order on execution tiers");
-    // Update code table but avoid to fall back to less optimized code. We use
-    // the new code if it was compiled with a higher tier and also if we cannot
-    // determine the tier.
+    // Update code table, except for interpreter entries that would overwrite
+    // existing code.
     uint32_t slot_idx = code->index() - module_->num_imported_functions;
-    WasmCode* prior_code = code_table_[slot_idx];
-    prior_tier = GetExecutionTier(prior_code);
-    tier = GetExecutionTier(code.get());
-    bool code_upgrade = !prior_tier.has_value() || !tier.has_value() ||
-                        prior_tier.value() < tier.value();
-    if (code_upgrade) {
+    if (code->kind() != WasmCode::kInterpreterEntry ||
+        code_table_[slot_idx] == nullptr) {
       code_table_[slot_idx] = code.get();
     }
-    // Patch jump table. Ensure to use optimized code and interpreter entries.
-    if (code_upgrade || code->kind_ == WasmCode::Kind::kInterpreterEntry) {
-      JumpTableAssembler::PatchJumpTableSlot(
-          jump_table_->instruction_start(), slot_idx, code->instruction_start(),
-          WasmCode::kFlushICache);
-    }
+    // Patch jump table.
+    JumpTableAssembler::PatchJumpTableSlot(jump_table_->instruction_start(),
+                                           slot_idx, code->instruction_start(),
+                                           WasmCode::kFlushICache);
   }
   if (code->kind_ == WasmCode::Kind::kInterpreterEntry) {
     SetInterpreterRedirection(code->index());
   }
-  WasmCodeUpdate update;
-  update.code = code.get();
-  update.tier = tier;
-  update.prior_tier = prior_tier;
+  WasmCode* ret = code.get();
   owned_code_.emplace_back(std::move(code));
-  return update;
+  return ret;
 }
 
 WasmCode* NativeModule::AddDeserializedCode(
@@ -791,7 +726,7 @@ WasmCode* NativeModule::AddDeserializedCode(
   // Note: we do not flush the i-cache here, since the code needs to be
   // relocated anyway. The caller is responsible for flushing the i-cache later.
-  return PublishCode(std::move(code)).code;
+  return PublishCode(std::move(code));
 }
 
 std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
@@ -823,7 +758,7 @@ WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
                               OwnedVector<const uint8_t>{},  // source_pos
                               WasmCode::kJumpTable,          // kind
                               WasmCode::kOther}};            // tier
-  return PublishCode(std::move(code)).code;
+  return PublishCode(std::move(code));
 }
 
 Vector<byte> NativeModule::AllocateForCode(size_t size) {
@@ -1267,11 +1202,34 @@ void NativeModule::SampleCodeSize(
   histogram->AddSample(code_size_mb);
 }
 
-WasmCodeUpdate NativeModule::AddCompiledCode(WasmCompilationResult result) {
+namespace {
+
+WasmCode::Tier GetCodeTierForExecutionTier(ExecutionTier tier) {
+  switch (tier) {
+    case ExecutionTier::kInterpreter:
+      return WasmCode::Tier::kOther;
+    case ExecutionTier::kBaseline:
+      return WasmCode::Tier::kLiftoff;
+    case ExecutionTier::kOptimized:
+      return WasmCode::Tier::kTurbofan;
+  }
+}
+
+WasmCode::Kind GetCodeKindForExecutionTier(ExecutionTier tier) {
+  switch (tier) {
+    case ExecutionTier::kInterpreter:
+      return WasmCode::Kind::kInterpreterEntry;
+    case ExecutionTier::kBaseline:
+    case ExecutionTier::kOptimized:
+      return WasmCode::Kind::kFunction;
+  }
+}
+
+}  // namespace
+
+WasmCode* NativeModule::AddCompiledCode(WasmCompilationResult result) {
   return AddCompiledCode({&result, 1})[0];
 }
 
-std::vector<WasmCodeUpdate> NativeModule::AddCompiledCode(
+std::vector<WasmCode*> NativeModule::AddCompiledCode(
     Vector<WasmCompilationResult> results) {
   DCHECK(!results.is_empty());
   // First, allocate code space for all the results.
@@ -1301,15 +1259,16 @@ std::vector<WasmCodeUpdate> NativeModule::AddCompiledCode(
   DCHECK_EQ(0, code_space.size());
 
   // Under the {allocation_mutex_}, publish the code.
-  std::vector<WasmCodeUpdate> code_updates;
-  code_updates.reserve(results.size());
+  std::vector<WasmCode*> returned_code;
+  returned_code.reserve(results.size());
   {
     base::MutexGuard lock(&allocation_mutex_);
-    for (auto& result : generated_code)
-      code_updates.push_back(PublishCodeLocked(std::move(result)));
+    for (auto& result : generated_code) {
+      returned_code.push_back(PublishCodeLocked(std::move(result)));
+    }
   }
-  return code_updates;
+  return returned_code;
 }
 
 void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
...
@@ -14,7 +14,6 @@
 #include <vector>
 
 #include "src/base/macros.h"
-#include "src/base/optional.h"
 #include "src/builtins/builtins-definitions.h"
 #include "src/handles.h"
 #include "src/trap-handler/trap-handler.h"
@@ -73,12 +72,6 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
   DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool);
 };
 
-struct WasmCodeUpdate {
-  WasmCode* code = nullptr;
-  base::Optional<ExecutionTier> tier;
-  base::Optional<ExecutionTier> prior_tier;
-};
-
 class V8_EXPORT_PRIVATE WasmCode final {
  public:
   enum Kind {
@@ -277,9 +270,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
   // {PublishCode} makes the code available to the system by entering it into
   // the code table and patching the jump table. It returns a raw pointer to the
   // given {WasmCode} object.
-  WasmCodeUpdate PublishCode(std::unique_ptr<WasmCode>);
+  WasmCode* PublishCode(std::unique_ptr<WasmCode>);
   // Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
-  WasmCodeUpdate PublishCodeLocked(std::unique_ptr<WasmCode>);
+  WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>);
 
   WasmCode* AddDeserializedCode(
       uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
@@ -295,12 +288,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
   // Adds anonymous code for testing purposes.
   WasmCode* AddCodeForTesting(Handle<Code> code);
 
-  // Use this to setup lazy compilation for the entire module ({UseLazyStubs})
-  // or for individual functions ({UseLazyStub}). It will use the existing
-  // {WasmCode::kWasmCompileLazy} runtime stub and populate the jump table with
-  // trampolines to that runtime stub.
-  void UseLazyStubs();
-  void UseLazyStub(uint32_t func_index);
+  // Use this to start lazy compilation for the entire module. It will use the
+  // existing {WasmCode::kWasmCompileLazy} runtime stub and populate the jump
+  // table with trampolines to that runtime stub.
+  void SetLazyBuiltin();
 
   // Initializes all runtime stubs by setting up entry addresses in the runtime
   // stub table. It must be called exactly once per native module before adding
@@ -379,8 +370,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
   UseTrapHandler use_trap_handler() const { return use_trap_handler_; }
   void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
   bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
-  void set_lazy_compilation(bool lazy) { lazy_compilation_ = lazy; }
-  bool lazy_compilation() const { return lazy_compilation_; }
   Vector<const uint8_t> wire_bytes() const { return wire_bytes_->as_vector(); }
   const WasmModule* module() const { return module_.get(); }
   std::shared_ptr<const WasmModule> shared_module() const { return module_; }
@@ -405,8 +394,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
   enum CodeSamplingTime : int8_t { kAfterBaseline, kAfterTopTier, kSampling };
   void SampleCodeSize(Counters*, CodeSamplingTime) const;
 
-  WasmCodeUpdate AddCompiledCode(WasmCompilationResult);
-  std::vector<WasmCodeUpdate> AddCompiledCode(Vector<WasmCompilationResult>);
+  WasmCode* AddCompiledCode(WasmCompilationResult);
+  std::vector<WasmCode*> AddCompiledCode(Vector<WasmCompilationResult>);
 
   // Free a set of functions of this module. Uncommits whole pages if possible.
   // The given vector must be ordered by the instruction start address, and all
@@ -535,7 +524,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
   UseTrapHandler use_trap_handler_ = kNoTrapHandler;
   bool is_executable_ = false;
   bool lazy_compile_frozen_ = false;
-  bool lazy_compilation_ = false;
 
   DISALLOW_COPY_AND_ASSIGN(NativeModule);
 };
...
@@ -195,7 +195,6 @@ struct V8_EXPORT_PRIVATE WasmModule {
   uint32_t num_declared_functions = 0;  // excluding imported
   uint32_t num_exported_functions = 0;
   uint32_t num_declared_data_segments = 0;  // From the DataCount section.
-  uint32_t num_lazy_compilation_hints = 0;  // From compilation hints section.
   WireBytesRef name = {0, 0};
   std::vector<FunctionSig*> signatures;  // by signature index
   std::vector<uint32_t> signature_ids;   // by signature index
...
@@ -503,12 +503,7 @@ bool NativeModuleDeserializer::ReadHeader(Reader* reader) {
 
 bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
   size_t code_section_size = reader->Read<size_t>();
-  if (code_section_size == 0) {
-    DCHECK(FLAG_wasm_lazy_compilation ||
-           native_module_->enabled_features().compilation_hints);
-    native_module_->UseLazyStub(fn_index);
-    return true;
-  }
+  if (code_section_size == 0) return true;
   size_t constant_pool_offset = reader->Read<size_t>();
   size_t safepoint_table_offset = reader->Read<size_t>();
   size_t handler_table_offset = reader->Read<size_t>();
@@ -630,8 +625,9 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
       std::move(wire_bytes_copy), script, Handle<ByteArray>::null());
   NativeModule* native_module = module_object->native_module();
 
-  native_module->set_lazy_compilation(FLAG_wasm_lazy_compilation);
+  if (FLAG_wasm_lazy_compilation) {
+    native_module->SetLazyBuiltin();
+  }
   NativeModuleDeserializer deserializer(native_module);
   Reader reader(data + kVersionSize);
...
@@ -501,9 +501,7 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
   WasmCompilationResult result = unit.ExecuteCompilation(
       &env, native_module->compilation_state()->GetWireBytesStorage(),
       isolate()->counters(), &unused_detected_features);
-  WasmCodeUpdate code_update =
-      native_module->AddCompiledCode(std::move(result));
-  WasmCode* code = code_update.code;
+  WasmCode* code = native_module->AddCompiledCode(std::move(result));
   DCHECK_NOT_NULL(code);
   if (WasmCode::ShouldBeLogged(isolate())) code->LogCode(isolate());
 }
...
@@ -177,7 +177,7 @@ class WasmCodeManagerTest : public TestWithContext,
     desc.instr_size = static_cast<int>(size);
     std::unique_ptr<WasmCode> code = native_module->AddCode(
        index, desc, 0, 0, {}, {}, WasmCode::kFunction, WasmCode::kOther);
-    return native_module->PublishCode(std::move(code)).code;
+    return native_module->PublishCode(std::move(code));
   }
 
   size_t page() const { return AllocatePageSize(); }
...