Commit a4b19dcc authored by Michael Starzinger, committed by Commit Bot

[wasm] No longer copy runtime stubs into each module.

This switches from copying entire runtime stubs into each module to having
only small jump table slots in each module that act as trampolines to the
actual embedded builtins representing the runtime stubs. This reduces the
memory footprint of each module.
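
For illustration only, a minimal, self-contained C++ sketch of the idea (not
V8 code; names such as ModuleStubTable and WasmStackGuard, and the use of
plain function pointers, are assumptions made for this example): each module
keeps only a small table of entries that forward to shared builtin
implementations instead of owning a private copy of every stub body.

// Conceptual sketch only (not V8 code). Each "module" holds a tiny table of
// entry points into shared builtins rather than copies of the stub bodies.
#include <array>
#include <cstdio>

namespace sketch {

enum RuntimeStubId { kWasmStackGuard, kThrowWasmTrapDivByZero, kStubCount };

// Shared implementations, analogous to embedded builtins that exist once per
// process rather than once per module.
void WasmStackGuard() { std::puts("stack guard"); }
void ThrowWasmTrapDivByZero() { std::puts("trap: divide by zero"); }

using StubEntry = void (*)();

// Per-module "jump table" for runtime stubs: kStubCount small entries (here
// function pointers standing in for jump slots), not copied stub code.
struct ModuleStubTable {
  std::array<StubEntry, kStubCount> entries{};

  void Populate() {
    entries[kWasmStackGuard] = &WasmStackGuard;
    entries[kThrowWasmTrapDivByZero] = &ThrowWasmTrapDivByZero;
  }

  // Generated code calls through the per-module entry and lands in the shared
  // builtin; relocating a module never touches the builtin itself.
  void CallStub(RuntimeStubId id) const { entries[id](); }
};

}  // namespace sketch

int main() {
  sketch::ModuleStubTable module_a, module_b;  // two modules, one set of stubs
  module_a.Populate();
  module_b.Populate();
  module_a.CallStub(sketch::kWasmStackGuard);
  module_b.CallStub(sketch::kThrowWasmTrapDivByZero);
  return 0;
}

In the actual change below, the per-module entry is a short jump-table slot
(see kJumpTableStubSlotSize) that branches to the embedded builtin, so the
stub code itself exists only once per process.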

R=clemensh@chromium.org

Change-Id: I3de528f7ebcc104f114ec32914d2b86e810d10d6
Reviewed-on: https://chromium-review.googlesource.com/c/1460474
Commit-Queue: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59575}
parent 176bee05
@@ -242,9 +242,10 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
     }
   } else if (RelocInfo::IsWasmStubCall(rmode) && host.is_wasm_code()) {
     // Host is isolate-independent, try wasm native module instead.
-    wasm::WasmCode* code = host.as_wasm_code()->native_module()->Lookup(
-        relocinfo->wasm_stub_call_address());
-    out->AddFormatted(" ;; wasm stub: %s", code->GetRuntimeStubName());
+    const char* runtime_stub_name =
+        host.as_wasm_code()->native_module()->GetRuntimeStubName(
+            relocinfo->wasm_stub_call_address());
+    out->AddFormatted(" ;; wasm stub: %s", runtime_stub_name);
   } else if (RelocInfo::IsRuntimeEntry(rmode) && isolate &&
              isolate->deoptimizer_data() != nullptr) {
     // A runtime entry relocinfo might be a deoptimization bailout.
...
@@ -40,6 +40,10 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
   PatchConstPool();  // force patching entries for partial const pool
 }
 
+void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
+  JumpToInstructionStream(builtin_target);
+}
+
 void JumpTableAssembler::EmitJumpSlot(Address target) {
   movq(kScratchRegister, static_cast<uint64_t>(target));
   jmp(kScratchRegister);
@@ -57,6 +61,10 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
   jmp(lazy_compile_target, RelocInfo::NONE);  // 5 bytes
 }
 
+void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
+  JumpToInstructionStream(builtin_target);
+}
+
 void JumpTableAssembler::EmitJumpSlot(Address target) {
   jmp(target, RelocInfo::NONE);
 }
@@ -81,6 +89,11 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
   EmitJumpSlot(lazy_compile_target);
 }
 
+void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
+  JumpToInstructionStream(builtin_target);
+  CheckConstPool(true, false);  // force emit of const pool
+}
+
 void JumpTableAssembler::EmitJumpSlot(Address target) {
   // Note that {Move32BitImmediate} emits [ldr, constant] for the relocation
   // mode used below, we need this to allow concurrent patching of this slot.
@@ -103,6 +116,11 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
   Jump(lazy_compile_target, RelocInfo::NONE);  // 1 instr
 }
 
+void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
+  JumpToInstructionStream(builtin_target);
+  CheckConstPool(true, false);  // force emit of const pool
+}
+
 void JumpTableAssembler::EmitJumpSlot(Address target) {
   // TODO(wasm): Currently this is guaranteed to be a {near_call} and hence is
   // patchable concurrently. Once {kMaxWasmCodeMemory} is raised on ARM64, make
@@ -193,6 +211,10 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
   UNIMPLEMENTED();
 }
 
+void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
+  UNIMPLEMENTED();
+}
+
 void JumpTableAssembler::EmitJumpSlot(Address target) { UNIMPLEMENTED(); }
 
 void JumpTableAssembler::NopBytes(int bytes) {
...
@@ -27,7 +27,7 @@ namespace wasm {
 //
 // The above illustrates jump table lines {Li} containing slots {Si} with each
 // line containing {n} slots and some padding {x} for alignment purposes.
-class JumpTableAssembler : public TurboAssembler {
+class JumpTableAssembler : public MacroAssembler {
  public:
   // Translate an offset into the continuous jump table to a jump table index.
   static uint32_t SlotOffsetToIndex(uint32_t slot_offset) {
@@ -55,6 +55,16 @@ class JumpTableAssembler : public TurboAssembler {
            kJumpTableLineSize;
   }
 
+  // Translate a stub slot index to an offset into the continuous jump table.
+  static uint32_t StubSlotIndexToOffset(uint32_t slot_index) {
+    return slot_index * kJumpTableStubSlotSize;
+  }
+
+  // Determine the size of a jump table containing only runtime stub slots.
+  static constexpr uint32_t SizeForNumberOfStubSlots(uint32_t slot_count) {
+    return slot_count * kJumpTableStubSlotSize;
+  }
+
   static void EmitLazyCompileJumpSlot(Address base, uint32_t slot_index,
                                       uint32_t func_index,
                                       Address lazy_compile_target,
@@ -68,6 +78,18 @@ class JumpTableAssembler : public TurboAssembler {
     }
   }
 
+  static void EmitRuntimeStubSlot(Address base, uint32_t slot_index,
+                                  Address builtin_target,
+                                  WasmCode::FlushICache flush_i_cache) {
+    Address slot = base + StubSlotIndexToOffset(slot_index);
+    JumpTableAssembler jtasm(slot);
+    jtasm.EmitRuntimeStubSlot(builtin_target);
+    jtasm.NopBytes(kJumpTableStubSlotSize - jtasm.pc_offset());
+    if (flush_i_cache) {
+      FlushInstructionCache(slot, kJumpTableStubSlotSize);
+    }
+  }
+
   static void PatchJumpTableSlot(Address base, uint32_t slot_index,
                                  Address new_target,
                                  WasmCode::FlushICache flush_i_cache) {
@@ -83,7 +105,7 @@ class JumpTableAssembler : public TurboAssembler {
  private:
   // Instantiate a {JumpTableAssembler} for patching.
   explicit JumpTableAssembler(Address slot_addr, int size = 256)
-      : TurboAssembler(nullptr, JumpTableAssemblerOptions(),
+      : MacroAssembler(nullptr, JumpTableAssemblerOptions(),
                        CodeObjectRequired::kNo,
                        ExternalAssemblerBuffer(
                            reinterpret_cast<uint8_t*>(slot_addr), size)) {}
@@ -94,15 +116,19 @@ class JumpTableAssembler : public TurboAssembler {
 #if V8_TARGET_ARCH_X64
   static constexpr int kJumpTableLineSize = 64;
   static constexpr int kJumpTableSlotSize = 18;
+  static constexpr int kJumpTableStubSlotSize = 18;
 #elif V8_TARGET_ARCH_IA32
   static constexpr int kJumpTableLineSize = 64;
   static constexpr int kJumpTableSlotSize = 10;
+  static constexpr int kJumpTableStubSlotSize = 10;
 #elif V8_TARGET_ARCH_ARM
   static constexpr int kJumpTableLineSize = 5 * kInstrSize;
   static constexpr int kJumpTableSlotSize = 5 * kInstrSize;
+  static constexpr int kJumpTableStubSlotSize = 5 * kInstrSize;
 #elif V8_TARGET_ARCH_ARM64
   static constexpr int kJumpTableLineSize = 3 * kInstrSize;
   static constexpr int kJumpTableSlotSize = 3 * kInstrSize;
+  static constexpr int kJumpTableStubSlotSize = 6 * kInstrSize;
 #elif V8_TARGET_ARCH_S390X
   static constexpr int kJumpTableLineSize = 20;
   static constexpr int kJumpTableSlotSize = 20;
@@ -124,6 +150,7 @@ class JumpTableAssembler : public TurboAssembler {
 #else
   static constexpr int kJumpTableLineSize = 1;
   static constexpr int kJumpTableSlotSize = 1;
+  static constexpr int kJumpTableStubSlotSize = 1;
 #endif
 
   static constexpr int kJumpTableSlotsPerLine =
@@ -142,6 +169,8 @@ class JumpTableAssembler : public TurboAssembler {
   void EmitLazyCompileJumpSlot(uint32_t func_index,
                                Address lazy_compile_target);
 
+  void EmitRuntimeStubSlot(Address builtin_target);
+
   void EmitJumpSlot(Address target);
 
   void NopBytes(int bytes);
...
@@ -193,19 +193,6 @@ void WasmCode::LogCode(Isolate* isolate) const {
   }
 }
 
-const char* WasmCode::GetRuntimeStubName() const {
-  DCHECK_EQ(WasmCode::kRuntimeStub, kind());
-#define RETURN_NAME(Name) \
-  if (native_module_->runtime_stub_table_[WasmCode::k##Name] == this) { \
-    return #Name; \
-  }
-#define RETURN_NAME_TRAP(Name) RETURN_NAME(ThrowWasm##Name)
-  WASM_RUNTIME_STUB_LIST(RETURN_NAME, RETURN_NAME_TRAP)
-#undef RETURN_NAME_TRAP
-#undef RETURN_NAME
-  return "<unknown>";
-}
-
 void WasmCode::Validate() const {
 #ifdef DEBUG
   // We expect certain relocation info modes to never appear in {WasmCode}
@@ -220,6 +207,7 @@ void WasmCode::Validate() const {
       WasmCode* code = native_module_->Lookup(target);
       CHECK_NOT_NULL(code);
       CHECK_EQ(WasmCode::kJumpTable, code->kind());
+      CHECK_EQ(native_module()->jump_table_, code);
       CHECK(code->contains(target));
       break;
     }
@@ -227,8 +215,14 @@
       Address target = it.rinfo()->wasm_stub_call_address();
       WasmCode* code = native_module_->Lookup(target);
      CHECK_NOT_NULL(code);
+#ifdef V8_EMBEDDED_BUILTINS
+      CHECK_EQ(WasmCode::kJumpTable, code->kind());
+      CHECK_EQ(native_module()->runtime_stub_table_, code);
+      CHECK(code->contains(target));
+#else
       CHECK_EQ(WasmCode::kRuntimeStub, code->kind());
       CHECK_EQ(target, code->instruction_start());
+#endif
       break;
     }
     case RelocInfo::INTERNAL_REFERENCE:
@@ -407,7 +401,8 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
     code_table_.reset(new WasmCode*[num_wasm_functions]);
     memset(code_table_.get(), 0, num_wasm_functions * sizeof(WasmCode*));
 
-    jump_table_ = CreateEmptyJumpTable(num_wasm_functions);
+    jump_table_ = CreateEmptyJumpTable(
+        JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
   }
 }
@@ -422,7 +417,8 @@ void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
   code_table_.reset(new_table);
 
   // Re-allocate jump table.
-  jump_table_ = CreateEmptyJumpTable(max_functions);
+  jump_table_ = CreateEmptyJumpTable(
+      JumpTableAssembler::SizeForNumberOfSlots(max_functions));
 }
 
 void NativeModule::LogWasmCodes(Isolate* isolate) {
@@ -508,16 +504,48 @@ void NativeModule::SetLazyBuiltin(Handle<Code> code) {
 }
 
 void NativeModule::SetRuntimeStubs(Isolate* isolate) {
+  // TODO(mstarzinger): Switch this from accessing the {Isolate} to using the
+  // embedded blob directly. This will allow us to do this from the background.
   HandleScope scope(isolate);
-  DCHECK_NULL(runtime_stub_table_[0]);  // Only called once.
+  DCHECK_EQ(kNullAddress, runtime_stub_entries_[0]);  // Only called once.
+#ifdef V8_EMBEDDED_BUILTINS
+  WasmCode* jump_table =
+      CreateEmptyJumpTable(JumpTableAssembler::SizeForNumberOfStubSlots(
+          WasmCode::kRuntimeStubCount));
+  Address base = jump_table->instruction_start();
+#define RUNTIME_STUB(Name) {Builtins::k##Name, WasmCode::k##Name},
+#define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name)
+  std::pair<Builtins::Name, WasmCode::RuntimeStubId> wasm_runtime_stubs[] = {
+      WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
+#undef RUNTIME_STUB
+#undef RUNTIME_STUB_TRAP
+  for (auto pair : wasm_runtime_stubs) {
+    Handle<Code> builtin_code = isolate->builtins()->builtin_handle(pair.first);
+    CHECK(builtin_code->is_off_heap_trampoline());
+    JumpTableAssembler::EmitRuntimeStubSlot(
+        base, pair.second, builtin_code->OffHeapInstructionStart(),
+        WasmCode::kNoFlushICache);
+    uint32_t slot_offset =
+        JumpTableAssembler::StubSlotIndexToOffset(pair.second);
+    runtime_stub_entries_[pair.second] = base + slot_offset;
+  }
+  FlushInstructionCache(jump_table->instructions().start(),
+                        jump_table->instructions().size());
+  DCHECK_NULL(runtime_stub_table_);
+  runtime_stub_table_ = jump_table;
+#else  // V8_EMBEDDED_BUILTINS
+  USE(runtime_stub_table_);  // Actually unused, but avoids ifdef's in header.
 #define COPY_BUILTIN(Name) \
-  runtime_stub_table_[WasmCode::k##Name] = \
+  runtime_stub_entries_[WasmCode::k##Name] = \
       AddAnonymousCode(isolate->builtins()->builtin_handle(Builtins::k##Name), \
-                       WasmCode::kRuntimeStub, #Name);
+                       WasmCode::kRuntimeStub, #Name) \
+          ->instruction_start();
 #define COPY_BUILTIN_TRAP(Name) COPY_BUILTIN(ThrowWasm##Name)
   WASM_RUNTIME_STUB_LIST(COPY_BUILTIN, COPY_BUILTIN_TRAP)
 #undef COPY_BUILTIN_TRAP
 #undef COPY_BUILTIN
+#endif  // V8_EMBEDDED_BUILTINS
+  DCHECK_NE(kNullAddress, runtime_stub_entries_[0]);
 }
 
 WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code, WasmCode::Kind kind,
@@ -580,10 +608,9 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code, WasmCode::Kind kind,
       if (RelocInfo::IsWasmStubCall(mode)) {
         uint32_t stub_call_tag = orig_it.rinfo()->wasm_call_tag();
         DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
-        WasmCode* code =
-            runtime_stub(static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
-        it.rinfo()->set_wasm_stub_call_address(code->instruction_start(),
-                                               SKIP_ICACHE_FLUSH);
+        Address entry = runtime_stub_entry(
+            static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
+        it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
       } else {
         it.rinfo()->apply(delta);
       }
@@ -641,10 +668,9 @@ WasmCode* NativeModule::AddCode(
     } else if (RelocInfo::IsWasmStubCall(mode)) {
       uint32_t stub_call_tag = it.rinfo()->wasm_call_tag();
       DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
-      WasmCode* code =
-          runtime_stub(static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
-      it.rinfo()->set_wasm_stub_call_address(code->instruction_start(),
-                                             SKIP_ICACHE_FLUSH);
+      Address entry = runtime_stub_entry(
+          static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
+      it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
     } else {
       it.rinfo()->apply(delta);
     }
@@ -712,11 +738,10 @@ std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
   return result;
 }
 
-WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t num_wasm_functions) {
+WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
   // Only call this if we really need a jump table.
-  DCHECK_LT(0, num_wasm_functions);
-  OwnedVector<byte> instructions = OwnedVector<byte>::New(
-      JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
+  DCHECK_LT(0, jump_table_size);
+  OwnedVector<byte> instructions = OwnedVector<byte>::New(jump_table_size);
   memset(instructions.start(), 0, instructions.size());
   return AddOwnedCode(WasmCode::kAnonymousFuncIndex,  // index
                       instructions.as_vector(),  // instructions
@@ -910,6 +935,18 @@ void NativeModule::DisableTrapHandler() {
   // recycled.
 }
 
+const char* NativeModule::GetRuntimeStubName(Address runtime_stub_entry) const {
+#define RETURN_NAME(Name) \
+  if (runtime_stub_entries_[WasmCode::k##Name] == runtime_stub_entry) { \
+    return #Name; \
+  }
+#define RETURN_NAME_TRAP(Name) RETURN_NAME(ThrowWasm##Name)
+  WASM_RUNTIME_STUB_LIST(RETURN_NAME, RETURN_NAME_TRAP)
+#undef RETURN_NAME_TRAP
+#undef RETURN_NAME
+  return "<unknown>";
+}
+
 NativeModule::~NativeModule() {
   TRACE_HEAP("Deleting native module: %p\n", reinterpret_cast<void*>(this));
   // Cancel all background compilation before resetting any field of the
...
@@ -134,8 +134,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
     return protected_instructions_.as_vector();
   }
 
-  const char* GetRuntimeStubName() const;
-
   void Validate() const;
   void Print(const char* name = nullptr) const;
   void MaybePrint(const char* name = nullptr) const;
@@ -286,11 +284,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
   bool has_code(uint32_t index) const { return code(index) != nullptr; }
 
-  WasmCode* runtime_stub(WasmCode::RuntimeStubId index) const {
+  Address runtime_stub_entry(WasmCode::RuntimeStubId index) const {
     DCHECK_LT(index, WasmCode::kRuntimeStubCount);
-    WasmCode* code = runtime_stub_table_[index];
-    DCHECK_NOT_NULL(code);
-    return code;
+    Address entry_address = runtime_stub_entries_[index];
+    DCHECK_NE(kNullAddress, entry_address);
+    return entry_address;
   }
 
   Address jump_table_start() const {
@@ -364,6 +362,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
   const WasmFeatures& enabled_features() const { return enabled_features_; }
 
+  const char* GetRuntimeStubName(Address runtime_stub_entry) const;
+
  private:
   friend class WasmCode;
   friend class WasmCodeManager;
@@ -395,7 +395,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
                     OwnedVector<const byte> source_position_table,
                     WasmCode::Kind, WasmCode::Tier);
 
-  WasmCode* CreateEmptyJumpTable(uint32_t num_wasm_functions);
+  WasmCode* CreateEmptyJumpTable(uint32_t jump_table_size);
 
   // Hold the {allocation_mutex_} when calling this method.
   void InstallCode(WasmCode* code);
@@ -441,7 +441,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
   // {WireBytesStorage}, held by background compile tasks.
   std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;
 
-  WasmCode* runtime_stub_table_[WasmCode::kRuntimeStubCount] = {nullptr};
+  // Contains entry points for runtime stub calls via {WASM_STUB_CALL}.
+  Address runtime_stub_entries_[WasmCode::kRuntimeStubCount] = {kNullAddress};
+
+  // Jump table used for runtime stubs (i.e. trampolines to embedded builtins).
+  WasmCode* runtime_stub_table_ = nullptr;
 
   // Jump table used to easily redirect wasm function calls.
   WasmCode* jump_table_ = nullptr;
...
@@ -289,9 +289,8 @@ NativeModuleSerializer::NativeModuleSerializer(
   // TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
   // the unique ones, i.e. the cache.
   for (uint32_t i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
-    Address addr =
-        native_module_->runtime_stub(static_cast<WasmCode::RuntimeStubId>(i))
-            ->instruction_start();
+    Address addr = native_module_->runtime_stub_entry(
+        static_cast<WasmCode::RuntimeStubId>(i));
     wasm_stub_targets_lookup_.insert(std::make_pair(addr, i));
   }
 }
@@ -543,10 +542,8 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
     case RelocInfo::WASM_STUB_CALL: {
      uint32_t tag = GetWasmCalleeTag(iter.rinfo());
       DCHECK_LT(tag, WasmCode::kRuntimeStubCount);
-      Address target =
-          native_module_
-              ->runtime_stub(static_cast<WasmCode::RuntimeStubId>(tag))
-              ->instruction_start();
+      Address target = native_module_->runtime_stub_entry(
+          static_cast<WasmCode::RuntimeStubId>(tag));
       iter.rinfo()->set_wasm_stub_call_address(target, SKIP_ICACHE_FLUSH);
       break;
     }
...