Commit 733b7c82 authored by Clemens Hammacher, committed by Commit Bot

[wasm] Introduce jump table

This introduces the concept of a jump table for WebAssembly, which is
used for every direct and indirect call to any WebAssembly function.
For lazy compilation, each table slot initially contains code that
calls the WasmCompileLazy builtin, passing the index of the function
to be called.
For non-lazy compilation, each slot contains a jump to the actual code.
The jump table makes it easy to redirect functions for lazy
compilation, tier-up, debugging and (in the future) code aging. After
this CL, we no longer need to patch existing code for any of these
operations.

R=mstarzinger@chromium.org, titzer@chromium.org

Bug: v8:7758
Change-Id: I45f9983c2b06ae81bf5ce9847f4542fb48844a4f
Reviewed-on: https://chromium-review.googlesource.com/1097075
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Ben Titzer <titzer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53805}
parent 3e5db487
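To make the mechanism concrete before the diff, here is a minimal, self-contained C++ sketch of the idea. It is an illustration only, not part of this CL and not how V8 implements it: the real slots are per-architecture machine-code stubs emitted by the JumpTableAssembler below, and every name in the sketch is made up. Each function gets one slot; every call goes through its slot; a slot starts out on a lazy-compile path that knows its function index and patches the slot once the function is compiled, so later calls go straight to the compiled code.

#include <cstdio>
#include <functional>
#include <utility>
#include <vector>

namespace sketch {

using WasmFn = std::function<int(int)>;

class JumpTable {
 public:
  explicit JumpTable(int num_functions) : slots_(num_functions) {
    // Initially, every slot routes to the lazy-compile path, which knows the
    // index of the function it stands in for.
    for (int i = 0; i < num_functions; ++i) {
      slots_[i] = [this, i](int arg) { return LazyCompileAndCall(i, arg); };
    }
  }

  // All direct and indirect calls go through the table, so redirecting a
  // function only requires patching its slot, never the call sites.
  int Call(int func_index, int arg) { return slots_[func_index](arg); }

  // Lazy compilation, tier-up, debugging etc. all reduce to this.
  void PatchSlot(int func_index, WasmFn target) {
    slots_[func_index] = std::move(target);
  }

 private:
  int LazyCompileAndCall(int func_index, int arg) {
    std::printf("lazily compiling function %d\n", func_index);
    // Pretend compilation produced real code, then patch the slot so the
    // next call jumps straight to it. We call the local copy here and do not
    // touch the (now replaced) old slot contents again.
    WasmFn compiled = [func_index](int x) { return x + func_index; };
    PatchSlot(func_index, compiled);
    return compiled(arg);
  }

  std::vector<WasmFn> slots_;
};

}  // namespace sketch

int main() {
  sketch::JumpTable table(3);
  std::printf("%d\n", table.Call(2, 40));  // compiles lazily, prints 42
  std::printf("%d\n", table.Call(2, 40));  // already compiled, prints 42
  return 0;
}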
@@ -1153,6 +1153,7 @@ int Operand::InstructionsRequired(const Assembler* assembler,
void Assembler::Move32BitImmediate(Register rd, const Operand& x,
                                   Condition cond) {
  if (UseMovImmediateLoad(x, this)) {
+   CpuFeatureScope scope(this, ARMv7);
    // UseMovImmediateLoad should return false when we need to output
    // relocation info, since we prefer the constant pool for values that
    // can be patched.
@@ -1160,12 +1161,9 @@ void Assembler::Move32BitImmediate(Register rd, const Operand& x,
    UseScratchRegisterScope temps(this);
    // Re-use the destination register as a scratch if possible.
    Register target = rd != pc ? rd : temps.Acquire();
-   if (CpuFeatures::IsSupported(ARMv7)) {
-     uint32_t imm32 = static_cast<uint32_t>(x.immediate());
-     CpuFeatureScope scope(this, ARMv7);
-     movw(target, imm32 & 0xFFFF, cond);
-     movt(target, imm32 >> 16, cond);
-   }
+   uint32_t imm32 = static_cast<uint32_t>(x.immediate());
+   movw(target, imm32 & 0xFFFF, cond);
+   movt(target, imm32 >> 16, cond);
    if (target.code() != rd.code()) {
      mov(rd, target, LeaveCC, cond);
    }
...
@@ -1549,6 +1549,9 @@ class Assembler : public AssemblerBase {
    UNREACHABLE();
  }

+ // Move a 32-bit immediate into a register, potentially via the constant pool.
+ void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);

 protected:
  int buffer_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1680,9 +1683,6 @@ class Assembler : public AssemblerBase {
  inline void CheckBuffer();
  void GrowBuffer();

- // 32-bit immediate values
- void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);

  // Instruction generation
  void AddrMode1(Instr instr, Register rd, Register rn, const Operand& x);
  // Attempt to encode operand |x| for instruction |instr| and return true on
...
@@ -2294,6 +2294,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}

void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+ // The function index was put in r4 by the jump table trampoline.
+ // Convert to Smi for the runtime call.
+ __ SmiTag(r4, r4);
  {
    TrapOnAbortScope trap_on_abort_scope(masm);  // Avoid calls to Abort.
    FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2308,8 +2311,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
    __ stm(db_w, sp, gp_regs);
    __ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);

-   // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+   // Pass instance and function index as explicit arguments to the runtime
+   // function.
    __ push(kWasmInstanceRegister);
+   __ push(r4);
    // Load the correct CEntry builtin from the instance object.
    __ ldr(r2, FieldMemOperand(kWasmInstanceRegister,
                               WasmInstanceObject::kCEntryStubOffset));
...
@@ -2746,6 +2746,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}

void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+ // The function index was put in w8 by the jump table trampoline.
+ // Sign extend and convert to Smi for the runtime call.
+ __ sxtw(x8, w8);
+ __ SmiTag(x8, x8);
  {
    TrapOnAbortScope trap_on_abort_scope(masm);  // Avoid calls to Abort.
    FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2760,8 +2764,9 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
    __ PushXRegList(gp_regs);
    __ PushDRegList(fp_regs);

-   // Pass the WASM instance as an explicit argument to WasmCompileLazy.
-   __ PushArgument(kWasmInstanceRegister);
+   // Pass instance and function index as explicit arguments to the runtime
+   // function.
+   __ Push(kWasmInstanceRegister, x8);
    // Load the correct CEntry builtin from the instance object.
    __ Ldr(x2, FieldMemOperand(kWasmInstanceRegister,
                               WasmInstanceObject::kCEntryStubOffset));
...
@@ -2481,6 +2481,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}

void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+ // The function index was put in edi by the jump table trampoline.
+ // Convert to Smi for the runtime call.
+ __ SmiTag(edi);
  {
    TrapOnAbortScope trap_on_abort_scope(masm);  // Avoid calls to Abort.
    FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2504,8 +2507,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
      offset += kSimd128Size;
    }

-   // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+   // Push the WASM instance as an explicit argument to WasmCompileLazy.
    __ Push(kWasmInstanceRegister);
+   // Push the function index as second argument.
+   __ Push(edi);
    // Load the correct CEntry builtin from the instance object.
    __ mov(ecx, FieldOperand(kWasmInstanceRegister,
                             WasmInstanceObject::kCEntryStubOffset));
...
@@ -2423,6 +2423,10 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}

void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+ // The function index was pushed to the stack by the caller as int32.
+ __ Pop(r11);
+ // Convert to Smi for the runtime call.
+ __ SmiTag(r11, r11);
  {
    TrapOnAbortScope trap_on_abort_scope(masm);  // Avoid calls to Abort.
    FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2446,8 +2450,10 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
      offset += kSimd128Size;
    }

-   // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+   // Push the WASM instance as an explicit argument to WasmCompileLazy.
    __ Push(kWasmInstanceRegister);
+   // Push the function index as second argument.
+   __ Push(r11);
    // Load the correct CEntry builtin from the instance object.
    __ movp(rcx, FieldOperand(kWasmInstanceRegister,
                              WasmInstanceObject::kCEntryStubOffset));
...
@@ -3216,6 +3216,12 @@ void Assembler::GrowBuffer() {
    *p += pc_delta;
  }

+ // Relocate js-to-wasm calls (which are encoded pc-relative).
+ for (RelocIterator it(desc, RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
+      !it.done(); it.next()) {
+   it.rinfo()->apply(pc_delta);
+ }

  DCHECK(!buffer_overflow());
}
...
@@ -1739,10 +1739,6 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) {  // NOLINT
    os << "\n - managed_native_allocations: "
       << Brief(managed_native_allocations());
  }
- if (has_managed_indirect_patcher()) {
-   os << "\n - managed_indirect_patcher: "
-      << Brief(managed_indirect_patcher());
- }
  os << "\n - memory_start: " << static_cast<void*>(memory_start());
  os << "\n - memory_size: " << memory_size();
  os << "\n - memory_mask: " << AsHex(memory_mask());
...
@@ -291,8 +291,9 @@ RUNTIME_FUNCTION(Runtime_WasmStackGuard) {
RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
  HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
+ DCHECK_EQ(2, args.length());
  CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
+ CONVERT_SMI_ARG_CHECKED(func_index, 1);

  ClearThreadInWasmScope wasm_flag(true);
@@ -306,7 +307,8 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
  DCHECK_EQ(*instance, WasmCompileLazyFrame::cast(it.frame())->wasm_instance());
#endif

- Address entrypoint = wasm::CompileLazy(isolate, instance);
+ Address entrypoint = wasm::CompileLazy(
+     isolate, instance->compiled_module()->GetNativeModule(), func_index);
  return reinterpret_cast<Object*>(entrypoint);
}
...
@@ -581,7 +581,7 @@ namespace internal {
  F(WasmThrow, 0, 1) \
  F(WasmThrowCreate, 2, 1) \
  F(WasmThrowTypeError, 0, 1) \
- F(WasmCompileLazy, 1, 1)
+ F(WasmCompileLazy, 2, 1)

#define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
  F(DebugBreakOnBytecode, 1, 2) \
...
@@ -4,6 +4,7 @@
#include "src/wasm/jump-table-assembler.h"
#include "src/assembler-inl.h"
#include "src/macro-assembler-inl.h"

namespace v8 {
@@ -27,6 +28,122 @@ void JumpTableAssembler::EmitJumpTrampoline(Address target) {
#endif
}
// The implementation is compact enough to implement it inline here. If it gets
// much bigger, we might want to split it in a separate file per architecture.
#if V8_TARGET_ARCH_X64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
// TODO(clemensh): Try more efficient sequences.
// Alternative 1:
// [header]: mov r10, [lazy_compile_target]
// jmp r10
// [slot 0]: push [0]
// jmp [header] // pc-relative --> slot size: 10 bytes
//
// Alternative 2:
// [header]: lea r10, [rip - [header]]
// shr r10, 3 // compute index from offset
// push r10
// mov r10, [lazy_compile_target]
// jmp r10
// [slot 0]: call [header]
// ret // -> slot size: 5 bytes
// Use a push, because mov to an extended register takes 6 bytes.
pushq(Immediate(func_index)); // max 5 bytes
movq(kScratchRegister, uint64_t{lazy_compile_target}); // max 10 bytes
jmp(kScratchRegister); // 3 bytes
}
void JumpTableAssembler::EmitJumpSlot(Address target) {
movq(kScratchRegister, static_cast<uint64_t>(target));
jmp(kScratchRegister);
}
void JumpTableAssembler::NopBytes(int bytes) {
DCHECK_LE(0, bytes);
Nop(bytes);
}
#elif V8_TARGET_ARCH_IA32
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
mov(edi, func_index); // 5 bytes
jmp(lazy_compile_target, RelocInfo::NONE); // 5 bytes
}
void JumpTableAssembler::EmitJumpSlot(Address target) {
jmp(target, RelocInfo::NONE);
}
void JumpTableAssembler::NopBytes(int bytes) {
DCHECK_LE(0, bytes);
Nop(bytes);
}
#elif V8_TARGET_ARCH_ARM
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
// Load function index to r4.
// This generates <= 3 instructions: ldr, const pool start, constant
Move32BitImmediate(r4, Operand(func_index));
// Jump to {lazy_compile_target}.
int offset =
lazy_compile_target - reinterpret_cast<Address>(pc_) - kPcLoadDelta;
DCHECK_EQ(0, offset % kInstrSize);
DCHECK(is_int26(offset)); // 26 bit imm
b(offset); // 1 instr
CheckConstPool(true, false); // force emit of const pool
}
void JumpTableAssembler::EmitJumpSlot(Address target) {
int offset = target - reinterpret_cast<Address>(pc_) - kPcLoadDelta;
DCHECK_EQ(0, offset % kInstrSize);
DCHECK(is_int26(offset)); // 26 bit imm
b(offset);
}
void JumpTableAssembler::NopBytes(int bytes) {
DCHECK_LE(0, bytes);
DCHECK_EQ(0, bytes % kInstrSize);
for (; bytes > 0; bytes -= kInstrSize) {
nop();
}
}
#elif V8_TARGET_ARCH_ARM64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
Mov(w8, func_index); // max. 2 instr
Jump(lazy_compile_target, RelocInfo::NONE); // 1 instr
}
void JumpTableAssembler::EmitJumpSlot(Address target) {
Jump(target, RelocInfo::NONE);
}
void JumpTableAssembler::NopBytes(int bytes) {
DCHECK_LE(0, bytes);
DCHECK_EQ(0, bytes % kInstructionSize);
for (; bytes > 0; bytes -= kInstructionSize) {
nop();
}
}
#else
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
UNIMPLEMENTED();
}
void JumpTableAssembler::EmitJumpSlot(Address target) { UNIMPLEMENTED(); }
void JumpTableAssembler::NopBytes(int bytes) {
DCHECK_LE(0, bytes);
UNIMPLEMENTED();
}
#endif
}  // namespace wasm
}  // namespace internal
}  // namespace v8
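As a quick cross-check of the byte counts quoted in the x64 comments above (this snippet is not part of this CL): the push of the function index ("max 5 bytes"), the movq of the lazy-compile target ("max 10 bytes") and the indirect jmp ("3 bytes") add up to the 18-byte kJumpTableSlotSize that jump-table-assembler.h declares for x64 below.

#include <cstdio>

int main() {
  // Byte counts quoted from the x64 comments in EmitLazyCompileJumpSlot.
  const int push_imm32 = 5;   // pushq(Immediate(func_index)), "max 5 bytes"
  const int movq_imm64 = 10;  // movq(kScratchRegister, ...), "max 10 bytes"
  const int jmp_reg = 3;      // jmp(kScratchRegister), "3 bytes"
  std::printf("x64 slot size: %d bytes\n", push_imm32 + movq_imm64 + jmp_reg);
  return 0;  // prints 18, matching kJumpTableSlotSize for x64
}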
@@ -6,6 +6,7 @@
#define V8_WASM_JUMP_TABLE_ASSEMBLER_H_

#include "src/macro-assembler.h"
#include "src/wasm/wasm-code-manager.h"

namespace v8 {
namespace internal {
@@ -26,8 +27,42 @@ class JumpTableAssembler : public TurboAssembler {
 public:
  JumpTableAssembler() : TurboAssembler(GetDefaultIsolateData(), nullptr, 0) {}
// Instantiate a {JumpTableAssembler} for patching.
explicit JumpTableAssembler(Address slot_addr, int size = 256)
: TurboAssembler(GetDefaultIsolateData(),
reinterpret_cast<void*>(slot_addr), size) {}
  // Emit a trampoline to a possibly far away code target.
  void EmitJumpTrampoline(Address target);
#if V8_TARGET_ARCH_X64
static constexpr int kJumpTableSlotSize = 18;
#elif V8_TARGET_ARCH_IA32
static constexpr int kJumpTableSlotSize = 10;
#elif V8_TARGET_ARCH_ARM
static constexpr int kJumpTableSlotSize = 4 * kInstrSize;
#elif V8_TARGET_ARCH_ARM64
static constexpr int kJumpTableSlotSize = 3 * kInstructionSize;
#else
static constexpr int kJumpTableSlotSize = 1;
#endif
void EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target);
void EmitJumpSlot(Address target);
void NopBytes(int bytes);
static void PatchJumpTableSlot(Address slot, Address new_target,
WasmCode::FlushICache flush_i_cache) {
JumpTableAssembler jsasm(slot);
jsasm.EmitJumpSlot(new_target);
jsasm.NopBytes(kJumpTableSlotSize - jsasm.pc_offset());
if (flush_i_cache) {
Assembler::FlushICache(slot, kJumpTableSlotSize);
}
}
};

}  // namespace wasm
...
This diff is collapsed.
@@ -65,15 +65,8 @@ V8_EXPORT_PRIVATE Handle<Script> CreateWasmScript(
    Isolate* isolate, const ModuleWireBytes& wire_bytes);

// Triggered by the WasmCompileLazy builtin.
- // Walks the stack (top three frames) to determine the wasm instance involved
- // and which function to compile.
- // Then triggers WasmCompiledModule::CompileLazy, taking care of correctly
- // patching the call site or indirect function tables.
- // Returns either the Code object that has been lazily compiled, or Illegal if
- // an error occurred. In the latter case, a pending exception has been set,
- // which will be triggered when returning from the runtime function, i.e. the
- // Illegal builtin will never be called.
- Address CompileLazy(Isolate* isolate, Handle<WasmInstanceObject> instance);
+ // Returns the instruction start of the compiled code object.
+ Address CompileLazy(Isolate*, NativeModule*, uint32_t func_index);

// Encapsulates all the state and steps of an asynchronous compilation.
// An asynchronous compile job consists of a number of tasks that are executed
...
This diff is collapsed.
@@ -94,7 +94,8 @@ class V8_EXPORT_PRIVATE WasmCode final {
    kLazyStub,
    kRuntimeStub,
    kInterpreterEntry,
-   kTrampoline
+   kTrampoline,
+   kJumpTable
  };

  // Each runtime stub is identified by an id. This id is used to reference the
@@ -251,10 +252,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
  WasmCode* AddInterpreterEntry(Handle<Code> code, uint32_t index);

  // When starting lazy compilation, provide the WasmLazyCompile builtin by
- // calling SetLazyBuiltin. It will initialize the code table with it. Copies
- // of it might be cloned from them later when creating entries for exported
- // functions and indirect callable functions, so that they may be identified
- // by the runtime.
+ // calling SetLazyBuiltin. It will be copied into this NativeModule and the
+ // jump table will be populated with that copy.
  void SetLazyBuiltin(Handle<Code> code);

  // Initializes all runtime stubs by copying them over from the JS-allocated
@@ -282,6 +281,12 @@ class V8_EXPORT_PRIVATE NativeModule final {
    return code;
  }

+ bool is_jump_table_slot(Address address) const {
+   return jump_table_->contains(address);
+ }
+
+ uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address);

  // Transition this module from code relying on trap handlers (i.e. without
  // explicit memory bounds checks) to code that does not require trap handlers
  // (i.e. code with explicit bounds checks).
@@ -290,11 +295,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
  // after calling this method.
  void DisableTrapHandler();

- // Returns the instruction start of code suitable for indirect or import calls
- // for the given function index. If the code at the given index is the lazy
- // compile stub, it will clone a non-anonymous lazy compile stub for the
- // purpose. This will soon change to always return a jump table slot.
- Address GetCallTargetForFunction(uint32_t index);
+ // Returns the target to call for the given function (returns a jump table
+ // slot within {jump_table_}).
+ Address GetCallTargetForFunction(uint32_t func_index) const;

  bool SetExecutable(bool executable);

@@ -322,6 +325,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
  void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
  bool lazy_compile_frozen() const { return lazy_compile_frozen_; }

+ WasmCode* Lookup(Address) const;

  const size_t instance_id = 0;
  ~NativeModule();

@@ -333,9 +338,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
  friend class NativeModuleModificationScope;

  static base::AtomicNumber<size_t> next_id_;

- NativeModule(Isolate* isolate, uint32_t num_functions, uint32_t num_imports,
-              bool can_request_more, VirtualMemory* code_space,
-              WasmCodeManager* code_manager, ModuleEnv& env);
+ NativeModule(Isolate* isolate, uint32_t num_functions,
+              uint32_t num_imported_functions, bool can_request_more,
+              VirtualMemory* code_space, WasmCodeManager* code_manager,
+              ModuleEnv& env);

  WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind);
  Address AllocateForCode(size_t size);

@@ -354,13 +360,16 @@ class V8_EXPORT_PRIVATE NativeModule final {
                     size_t handler_table_offset,
                     std::unique_ptr<ProtectedInstructions>, WasmCode::Tier,
                     WasmCode::FlushICache);
- WasmCode* CloneCode(const WasmCode*, WasmCode::FlushICache);
- WasmCode* Lookup(Address);
  Address GetLocalAddressFor(Handle<Code>);
  Address CreateTrampolineTo(Handle<Code>);
  // TODO(7424): Only used for debugging in {WasmCode::Validate}. Remove.
  Code* ReverseTrampolineLookup(Address target);

+ WasmCode* CreateEmptyJumpTable(uint32_t num_wasm_functions);
+
+ void PatchJumpTable(uint32_t func_index, Address target,
+                     WasmCode::FlushICache);

  void set_code(uint32_t index, WasmCode* code) {
    DCHECK_LT(index, num_functions_);
    DCHECK_LE(num_imported_functions_, index);
@@ -375,7 +384,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
  uint32_t num_functions_;
  uint32_t num_imported_functions_;
  std::unique_ptr<WasmCode* []> code_table_;
- std::unique_ptr<WasmCode* []> lazy_compile_stubs_;

  WasmCode* runtime_stub_table_[WasmCode::kRuntimeStubCount] = {nullptr};

@@ -383,6 +391,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
  // start of the trampoline.
  std::unordered_map<Address, Address> trampolines_;

+ // Jump table used to easily redirect wasm function calls.
+ WasmCode* jump_table_ = nullptr;

  std::unique_ptr<CompilationState, CompilationStateDeleter> compilation_state_;

  // A phantom reference to the {WasmModuleObject}. It is intentionally not
@@ -423,6 +434,7 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
      Isolate* isolate, size_t memory_estimate, uint32_t num_functions,
      uint32_t num_imported_functions, bool can_request_more, ModuleEnv& env);

+ NativeModule* LookupNativeModule(Address pc) const;
  WasmCode* LookupCode(Address pc) const;
  WasmCode* GetCodeFromStartAddress(Address pc) const;
  size_t remaining_uncommitted_code_space() const;
...
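The two new lookups above, GetCallTargetForFunction (function index to jump table slot) and GetFunctionIndexFromJumpTableSlot (slot address back to function index), rely on the slots being fixed-size and laid out contiguously. The following standalone sketch of that address arithmetic is not part of this CL; the table base, the use of the x64 slot size, and the helper names are illustrative assumptions only.

#include <cstdint>

// Illustrative only: 18 is the x64 kJumpTableSlotSize from this CL; other
// architectures use different slot sizes.
constexpr uint32_t kSlotSize = 18;

// Hypothetical helpers mirroring the index <-> slot mapping described above.
uintptr_t SlotAddressFor(uintptr_t table_base, uint32_t func_index) {
  return table_base + func_index * kSlotSize;
}

uint32_t FuncIndexFor(uintptr_t table_base, uintptr_t slot_address) {
  return static_cast<uint32_t>((slot_address - table_base) / kSlotSize);
}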
@@ -68,62 +68,19 @@ void CodeSpecialization::RelocateDirectCalls(NativeModule* native_module) {
  relocate_direct_calls_module_ = native_module;
}

- bool CodeSpecialization::ApplyToWholeModule(
-     NativeModule* native_module, Handle<WasmModuleObject> module_object,
-     ICacheFlushMode icache_flush_mode) {
+ bool CodeSpecialization::ApplyToWholeModule(NativeModule* native_module,
+                                             ICacheFlushMode icache_flush_mode) {
  DisallowHeapAllocation no_gc;
- WasmModule* module = module_object->module();
- std::vector<WasmFunction>* wasm_functions =
-     &module_object->module()->functions;
- FixedArray* export_wrappers = module_object->export_wrappers();
- DCHECK_EQ(export_wrappers->length(), module->num_exported_functions);
  bool changed = false;
- int func_index = module->num_imported_functions;

  // Patch all wasm functions.
- for (int num_wasm_functions = static_cast<int>(wasm_functions->size());
-      func_index < num_wasm_functions; ++func_index) {
-   WasmCode* wasm_function = native_module->code(func_index);
-   // TODO(clemensh): Get rid of this nullptr check
-   if (wasm_function == nullptr ||
-       wasm_function->kind() != WasmCode::kFunction) {
-     continue;
-   }
-   changed |= ApplyToWasmCode(wasm_function, icache_flush_mode);
+ for (WasmCode* wasm_code : native_module->code_table()) {
+   if (wasm_code == nullptr) continue;
+   if (wasm_code->kind() != WasmCode::kFunction) continue;
+   changed |= ApplyToWasmCode(wasm_code, icache_flush_mode);
  }

- // Patch all exported functions (JS_TO_WASM_FUNCTION).
- int reloc_mode = 0;
- // Patch CODE_TARGET if we shall relocate direct calls. If we patch direct
- // calls, the instance registered for that (relocate_direct_calls_module_)
- // should match the instance we currently patch (instance).
- if (relocate_direct_calls_module_ != nullptr) {
-   DCHECK_EQ(native_module, relocate_direct_calls_module_);
-   reloc_mode |= RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL);
- }
- if (!reloc_mode) return changed;
- int wrapper_index = 0;
- for (auto exp : module->export_table) {
-   if (exp.kind != kExternalFunction) continue;
-   Code* export_wrapper = Code::cast(export_wrappers->get(wrapper_index++));
-   if (export_wrapper->kind() != Code::JS_TO_WASM_FUNCTION) continue;
-   for (RelocIterator it(export_wrapper, reloc_mode); !it.done(); it.next()) {
-     RelocInfo::Mode mode = it.rinfo()->rmode();
-     switch (mode) {
-       case RelocInfo::JS_TO_WASM_CALL: {
-         changed = true;
-         Address new_target =
-             native_module->GetCallTargetForFunction(exp.index);
-         it.rinfo()->set_js_to_wasm_address(new_target, icache_flush_mode);
-       } break;
-       default:
-         UNREACHABLE();
-     }
-   }
- }
- DCHECK_EQ(module->functions.size(), func_index);
- DCHECK_EQ(export_wrappers->length(), wrapper_index);
  return changed;
}

@@ -167,9 +124,9 @@ bool CodeSpecialization::ApplyToWasmCode(wasm::WasmCode* code,
      uint32_t called_func_index = ExtractDirectCallIndex(
          patch_direct_calls_helper->decoder,
          patch_direct_calls_helper->func_bytes + byte_pos);
-     const WasmCode* new_code = native_module->code(called_func_index);
-     it.rinfo()->set_wasm_call_address(new_code->instruction_start(),
-                                       icache_flush_mode);
+     Address new_target =
+         native_module->GetCallTargetForFunction(called_func_index);
+     it.rinfo()->set_wasm_call_address(new_target, icache_flush_mode);
      changed = true;
    } break;
...
@@ -29,9 +29,8 @@ class CodeSpecialization {
  // Update all direct call sites based on the code table in the given module.
  void RelocateDirectCalls(NativeModule* module);
- // Apply all relocations and patching to all code in the module (i.e. wasm
- // code and exported function wrapper code).
- bool ApplyToWholeModule(NativeModule*, Handle<WasmModuleObject>,
+ // Apply all relocations and patching to all code in the module.
+ bool ApplyToWholeModule(NativeModule*,
                          ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
  // Apply all relocations and patching to one wasm code object.
  bool ApplyToWasmCode(wasm::WasmCode*,
...
@@ -568,56 +568,6 @@ Handle<FixedArray> GetOrCreateInterpretedFunctions(
  return new_arr;
}

- using CodeRelocationMap = std::map<Address, Address>;
- void RedirectCallsitesInCode(Isolate* isolate, const wasm::WasmCode* code,
-                              CodeRelocationMap* map) {
-   DisallowHeapAllocation no_gc;
-   for (RelocIterator it(code->instructions(), code->reloc_info(),
-                         code->constant_pool(),
-                         RelocInfo::ModeMask(RelocInfo::WASM_CALL));
-        !it.done(); it.next()) {
-     Address target = it.rinfo()->target_address();
-     auto new_target = map->find(target);
-     if (new_target == map->end()) continue;
-     it.rinfo()->set_wasm_call_address(new_target->second);
-   }
- }
- void RedirectCallsitesInJSWrapperCode(Isolate* isolate, Code* code,
-                                       CodeRelocationMap* map) {
-   DisallowHeapAllocation no_gc;
-   for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
-        !it.done(); it.next()) {
-     Address target = it.rinfo()->js_to_wasm_address();
-     auto new_target = map->find(target);
-     if (new_target == map->end()) continue;
-     it.rinfo()->set_js_to_wasm_address(new_target->second);
-   }
- }
- void RedirectCallsitesInInstance(Isolate* isolate, WasmInstanceObject* instance,
-                                  CodeRelocationMap* map) {
-   DisallowHeapAllocation no_gc;
-   // Redirect all calls in wasm functions.
-   wasm::NativeModule* native_module =
-       instance->compiled_module()->GetNativeModule();
-   for (uint32_t i = native_module->num_imported_functions(),
-                 e = native_module->num_functions();
-        i < e; ++i) {
-     wasm::WasmCode* code = native_module->code(i);
-     RedirectCallsitesInCode(isolate, code, map);
-   }
-   // TODO(6668): Find instances that imported our code and also patch those.
-   // Redirect all calls in exported functions.
-   FixedArray* export_wrapper = instance->module_object()->export_wrappers();
-   for (int i = 0, e = export_wrapper->length(); i != e; ++i) {
-     Code* code = Code::cast(export_wrapper->get(i));
-     RedirectCallsitesInJSWrapperCode(isolate, code, map);
-   }
- }

}  // namespace

Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
@@ -663,7 +613,6 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
  wasm::NativeModule* native_module =
      instance->compiled_module()->GetNativeModule();
  wasm::WasmModule* module = instance->module();
- CodeRelocationMap code_to_relocate;

  // We may modify js wrappers, as well as wasm functions. Hence the 2
  // modification scopes.
@@ -680,16 +629,10 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
        isolate, func_index, module->functions[func_index].sig);
    const wasm::WasmCode* wasm_new_code =
        native_module->AddInterpreterEntry(new_code, func_index);
-   const wasm::WasmCode* old_code =
-       native_module->code(static_cast<uint32_t>(func_index));
    Handle<Foreign> foreign_holder = isolate->factory()->NewForeign(
        wasm_new_code->instruction_start(), TENURED);
    interpreted_functions->set(func_index, *foreign_holder);
-   DCHECK_EQ(0, code_to_relocate.count(old_code->instruction_start()));
-   code_to_relocate.insert(std::make_pair(old_code->instruction_start(),
-                                          wasm_new_code->instruction_start()));
  }
- RedirectCallsitesInInstance(isolate, *instance, &code_to_relocate);
}

void WasmDebugInfo::PrepareStep(StepAction step_action) {
...
@@ -2676,18 +2676,23 @@ class ThreadImpl {
      return {ExternalCallResult::INVALID_FUNC};
    }

-   WasmCode* code;
-   Handle<WasmInstanceObject> instance;
-   {
-     IndirectFunctionTableEntry entry(instance_object_, entry_index);
-     // Signature check.
-     if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
-       return {ExternalCallResult::SIGNATURE_MISMATCH};
-     }
-     instance = handle(entry.instance(), isolate);
-     code = isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
-         entry.target());
-   }
+   IndirectFunctionTableEntry entry(instance_object_, entry_index);
+   // Signature check.
+   if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
+     return {ExternalCallResult::SIGNATURE_MISMATCH};
+   }
+   Handle<WasmInstanceObject> instance = handle(entry.instance(), isolate);
+   Address target = entry.target();
+   NativeModule* native_module =
+       isolate->wasm_engine()->code_manager()->LookupNativeModule(target);
+   WasmCode* code;
+   if (native_module->is_jump_table_slot(target)) {
+     uint32_t func_index =
+         native_module->GetFunctionIndexFromJumpTableSlot(target);
+     code = native_module->code(func_index);
+   } else {
+     code = native_module->Lookup(target);
+   }

    // Call either an internal or external WASM function.
...
@@ -169,8 +169,6 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_instances,
                   FixedArray, kIndirectFunctionTableInstancesOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, managed_native_allocations, Foreign,
                   kManagedNativeAllocationsOffset)
- OPTIONAL_ACCESSORS(WasmInstanceObject, managed_indirect_patcher, Foreign,
-                    kManagedIndirectPatcherOffset)
ACCESSORS(WasmInstanceObject, undefined_value, Oddball, kUndefinedValueOffset)
ACCESSORS(WasmInstanceObject, null_value, Oddball, kNullValueOffset)
ACCESSORS(WasmInstanceObject, centry_stub, Code, kCEntryStubOffset)
...
@@ -78,7 +78,7 @@ class IndirectFunctionTableEntry {
//  - target = pointer to wasm-to-js wrapper code entrypoint
//  - an imported wasm function from another instance, which has fields
//    - instance = target instance
-//    - target = entrypoint to wasm code of the function
+//    - target = entrypoint for the function
class ImportedFunctionEntry {
 public:
  inline ImportedFunctionEntry(Handle<WasmInstanceObject>, int index);
@@ -388,7 +388,6 @@ class WasmInstanceObject : public JSObject {
  DECL_ACCESSORS(imported_function_callables, FixedArray)
  DECL_OPTIONAL_ACCESSORS(indirect_function_table_instances, FixedArray)
  DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
- DECL_OPTIONAL_ACCESSORS(managed_indirect_patcher, Foreign)
  DECL_ACCESSORS(undefined_value, Oddball)
  DECL_ACCESSORS(null_value, Oddball)
  DECL_ACCESSORS(centry_stub, Code)
@@ -423,7 +422,6 @@ class WasmInstanceObject : public JSObject {
  V(kImportedFunctionCallablesOffset, kPointerSize) \
  V(kIndirectFunctionTableInstancesOffset, kPointerSize) \
  V(kManagedNativeAllocationsOffset, kPointerSize) \
- V(kManagedIndirectPatcherOffset, kPointerSize) \
  V(kUndefinedValueOffset, kPointerSize) \
  V(kNullValueOffset, kPointerSize) \
  V(kCEntryStubOffset, kPointerSize) \
...
@@ -268,7 +268,7 @@ NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate,
}

size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
- if (code->kind() == WasmCode::kLazyStub) return sizeof(size_t);
+ if (code == nullptr) return sizeof(size_t);
  DCHECK_EQ(WasmCode::kFunction, code->kind());
  return kCodeHeaderSize + code->instructions().size() +
         code->reloc_info().size() + code->source_positions().size() +
@@ -290,7 +290,7 @@ void NativeModuleSerializer::WriteHeader(Writer* writer) {
}

void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
- if (code->kind() == WasmCode::kLazyStub) {
+ if (code == nullptr) {
    writer->Write(size_t{0});
    return;
  }
@@ -500,6 +500,8 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
      handler_table_offset, std::move(protected_instructions), tier,
      WasmCode::kNoFlushICache);
  native_module_->set_code(fn_index, ret);
+ native_module_->PatchJumpTable(fn_index, ret->instruction_start(),
+                                WasmCode::kFlushICache);

  // Relocate the code.
  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
...
@@ -118,9 +118,9 @@ uint32_t TestingModuleBuilder::AddFunction(FunctionSig* sig, const char* name) {
Handle<JSFunction> TestingModuleBuilder::WrapCode(uint32_t index) {
  // Wrap the code so it can be called as a JS function.
  Link();
- wasm::WasmCode* code = native_module_->code(index);
+ Address target = native_module_->GetCallTargetForFunction(index);
  Handle<Code> ret_code = compiler::CompileJSToWasmWrapper(
-     isolate_, test_module_ptr_, code->instruction_start(), index,
+     isolate_, test_module_ptr_, target, index,
      trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler : kNoTrapHandler);
  Handle<JSFunction> ret = WasmExportedFunction::New(
      isolate_, instance_object(), MaybeHandle<String>(),
@@ -165,9 +165,9 @@ void TestingModuleBuilder::PopulateIndirectFunctionTable() {
    for (int j = 0; j < table_size; j++) {
      WasmFunction& function = test_module_->functions[table.values[j]];
      int sig_id = test_module_->signature_map.Find(function.sig);
-     auto wasm_code = native_module_->code(function.func_index);
-     IndirectFunctionTableEntry(instance, j)
-         .set(sig_id, *instance, wasm_code->instruction_start());
+     auto target =
+         native_module_->GetCallTargetForFunction(function.func_index);
+     IndirectFunctionTableEntry(instance, j).set(sig_id, *instance, target);
    }
  }
}
...
@@ -210,14 +210,12 @@ class TestingModuleBuilder {
    return reinterpret_cast<Address>(globals_data_);
  }

  void Link() {
-   if (!linked_) {
-     Handle<WasmModuleObject> module(instance_object()->module_object());
-     CodeSpecialization code_specialization;
-     code_specialization.RelocateDirectCalls(native_module_);
-     code_specialization.ApplyToWholeModule(native_module_, module);
-     linked_ = true;
-     native_module_->SetExecutable(true);
-   }
+   if (linked_) return;
+   CodeSpecialization code_specialization;
+   code_specialization.RelocateDirectCalls(native_module_);
+   code_specialization.ApplyToWholeModule(native_module_);
+   linked_ = true;
+   native_module_->SetExecutable(true);
  }

  ModuleEnv CreateModuleEnv();
...
@@ -6,6 +6,7 @@
#include "testing/gmock/include/gmock/gmock.h"

#include "src/wasm/function-compiler.h"
+ #include "src/wasm/jump-table-assembler.h"
#include "src/wasm/wasm-code-manager.h"

namespace v8 {
@@ -143,6 +144,10 @@ enum ModuleStyle : int { Fixed = 0, Growable = 1 };
class WasmCodeManagerTest : public TestWithContext,
                            public ::testing::WithParamInterface<ModuleStyle> {
 public:
+ static constexpr uint32_t kNumFunctions = 10;
+ static constexpr uint32_t kJumpTableSize = RoundUp<kCodeAlignment>(
+     kNumFunctions * JumpTableAssembler::kJumpTableSlotSize);

  using NativeModulePtr = std::unique_ptr<NativeModule>;

  NativeModulePtr AllocModule(WasmCodeManager* manager, size_t size,
@@ -150,8 +155,8 @@ class WasmCodeManagerTest : public TestWithContext,
    bool can_request_more = style == Growable;
    wasm::ModuleEnv env(nullptr, UseTrapHandler::kNoTrapHandler,
                        RuntimeExceptionSupport::kNoRuntimeExceptionSupport);
-   return manager->NewNativeModule(i_isolate(), size, 10, 0, can_request_more,
-                                   env);
+   return manager->NewNativeModule(i_isolate(), size, kNumFunctions, 0,
+                                   can_request_more, env);
  }

  WasmCode* AddCode(NativeModule* native_module, uint32_t index, size_t size) {
@@ -175,9 +180,7 @@ TEST_P(WasmCodeManagerTest, EmptyCase) {
  WasmCodeManager manager(0 * page());
  CHECK_EQ(0, manager.remaining_uncommitted_code_space());
- NativeModulePtr native_module = AllocModule(&manager, 1 * page(), GetParam());
- CHECK(native_module);
- ASSERT_DEATH_IF_SUPPORTED(AddCode(native_module.get(), 0, 10),
+ ASSERT_DEATH_IF_SUPPORTED(AllocModule(&manager, 1 * page(), GetParam()),
                            "OOM in NativeModule::AddOwnedCode");
}

@@ -186,7 +189,7 @@ TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
  CHECK_EQ(1 * page(), manager.remaining_uncommitted_code_space());
  NativeModulePtr native_module = AllocModule(&manager, 1 * page(), GetParam());
  CHECK(native_module);
- CHECK_EQ(1 * page(), manager.remaining_uncommitted_code_space());
+ CHECK_EQ(0, manager.remaining_uncommitted_code_space());
  uint32_t index = 0;
  WasmCode* code = AddCode(native_module.get(), index++, 1 * kCodeAlignment);
  CHECK_NOT_NULL(code);
@@ -196,7 +199,8 @@ TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
  CHECK_NOT_NULL(code);
  CHECK_EQ(0, manager.remaining_uncommitted_code_space());

- code = AddCode(native_module.get(), index++, page() - 4 * kCodeAlignment);
+ code = AddCode(native_module.get(), index++,
+                page() - 4 * kCodeAlignment - kJumpTableSize);
  CHECK_NOT_NULL(code);
  CHECK_EQ(0, manager.remaining_uncommitted_code_space());
@@ -206,14 +210,14 @@ TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
}

TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
- WasmCodeManager manager(1 * page());
- NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), GetParam());
- NativeModulePtr nm2 = AllocModule(&manager, 1 * page(), GetParam());
+ WasmCodeManager manager(3 * page());
+ NativeModulePtr nm1 = AllocModule(&manager, 2 * page(), GetParam());
+ NativeModulePtr nm2 = AllocModule(&manager, 2 * page(), GetParam());
  CHECK(nm1);
  CHECK(nm2);
- WasmCode* code = AddCode(nm1.get(), 0, 1 * page());
+ WasmCode* code = AddCode(nm1.get(), 0, 2 * page() - kJumpTableSize);
  CHECK_NOT_NULL(code);
- ASSERT_DEATH_IF_SUPPORTED(AddCode(nm2.get(), 0, 1 * page()),
+ ASSERT_DEATH_IF_SUPPORTED(AddCode(nm2.get(), 0, 2 * page() - kJumpTableSize),
                            "OOM in NativeModule::AddOwnedCode");
}

@@ -224,10 +228,10 @@ TEST_P(WasmCodeManagerTest, DifferentHeapsApplyLimitsIndependently) {
  NativeModulePtr nm2 = AllocModule(&manager2, 1 * page(), GetParam());
  CHECK(nm1);
  CHECK(nm2);
- WasmCode* code = AddCode(nm1.get(), 0, 1 * page());
+ WasmCode* code = AddCode(nm1.get(), 0, 1 * page() - kJumpTableSize);
  CHECK_NOT_NULL(code);
  CHECK_EQ(0, manager1.remaining_uncommitted_code_space());
- code = AddCode(nm2.get(), 0, 1 * page());
+ code = AddCode(nm2.get(), 0, 1 * page() - kJumpTableSize);
  CHECK_NOT_NULL(code);
}

@@ -252,7 +256,7 @@ TEST_P(WasmCodeManagerTest, CommitIncrements) {
  code = AddCode(nm.get(), 1, 2 * page());
  CHECK_NOT_NULL(code);
  CHECK_EQ(manager.remaining_uncommitted_code_space(), 7 * page());
- code = AddCode(nm.get(), 2, page() - kCodeAlignment);
+ code = AddCode(nm.get(), 2, page() - kCodeAlignment - kJumpTableSize);
  CHECK_NOT_NULL(code);
  CHECK_EQ(manager.remaining_uncommitted_code_space(), 7 * page());
}
...