Commit 33f6c3e1 authored by Clemens Hammacher, committed by Commit Bot

Revert "[wasm] Introduce jump table"

This reverts commit 733b7c82.

Reason for revert: breaks arm64 gc-stress: https://ci.chromium.org/buildbot/client.v8.ports/V8%20Linux%20-%20arm64%20-%20sim%20-%20gc%20stress/11659

Original change's description:
> [wasm] Introduce jump table
> 
> This introduces the concept of a jump table for WebAssembly, which is
> used for every direct and indirect call to any WebAssembly function.
> For lazy compilation, it will initially contain code that calls the
> WasmCompileLazy builtin, passing the index of the function to be
> called.
> For non-lazy-compilation, it will contain a jump to the actual code.
> The jump table allows us to easily redirect functions for lazy
> compilation, tier-up, debugging and (in the future) code aging. After
> this CL, we will not need to patch existing code any more for any of
> these operations.
> 
> R=mstarzinger@chromium.org, titzer@chromium.org
> 
> Bug: v8:7758
> Change-Id: I45f9983c2b06ae81bf5ce9847f4542fb48844a4f
> Reviewed-on: https://chromium-review.googlesource.com/1097075
> Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
> Reviewed-by: Ben Titzer <titzer@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#53805}
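
For illustration, the redirection idea described above can be modeled in a few lines. This is a minimal sketch, not V8's implementation: the real table (see JumpTableAssembler in the diff below) stores executable jump instructions per slot and is patched in place via PatchJumpTableSlot, whereas this sketch models slots as plain data; all names except GetCallTargetForFunction (which mirrors the NativeModule method in the diff) are stand-ins.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using Address = uintptr_t;

    // One slot per wasm function; every direct and indirect call is routed
    // through the callee's slot, so retargeting a function is a single slot
    // update instead of patching every call site.
    class JumpTableSketch {
     public:
      // Initially, every slot targets the lazy-compile stub.
      JumpTableSketch(size_t num_functions, Address lazy_compile_stub)
          : slots_(num_functions, lazy_compile_stub) {}

      // Lazy compilation finished, tier-up, debugging: redirect one slot.
      void Redirect(uint32_t func_index, Address new_target) {
        slots_[func_index] = new_target;
      }

      // Call sites always fetch the current target from the table.
      Address GetCallTargetForFunction(uint32_t func_index) const {
        return slots_[func_index];
      }

     private:
      std::vector<Address> slots_;
    };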

TBR=mstarzinger@chromium.org,titzer@chromium.org,clemensh@chromium.org,sreten.kovacevic@mips.com

Change-Id: Iea358db2cf13656a65cf69a6d82cbbc10d3e7e1c
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:7758
Reviewed-on: https://chromium-review.googlesource.com/1105157
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53807}
parent a2b5e64c
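
For orientation before reading the hunks: the reverted CL had the jump-table trampoline hand the function index to the runtime (two arguments), while the restored code passes only the instance and walks the stack to find the function to compile. A sketch of the two calling conventions, with the details taken from the hunks below:

    // Reverted (jump table): the trampoline materializes func_index in a
    // register (r4 on arm, w8 on arm64, edi on ia32, pushed on x64), then:
    //   Runtime_WasmCompileLazy(instance, Smi(func_index))   // 2 args
    //   Address CompileLazy(Isolate*, NativeModule*, uint32_t func_index);
    //
    // Restored by this revert: only the instance is passed, and CompileLazy
    // walks the top stack frames to find the function to compile:
    //   Runtime_WasmCompileLazy(instance)                    // 1 arg
    //   Address CompileLazy(Isolate* isolate, Handle<WasmInstanceObject> instance);
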
@@ -1153,7 +1153,6 @@ int Operand::InstructionsRequired(const Assembler* assembler,
void Assembler::Move32BitImmediate(Register rd, const Operand& x,
Condition cond) {
if (UseMovImmediateLoad(x, this)) {
CpuFeatureScope scope(this, ARMv7);
// UseMovImmediateLoad should return false when we need to output
// relocation info, since we prefer the constant pool for values that
// can be patched.
@@ -1161,9 +1160,12 @@ void Assembler::Move32BitImmediate(Register rd, const Operand& x,
UseScratchRegisterScope temps(this);
// Re-use the destination register as a scratch if possible.
Register target = rd != pc ? rd : temps.Acquire();
uint32_t imm32 = static_cast<uint32_t>(x.immediate());
movw(target, imm32 & 0xFFFF, cond);
movt(target, imm32 >> 16, cond);
if (CpuFeatures::IsSupported(ARMv7)) {
uint32_t imm32 = static_cast<uint32_t>(x.immediate());
CpuFeatureScope scope(this, ARMv7);
movw(target, imm32 & 0xFFFF, cond);
movt(target, imm32 >> 16, cond);
}
if (target.code() != rd.code()) {
mov(rd, target, LeaveCC, cond);
}
......
@@ -1549,9 +1549,6 @@ class Assembler : public AssemblerBase {
UNREACHABLE();
}
// Move a 32-bit immediate into a register, potentially via the constant pool.
void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);
protected:
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1683,6 +1680,9 @@
inline void CheckBuffer();
void GrowBuffer();
// 32-bit immediate values
void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);
// Instruction generation
void AddrMode1(Instr instr, Register rd, Register rn, const Operand& x);
// Attempt to encode operand |x| for instruction |instr| and return true on
......
@@ -2294,9 +2294,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in r4 by the jump table trampoline.
// Convert to Smi for the runtime call.
__ SmiTag(r4, r4);
{
TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2311,10 +2308,8 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ stm(db_w, sp, gp_regs);
__ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);
// Pass instance and function index as explicit arguments to the runtime
// function.
// Pass the WASM instance as an explicit argument to WasmCompileLazy.
__ push(kWasmInstanceRegister);
__ push(r4);
// Load the correct CEntry builtin from the instance object.
__ ldr(r2, FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
......
@@ -2746,10 +2746,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in w8 by the jump table trampoline.
// Sign extend and convert to Smi for the runtime call.
__ sxtw(x8, w8);
__ SmiTag(x8, x8);
{
TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2764,9 +2760,8 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ PushXRegList(gp_regs);
__ PushDRegList(fp_regs);
// Pass instance and function index as explicit arguments to the runtime
// function.
__ Push(kWasmInstanceRegister, x8);
// Pass the WASM instance as an explicit argument to WasmCompileLazy.
__ PushArgument(kWasmInstanceRegister);
// Load the correct CEntry builtin from the instance object.
__ Ldr(x2, FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
......
@@ -2481,9 +2481,6 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in edi by the jump table trampoline.
// Convert to Smi for the runtime call.
__ SmiTag(edi);
{
TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2507,10 +2504,8 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
offset += kSimd128Size;
}
// Push the WASM instance as an explicit argument to WasmCompileLazy.
// Pass the WASM instance as an explicit argument to WasmCompileLazy.
__ Push(kWasmInstanceRegister);
// Push the function index as second argument.
__ Push(edi);
// Load the correct CEntry builtin from the instance object.
__ mov(ecx, FieldOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
......
@@ -2423,10 +2423,6 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was pushed to the stack by the caller as int32.
__ Pop(r11);
// Convert to Smi for the runtime call.
__ SmiTag(r11, r11);
{
TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2450,10 +2446,8 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
offset += kSimd128Size;
}
// Push the WASM instance as an explicit argument to WasmCompileLazy.
// Pass the WASM instance as an explicit argument to WasmCompileLazy.
__ Push(kWasmInstanceRegister);
// Push the function index as second argument.
__ Push(r11);
// Load the correct CEntry builtin from the instance object.
__ movp(rcx, FieldOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
......
@@ -3216,12 +3216,6 @@ void Assembler::GrowBuffer() {
*p += pc_delta;
}
// Relocate js-to-wasm calls (which are encoded pc-relative).
for (RelocIterator it(desc, RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
!it.done(); it.next()) {
it.rinfo()->apply(pc_delta);
}
DCHECK(!buffer_overflow());
}
......
@@ -1739,6 +1739,10 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) { // NOLINT
os << "\n - managed_native_allocations: "
<< Brief(managed_native_allocations());
}
if (has_managed_indirect_patcher()) {
os << "\n - managed_indirect_patcher: "
<< Brief(managed_indirect_patcher());
}
os << "\n - memory_start: " << static_cast<void*>(memory_start());
os << "\n - memory_size: " << memory_size();
os << "\n - memory_mask: " << AsHex(memory_mask());
......
@@ -291,9 +291,8 @@ RUNTIME_FUNCTION(Runtime_WasmStackGuard) {
RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_SMI_ARG_CHECKED(func_index, 1);
ClearThreadInWasmScope wasm_flag(true);
@@ -307,8 +306,7 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
DCHECK_EQ(*instance, WasmCompileLazyFrame::cast(it.frame())->wasm_instance());
#endif
Address entrypoint = wasm::CompileLazy(
isolate, instance->compiled_module()->GetNativeModule(), func_index);
Address entrypoint = wasm::CompileLazy(isolate, instance);
return reinterpret_cast<Object*>(entrypoint);
}
......
@@ -581,7 +581,7 @@ namespace internal {
F(WasmThrow, 0, 1) \
F(WasmThrowCreate, 2, 1) \
F(WasmThrowTypeError, 0, 1) \
F(WasmCompileLazy, 2, 1)
F(WasmCompileLazy, 1, 1)
#define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
F(DebugBreakOnBytecode, 1, 2) \
......
@@ -4,7 +4,6 @@
#include "src/wasm/jump-table-assembler.h"
#include "src/assembler-inl.h"
#include "src/macro-assembler-inl.h"
namespace v8 {
@@ -28,122 +27,6 @@ void JumpTableAssembler::EmitJumpTrampoline(Address target) {
#endif
}
// The implementation is compact enough to implement it inline here. If it gets
// much bigger, we might want to split it in a separate file per architecture.
#if V8_TARGET_ARCH_X64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
// TODO(clemensh): Try more efficient sequences.
// Alternative 1:
// [header]: mov r10, [lazy_compile_target]
// jmp r10
// [slot 0]: push [0]
// jmp [header] // pc-relative --> slot size: 10 bytes
//
// Alternative 2:
// [header]: lea r10, [rip - [header]]
// shr r10, 3 // compute index from offset
// push r10
// mov r10, [lazy_compile_target]
// jmp r10
// [slot 0]: call [header]
// ret // -> slot size: 5 bytes
// Use a push, because mov to an extended register takes 6 bytes.
pushq(Immediate(func_index)); // max 5 bytes
movq(kScratchRegister, uint64_t{lazy_compile_target}); // max 10 bytes
jmp(kScratchRegister); // 3 bytes
}
void JumpTableAssembler::EmitJumpSlot(Address target) {
movq(kScratchRegister, static_cast<uint64_t>(target));
jmp(kScratchRegister);
}
void JumpTableAssembler::NopBytes(int bytes) {
DCHECK_LE(0, bytes);
Nop(bytes);
}
#elif V8_TARGET_ARCH_IA32
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
mov(edi, func_index); // 5 bytes
jmp(lazy_compile_target, RelocInfo::NONE); // 5 bytes
}
void JumpTableAssembler::EmitJumpSlot(Address target) {
jmp(target, RelocInfo::NONE);
}
void JumpTableAssembler::NopBytes(int bytes) {
DCHECK_LE(0, bytes);
Nop(bytes);
}
#elif V8_TARGET_ARCH_ARM
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
// Load function index to r4.
// This generates <= 3 instructions: ldr, const pool start, constant
Move32BitImmediate(r4, Operand(func_index));
// Jump to {lazy_compile_target}.
int offset =
lazy_compile_target - reinterpret_cast<Address>(pc_) - kPcLoadDelta;
DCHECK_EQ(0, offset % kInstrSize);
DCHECK(is_int26(offset)); // 26 bit imm
b(offset); // 1 instr
CheckConstPool(true, false); // force emit of const pool
}
void JumpTableAssembler::EmitJumpSlot(Address target) {
int offset = target - reinterpret_cast<Address>(pc_) - kPcLoadDelta;
DCHECK_EQ(0, offset % kInstrSize);
DCHECK(is_int26(offset)); // 26 bit imm
b(offset);
}
void JumpTableAssembler::NopBytes(int bytes) {
DCHECK_LE(0, bytes);
DCHECK_EQ(0, bytes % kInstrSize);
for (; bytes > 0; bytes -= kInstrSize) {
nop();
}
}
#elif V8_TARGET_ARCH_ARM64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
Mov(w8, func_index); // max. 2 instr
Jump(lazy_compile_target, RelocInfo::NONE); // 1 instr
}
void JumpTableAssembler::EmitJumpSlot(Address target) {
Jump(target, RelocInfo::NONE);
}
void JumpTableAssembler::NopBytes(int bytes) {
DCHECK_LE(0, bytes);
DCHECK_EQ(0, bytes % kInstructionSize);
for (; bytes > 0; bytes -= kInstructionSize) {
nop();
}
}
#else
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
UNIMPLEMENTED();
}
void JumpTableAssembler::EmitJumpSlot(Address target) { UNIMPLEMENTED(); }
void JumpTableAssembler::NopBytes(int bytes) {
DCHECK_LE(0, bytes);
UNIMPLEMENTED();
}
#endif
} // namespace wasm
} // namespace internal
} // namespace v8
@@ -6,7 +6,6 @@
#define V8_WASM_JUMP_TABLE_ASSEMBLER_H_
#include "src/macro-assembler.h"
#include "src/wasm/wasm-code-manager.h"
namespace v8 {
namespace internal {
@@ -27,42 +26,8 @@ class JumpTableAssembler : public TurboAssembler {
public:
JumpTableAssembler() : TurboAssembler(GetDefaultIsolateData(), nullptr, 0) {}
// Instantiate a {JumpTableAssembler} for patching.
explicit JumpTableAssembler(Address slot_addr, int size = 256)
: TurboAssembler(GetDefaultIsolateData(),
reinterpret_cast<void*>(slot_addr), size) {}
// Emit a trampoline to a possibly far away code target.
void EmitJumpTrampoline(Address target);
#if V8_TARGET_ARCH_X64
static constexpr int kJumpTableSlotSize = 18;
#elif V8_TARGET_ARCH_IA32
static constexpr int kJumpTableSlotSize = 10;
#elif V8_TARGET_ARCH_ARM
static constexpr int kJumpTableSlotSize = 4 * kInstrSize;
#elif V8_TARGET_ARCH_ARM64
static constexpr int kJumpTableSlotSize = 3 * kInstructionSize;
#else
static constexpr int kJumpTableSlotSize = 1;
#endif
void EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target);
void EmitJumpSlot(Address target);
void NopBytes(int bytes);
static void PatchJumpTableSlot(Address slot, Address new_target,
WasmCode::FlushICache flush_i_cache) {
JumpTableAssembler jsasm(slot);
jsasm.EmitJumpSlot(new_target);
jsasm.NopBytes(kJumpTableSlotSize - jsasm.pc_offset());
if (flush_i_cache) {
Assembler::FlushICache(slot, kJumpTableSlotSize);
}
}
};
} // namespace wasm
......
@@ -65,8 +65,15 @@ V8_EXPORT_PRIVATE Handle<Script> CreateWasmScript(
Isolate* isolate, const ModuleWireBytes& wire_bytes);
// Triggered by the WasmCompileLazy builtin.
// Returns the instruction start of the compiled code object.
Address CompileLazy(Isolate*, NativeModule*, uint32_t func_index);
// Walks the stack (top three frames) to determine the wasm instance involved
// and which function to compile.
// Then triggers WasmCompiledModule::CompileLazy, taking care of correctly
// patching the call site or indirect function tables.
// Returns either the Code object that has been lazily compiled, or Illegal if
// an error occurred. In the latter case, a pending exception has been set,
// which will be triggered when returning from the runtime function, i.e. the
// Illegal builtin will never be called.
Address CompileLazy(Isolate* isolate, Handle<WasmInstanceObject> instance);
// Encapsulates all the state and steps of an asynchronous compilation.
// An asynchronous compile job consists of a number of tasks that are executed
......
@@ -94,8 +94,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
kLazyStub,
kRuntimeStub,
kInterpreterEntry,
kTrampoline,
kJumpTable
kTrampoline
};
// Each runtime stub is identified by an id. This id is used to reference the
@@ -252,8 +251,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCode* AddInterpreterEntry(Handle<Code> code, uint32_t index);
// When starting lazy compilation, provide the WasmLazyCompile builtin by
// calling SetLazyBuiltin. It will be copied into this NativeModule and the
// jump table will be populated with that copy.
// calling SetLazyBuiltin. It will initialize the code table with it. Copies
// of it might be cloned from them later when creating entries for exported
// functions and indirect callable functions, so that they may be identified
// by the runtime.
void SetLazyBuiltin(Handle<Code> code);
// Initializes all runtime stubs by copying them over from the JS-allocated
@@ -281,12 +282,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
return code;
}
bool is_jump_table_slot(Address address) const {
return jump_table_->contains(address);
}
uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address);
// Transition this module from code relying on trap handlers (i.e. without
// explicit memory bounds checks) to code that does not require trap handlers
// (i.e. code with explicit bounds checks).
@@ -295,9 +290,11 @@
// after calling this method.
void DisableTrapHandler();
// Returns the target to call for the given function (returns a jump table
// slot within {jump_table_}).
Address GetCallTargetForFunction(uint32_t func_index) const;
// Returns the instruction start of code suitable for indirect or import calls
// for the given function index. If the code at the given index is the lazy
// compile stub, it will clone a non-anonymous lazy compile stub for the
// purpose. This will soon change to always return a jump table slot.
Address GetCallTargetForFunction(uint32_t index);
bool SetExecutable(bool executable);
@@ -325,8 +322,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
WasmCode* Lookup(Address) const;
const size_t instance_id = 0;
~NativeModule();
@@ -338,10 +333,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
friend class NativeModuleModificationScope;
static base::AtomicNumber<size_t> next_id_;
NativeModule(Isolate* isolate, uint32_t num_functions,
uint32_t num_imported_functions, bool can_request_more,
VirtualMemory* code_space, WasmCodeManager* code_manager,
ModuleEnv& env);
NativeModule(Isolate* isolate, uint32_t num_functions, uint32_t num_imports,
bool can_request_more, VirtualMemory* code_space,
WasmCodeManager* code_manager, ModuleEnv& env);
WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind);
Address AllocateForCode(size_t size);
@@ -360,16 +354,13 @@ class V8_EXPORT_PRIVATE NativeModule final {
size_t handler_table_offset,
std::unique_ptr<ProtectedInstructions>, WasmCode::Tier,
WasmCode::FlushICache);
WasmCode* CloneCode(const WasmCode*, WasmCode::FlushICache);
WasmCode* Lookup(Address);
Address GetLocalAddressFor(Handle<Code>);
Address CreateTrampolineTo(Handle<Code>);
// TODO(7424): Only used for debugging in {WasmCode::Validate}. Remove.
Code* ReverseTrampolineLookup(Address target);
WasmCode* CreateEmptyJumpTable(uint32_t num_wasm_functions);
void PatchJumpTable(uint32_t func_index, Address target,
WasmCode::FlushICache);
void set_code(uint32_t index, WasmCode* code) {
DCHECK_LT(index, num_functions_);
DCHECK_LE(num_imported_functions_, index);
@@ -384,6 +375,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
uint32_t num_functions_;
uint32_t num_imported_functions_;
std::unique_ptr<WasmCode* []> code_table_;
std::unique_ptr<WasmCode* []> lazy_compile_stubs_;
WasmCode* runtime_stub_table_[WasmCode::kRuntimeStubCount] = {nullptr};
@@ -391,9 +383,6 @@
// start of the trampoline.
std::unordered_map<Address, Address> trampolines_;
// Jump table used to easily redirect wasm function calls.
WasmCode* jump_table_ = nullptr;
std::unique_ptr<CompilationState, CompilationStateDeleter> compilation_state_;
// A phantom reference to the {WasmModuleObject}. It is intentionally not
@@ -434,7 +423,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
Isolate* isolate, size_t memory_estimate, uint32_t num_functions,
uint32_t num_imported_functions, bool can_request_more, ModuleEnv& env);
NativeModule* LookupNativeModule(Address pc) const;
WasmCode* LookupCode(Address pc) const;
WasmCode* GetCodeFromStartAddress(Address pc) const;
size_t remaining_uncommitted_code_space() const;
......
@@ -68,19 +68,62 @@ void CodeSpecialization::RelocateDirectCalls(NativeModule* native_module) {
relocate_direct_calls_module_ = native_module;
}
bool CodeSpecialization::ApplyToWholeModule(NativeModule* native_module,
ICacheFlushMode icache_flush_mode) {
bool CodeSpecialization::ApplyToWholeModule(
NativeModule* native_module, Handle<WasmModuleObject> module_object,
ICacheFlushMode icache_flush_mode) {
DisallowHeapAllocation no_gc;
WasmModule* module = module_object->module();
std::vector<WasmFunction>* wasm_functions =
&module_object->module()->functions;
FixedArray* export_wrappers = module_object->export_wrappers();
DCHECK_EQ(export_wrappers->length(), module->num_exported_functions);
bool changed = false;
int func_index = module->num_imported_functions;
// Patch all wasm functions.
for (WasmCode* wasm_code : native_module->code_table()) {
if (wasm_code == nullptr) continue;
if (wasm_code->kind() != WasmCode::kFunction) continue;
changed |= ApplyToWasmCode(wasm_code, icache_flush_mode);
for (int num_wasm_functions = static_cast<int>(wasm_functions->size());
func_index < num_wasm_functions; ++func_index) {
WasmCode* wasm_function = native_module->code(func_index);
// TODO(clemensh): Get rid of this nullptr check
if (wasm_function == nullptr ||
wasm_function->kind() != WasmCode::kFunction) {
continue;
}
changed |= ApplyToWasmCode(wasm_function, icache_flush_mode);
}
// Patch all exported functions (JS_TO_WASM_FUNCTION).
int reloc_mode = 0;
// Patch CODE_TARGET if we shall relocate direct calls. If we patch direct
// calls, the instance registered for that (relocate_direct_calls_module_)
// should match the instance we currently patch (instance).
if (relocate_direct_calls_module_ != nullptr) {
DCHECK_EQ(native_module, relocate_direct_calls_module_);
reloc_mode |= RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL);
}
if (!reloc_mode) return changed;
int wrapper_index = 0;
for (auto exp : module->export_table) {
if (exp.kind != kExternalFunction) continue;
Code* export_wrapper = Code::cast(export_wrappers->get(wrapper_index++));
if (export_wrapper->kind() != Code::JS_TO_WASM_FUNCTION) continue;
for (RelocIterator it(export_wrapper, reloc_mode); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
switch (mode) {
case RelocInfo::JS_TO_WASM_CALL: {
changed = true;
Address new_target =
native_module->GetCallTargetForFunction(exp.index);
it.rinfo()->set_js_to_wasm_address(new_target, icache_flush_mode);
} break;
default:
UNREACHABLE();
}
}
}
DCHECK_EQ(module->functions.size(), func_index);
DCHECK_EQ(export_wrappers->length(), wrapper_index);
return changed;
}
@@ -124,9 +167,9 @@ bool CodeSpecialization::ApplyToWasmCode(wasm::WasmCode* code,
uint32_t called_func_index = ExtractDirectCallIndex(
patch_direct_calls_helper->decoder,
patch_direct_calls_helper->func_bytes + byte_pos);
Address new_target =
native_module->GetCallTargetForFunction(called_func_index);
it.rinfo()->set_wasm_call_address(new_target, icache_flush_mode);
const WasmCode* new_code = native_module->code(called_func_index);
it.rinfo()->set_wasm_call_address(new_code->instruction_start(),
icache_flush_mode);
changed = true;
} break;
......
@@ -29,8 +29,9 @@ class CodeSpecialization {
// Update all direct call sites based on the code table in the given module.
void RelocateDirectCalls(NativeModule* module);
// Apply all relocations and patching to all code in the module.
bool ApplyToWholeModule(NativeModule*,
// Apply all relocations and patching to all code in the module (i.e. wasm
// code and exported function wrapper code).
bool ApplyToWholeModule(NativeModule*, Handle<WasmModuleObject>,
ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
// Apply all relocations and patching to one wasm code object.
bool ApplyToWasmCode(wasm::WasmCode*,
......
@@ -568,6 +568,56 @@ Handle<FixedArray> GetOrCreateInterpretedFunctions(
return new_arr;
}
using CodeRelocationMap = std::map<Address, Address>;
void RedirectCallsitesInCode(Isolate* isolate, const wasm::WasmCode* code,
CodeRelocationMap* map) {
DisallowHeapAllocation no_gc;
for (RelocIterator it(code->instructions(), code->reloc_info(),
code->constant_pool(),
RelocInfo::ModeMask(RelocInfo::WASM_CALL));
!it.done(); it.next()) {
Address target = it.rinfo()->target_address();
auto new_target = map->find(target);
if (new_target == map->end()) continue;
it.rinfo()->set_wasm_call_address(new_target->second);
}
}
void RedirectCallsitesInJSWrapperCode(Isolate* isolate, Code* code,
CodeRelocationMap* map) {
DisallowHeapAllocation no_gc;
for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
!it.done(); it.next()) {
Address target = it.rinfo()->js_to_wasm_address();
auto new_target = map->find(target);
if (new_target == map->end()) continue;
it.rinfo()->set_js_to_wasm_address(new_target->second);
}
}
void RedirectCallsitesInInstance(Isolate* isolate, WasmInstanceObject* instance,
CodeRelocationMap* map) {
DisallowHeapAllocation no_gc;
// Redirect all calls in wasm functions.
wasm::NativeModule* native_module =
instance->compiled_module()->GetNativeModule();
for (uint32_t i = native_module->num_imported_functions(),
e = native_module->num_functions();
i < e; ++i) {
wasm::WasmCode* code = native_module->code(i);
RedirectCallsitesInCode(isolate, code, map);
}
// TODO(6668): Find instances that imported our code and also patch those.
// Redirect all calls in exported functions.
FixedArray* export_wrapper = instance->module_object()->export_wrappers();
for (int i = 0, e = export_wrapper->length(); i != e; ++i) {
Code* code = Code::cast(export_wrapper->get(i));
RedirectCallsitesInJSWrapperCode(isolate, code, map);
}
}
} // namespace
Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
@@ -613,6 +663,7 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
wasm::NativeModule* native_module =
instance->compiled_module()->GetNativeModule();
wasm::WasmModule* module = instance->module();
CodeRelocationMap code_to_relocate;
// We may modify js wrappers, as well as wasm functions. Hence the 2
// modification scopes.
@@ -629,10 +680,16 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
isolate, func_index, module->functions[func_index].sig);
const wasm::WasmCode* wasm_new_code =
native_module->AddInterpreterEntry(new_code, func_index);
const wasm::WasmCode* old_code =
native_module->code(static_cast<uint32_t>(func_index));
Handle<Foreign> foreign_holder = isolate->factory()->NewForeign(
wasm_new_code->instruction_start(), TENURED);
interpreted_functions->set(func_index, *foreign_holder);
DCHECK_EQ(0, code_to_relocate.count(old_code->instruction_start()));
code_to_relocate.insert(std::make_pair(old_code->instruction_start(),
wasm_new_code->instruction_start()));
}
RedirectCallsitesInInstance(isolate, *instance, &code_to_relocate);
}
void WasmDebugInfo::PrepareStep(StepAction step_action) {
......
@@ -2676,23 +2676,18 @@ class ThreadImpl {
return {ExternalCallResult::INVALID_FUNC};
}
IndirectFunctionTableEntry entry(instance_object_, entry_index);
// Signature check.
if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
Handle<WasmInstanceObject> instance = handle(entry.instance(), isolate);
Address target = entry.target();
NativeModule* native_module =
isolate->wasm_engine()->code_manager()->LookupNativeModule(target);
WasmCode* code;
if (native_module->is_jump_table_slot(target)) {
uint32_t func_index =
native_module->GetFunctionIndexFromJumpTableSlot(target);
code = native_module->code(func_index);
} else {
code = native_module->Lookup(target);
Handle<WasmInstanceObject> instance;
{
IndirectFunctionTableEntry entry(instance_object_, entry_index);
// Signature check.
if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
instance = handle(entry.instance(), isolate);
code = isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
entry.target());
}
// Call either an internal or external WASM function.
......
@@ -169,6 +169,8 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_instances,
FixedArray, kIndirectFunctionTableInstancesOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, managed_native_allocations, Foreign,
kManagedNativeAllocationsOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, managed_indirect_patcher, Foreign,
kManagedIndirectPatcherOffset)
ACCESSORS(WasmInstanceObject, undefined_value, Oddball, kUndefinedValueOffset)
ACCESSORS(WasmInstanceObject, null_value, Oddball, kNullValueOffset)
ACCESSORS(WasmInstanceObject, centry_stub, Code, kCEntryStubOffset)
......
@@ -78,7 +78,7 @@ class IndirectFunctionTableEntry {
// - target = pointer to wasm-to-js wrapper code entrypoint
// - an imported wasm function from another instance, which has fields
// - instance = target instance
// - target = entrypoint for the function
// - target = entrypoint to wasm code of the function
class ImportedFunctionEntry {
public:
inline ImportedFunctionEntry(Handle<WasmInstanceObject>, int index);
@@ -388,6 +388,7 @@ class WasmInstanceObject : public JSObject {
DECL_ACCESSORS(imported_function_callables, FixedArray)
DECL_OPTIONAL_ACCESSORS(indirect_function_table_instances, FixedArray)
DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
DECL_OPTIONAL_ACCESSORS(managed_indirect_patcher, Foreign)
DECL_ACCESSORS(undefined_value, Oddball)
DECL_ACCESSORS(null_value, Oddball)
DECL_ACCESSORS(centry_stub, Code)
@@ -422,6 +423,7 @@
V(kImportedFunctionCallablesOffset, kPointerSize) \
V(kIndirectFunctionTableInstancesOffset, kPointerSize) \
V(kManagedNativeAllocationsOffset, kPointerSize) \
V(kManagedIndirectPatcherOffset, kPointerSize) \
V(kUndefinedValueOffset, kPointerSize) \
V(kNullValueOffset, kPointerSize) \
V(kCEntryStubOffset, kPointerSize) \
......
@@ -268,7 +268,7 @@ NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate,
}
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
if (code == nullptr) return sizeof(size_t);
if (code->kind() == WasmCode::kLazyStub) return sizeof(size_t);
DCHECK_EQ(WasmCode::kFunction, code->kind());
return kCodeHeaderSize + code->instructions().size() +
code->reloc_info().size() + code->source_positions().size() +
@@ -290,7 +290,7 @@ void NativeModuleSerializer::WriteHeader(Writer* writer) {
}
void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
if (code == nullptr) {
if (code->kind() == WasmCode::kLazyStub) {
writer->Write(size_t{0});
return;
}
@@ -500,8 +500,6 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
handler_table_offset, std::move(protected_instructions), tier,
WasmCode::kNoFlushICache);
native_module_->set_code(fn_index, ret);
native_module_->PatchJumpTable(fn_index, ret->instruction_start(),
WasmCode::kFlushICache);
// Relocate the code.
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
......
@@ -118,9 +118,9 @@ uint32_t TestingModuleBuilder::AddFunction(FunctionSig* sig, const char* name) {
Handle<JSFunction> TestingModuleBuilder::WrapCode(uint32_t index) {
// Wrap the code so it can be called as a JS function.
Link();
Address target = native_module_->GetCallTargetForFunction(index);
wasm::WasmCode* code = native_module_->code(index);
Handle<Code> ret_code = compiler::CompileJSToWasmWrapper(
isolate_, test_module_ptr_, target, index,
isolate_, test_module_ptr_, code->instruction_start(), index,
trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler : kNoTrapHandler);
Handle<JSFunction> ret = WasmExportedFunction::New(
isolate_, instance_object(), MaybeHandle<String>(),
@@ -165,9 +165,9 @@ void TestingModuleBuilder::PopulateIndirectFunctionTable() {
for (int j = 0; j < table_size; j++) {
WasmFunction& function = test_module_->functions[table.values[j]];
int sig_id = test_module_->signature_map.Find(function.sig);
auto target =
native_module_->GetCallTargetForFunction(function.func_index);
IndirectFunctionTableEntry(instance, j).set(sig_id, *instance, target);
auto wasm_code = native_module_->code(function.func_index);
IndirectFunctionTableEntry(instance, j)
.set(sig_id, *instance, wasm_code->instruction_start());
}
}
}
......
@@ -210,12 +210,14 @@ class TestingModuleBuilder {
return reinterpret_cast<Address>(globals_data_);
}
void Link() {
if (linked_) return;
CodeSpecialization code_specialization;
code_specialization.RelocateDirectCalls(native_module_);
code_specialization.ApplyToWholeModule(native_module_);
linked_ = true;
native_module_->SetExecutable(true);
if (!linked_) {
Handle<WasmModuleObject> module(instance_object()->module_object());
CodeSpecialization code_specialization;
code_specialization.RelocateDirectCalls(native_module_);
code_specialization.ApplyToWholeModule(native_module_, module);
linked_ = true;
native_module_->SetExecutable(true);
}
}
ModuleEnv CreateModuleEnv();
......
@@ -6,7 +6,6 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/wasm-code-manager.h"
namespace v8 {
@@ -144,10 +143,6 @@ enum ModuleStyle : int { Fixed = 0, Growable = 1 };
class WasmCodeManagerTest : public TestWithContext,
public ::testing::WithParamInterface<ModuleStyle> {
public:
static constexpr uint32_t kNumFunctions = 10;
static constexpr uint32_t kJumpTableSize = RoundUp<kCodeAlignment>(
kNumFunctions * JumpTableAssembler::kJumpTableSlotSize);
using NativeModulePtr = std::unique_ptr<NativeModule>;
NativeModulePtr AllocModule(WasmCodeManager* manager, size_t size,
@@ -155,8 +150,8 @@ class WasmCodeManagerTest : public TestWithContext,
bool can_request_more = style == Growable;
wasm::ModuleEnv env(nullptr, UseTrapHandler::kNoTrapHandler,
RuntimeExceptionSupport::kNoRuntimeExceptionSupport);
return manager->NewNativeModule(i_isolate(), size, kNumFunctions, 0,
can_request_more, env);
return manager->NewNativeModule(i_isolate(), size, 10, 0, can_request_more,
env);
}
WasmCode* AddCode(NativeModule* native_module, uint32_t index, size_t size) {
@@ -180,7 +175,9 @@ TEST_P(WasmCodeManagerTest, EmptyCase) {
WasmCodeManager manager(0 * page());
CHECK_EQ(0, manager.remaining_uncommitted_code_space());
ASSERT_DEATH_IF_SUPPORTED(AllocModule(&manager, 1 * page(), GetParam()),
NativeModulePtr native_module = AllocModule(&manager, 1 * page(), GetParam());
CHECK(native_module);
ASSERT_DEATH_IF_SUPPORTED(AddCode(native_module.get(), 0, 10),
"OOM in NativeModule::AddOwnedCode");
}
@@ -189,7 +186,7 @@ TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
CHECK_EQ(1 * page(), manager.remaining_uncommitted_code_space());
NativeModulePtr native_module = AllocModule(&manager, 1 * page(), GetParam());
CHECK(native_module);
CHECK_EQ(0, manager.remaining_uncommitted_code_space());
CHECK_EQ(1 * page(), manager.remaining_uncommitted_code_space());
uint32_t index = 0;
WasmCode* code = AddCode(native_module.get(), index++, 1 * kCodeAlignment);
CHECK_NOT_NULL(code);
@@ -199,8 +196,7 @@
CHECK_NOT_NULL(code);
CHECK_EQ(0, manager.remaining_uncommitted_code_space());
code = AddCode(native_module.get(), index++,
page() - 4 * kCodeAlignment - kJumpTableSize);
code = AddCode(native_module.get(), index++, page() - 4 * kCodeAlignment);
CHECK_NOT_NULL(code);
CHECK_EQ(0, manager.remaining_uncommitted_code_space());
@@ -210,14 +206,14 @@
}
TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
WasmCodeManager manager(3 * page());
NativeModulePtr nm1 = AllocModule(&manager, 2 * page(), GetParam());
NativeModulePtr nm2 = AllocModule(&manager, 2 * page(), GetParam());
WasmCodeManager manager(1 * page());
NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), GetParam());
NativeModulePtr nm2 = AllocModule(&manager, 1 * page(), GetParam());
CHECK(nm1);
CHECK(nm2);
WasmCode* code = AddCode(nm1.get(), 0, 2 * page() - kJumpTableSize);
WasmCode* code = AddCode(nm1.get(), 0, 1 * page());
CHECK_NOT_NULL(code);
ASSERT_DEATH_IF_SUPPORTED(AddCode(nm2.get(), 0, 2 * page() - kJumpTableSize),
ASSERT_DEATH_IF_SUPPORTED(AddCode(nm2.get(), 0, 1 * page()),
"OOM in NativeModule::AddOwnedCode");
}
@@ -228,10 +224,10 @@ TEST_P(WasmCodeManagerTest, DifferentHeapsApplyLimitsIndependently) {
NativeModulePtr nm2 = AllocModule(&manager2, 1 * page(), GetParam());
CHECK(nm1);
CHECK(nm2);
WasmCode* code = AddCode(nm1.get(), 0, 1 * page() - kJumpTableSize);
WasmCode* code = AddCode(nm1.get(), 0, 1 * page());
CHECK_NOT_NULL(code);
CHECK_EQ(0, manager1.remaining_uncommitted_code_space());
code = AddCode(nm2.get(), 0, 1 * page() - kJumpTableSize);
code = AddCode(nm2.get(), 0, 1 * page());
CHECK_NOT_NULL(code);
}
@@ -256,7 +252,7 @@ TEST_P(WasmCodeManagerTest, CommitIncrements) {
code = AddCode(nm.get(), 1, 2 * page());
CHECK_NOT_NULL(code);
CHECK_EQ(manager.remaining_uncommitted_code_space(), 7 * page());
code = AddCode(nm.get(), 2, page() - kCodeAlignment - kJumpTableSize);
code = AddCode(nm.get(), 2, page() - kCodeAlignment);
CHECK_NOT_NULL(code);
CHECK_EQ(manager.remaining_uncommitted_code_space(), 7 * page());
}
......