Commit 4cbec82c authored by Ben L. Titzer, committed by Commit Bot

[wasm] Use a name for the lazy compile func index

Pure refactoring CL. Introduce a symbolic name for the register
used to hold the function index when calling the lazy compile stub.
This makes the contract easier to see when reading the macro
assembler code.
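
To illustrate the contract, here is a condensed sketch (not a verbatim excerpt of this CL; it abbreviates the ia32 changes in the diff below): the jump table trampoline writes the function index into the named register, and the lazy-compile builtin reads it back through the same symbolic constant.

// Sketch only, assuming the ia32 definition introduced by this CL:
//   constexpr Register kWasmCompileLazyFuncIndexRegister = edi;

// Producer side: the lazy-compile jump table slot materializes the index.
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  mov(kWasmCompileLazyFuncIndexRegister, func_index);
  jmp(lazy_compile_target, RelocInfo::NONE);
}

// Consumer side: the builtin tags the index and hands it to the runtime.
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
  __ SmiTag(kWasmCompileLazyFuncIndexRegister);
  // ... push kWasmInstanceRegister and the tagged index, call the runtime ...
}

With the constant defined once per architecture, changing the register only touches the header definition; the builtin and the jump table assembler stay in sync through the shared name.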

R=ahaas@chromium.org
CC=clemensh@chromium.org

Change-Id: I714f978883ced001a1435338dcefd96744bfb2ae
Reviewed-on: https://chromium-review.googlesource.com/c/1273099
Commit-Queue: Ben Titzer <titzer@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56516}
parent ed93fc67
@@ -38,6 +38,7 @@ constexpr Register kRuntimeCallFunctionRegister = r1;
 constexpr Register kRuntimeCallArgCountRegister = r0;
 constexpr Register kRuntimeCallArgvRegister = r2;
 constexpr Register kWasmInstanceRegister = r3;
+constexpr Register kWasmLazyCompileFuncIndexRegister = r4;
 // ----------------------------------------------------------------------------
 // Static helper functions
......
@@ -65,6 +65,7 @@ constexpr Register kRuntimeCallFunctionRegister = x1;
 constexpr Register kRuntimeCallArgCountRegister = x0;
 constexpr Register kRuntimeCallArgvRegister = x11;
 constexpr Register kWasmInstanceRegister = x7;
+constexpr Register kWasmCompileLazyFuncIndexRegister = x8;
 #define LS_MACRO_LIST(V)            \
   V(Ldrb, Register&, rt, LDRB_w)    \
......
@@ -2187,9 +2187,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
 }
 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
-  // The function index was put in r4 by the jump table trampoline.
+  // The function index was put in a register by the jump table trampoline.
   // Convert to Smi for the runtime call.
-  __ SmiTag(r4, r4);
+  __ SmiTag(kWasmLazyCompileFuncIndexRegister,
+            kWasmLazyCompileFuncIndexRegister);
   {
     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
     FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2207,7 +2208,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     // Pass instance and function index as explicit arguments to the runtime
     // function.
     __ push(kWasmInstanceRegister);
-    __ push(r4);
+    __ push(kWasmLazyCompileFuncIndexRegister);
     // Load the correct CEntry builtin from the instance object.
     __ ldr(r2, FieldMemOperand(kWasmInstanceRegister,
                                WasmInstanceObject::kCEntryStubOffset));
......
@@ -2647,8 +2647,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   // The function index was put in w8 by the jump table trampoline.
   // Sign extend and convert to Smi for the runtime call.
-  __ sxtw(x8, w8);
-  __ SmiTag(x8, x8);
+  __ sxtw(kWasmCompileLazyFuncIndexRegister,
+          kWasmCompileLazyFuncIndexRegister.W());
+  __ SmiTag(kWasmCompileLazyFuncIndexRegister,
+            kWasmCompileLazyFuncIndexRegister);
   {
     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
     FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2665,7 +2667,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     // Pass instance and function index as explicit arguments to the runtime
     // function.
-    __ Push(kWasmInstanceRegister, x8);
+    __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
     // Load the correct CEntry builtin from the instance object.
     __ Ldr(x2, FieldMemOperand(kWasmInstanceRegister,
                                WasmInstanceObject::kCEntryStubOffset));
......
@@ -2482,7 +2482,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   // The function index was put in edi by the jump table trampoline.
   // Convert to Smi for the runtime call.
-  __ SmiTag(edi);
+  __ SmiTag(kWasmCompileLazyFuncIndexRegister);
   {
     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
     FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2510,7 +2510,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     // Push the WASM instance as an explicit argument to WasmCompileLazy.
     __ Push(kWasmInstanceRegister);
     // Push the function index as second argument.
-    __ Push(edi);
+    __ Push(kWasmCompileLazyFuncIndexRegister);
     // Load the correct CEntry builtin from the instance object.
     __ mov(ecx, FieldOperand(kWasmInstanceRegister,
                              WasmInstanceObject::kCEntryStubOffset));
......
@@ -2245,7 +2245,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   // The function index was put in t0 by the jump table trampoline.
   // Convert to Smi for the runtime call.
-  __ SmiTag(t0);
+  __ SmiTag(kWasmCompileLazyFuncIndexRegister);
   {
     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
     FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2261,7 +2261,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     // Pass instance and function index as an explicit arguments to the runtime
     // function.
-    __ Push(kWasmInstanceRegister, t0);
+    __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
     // Load the correct CEntry builtin from the instance object.
     __ lw(a2, FieldMemOperand(kWasmInstanceRegister,
                               WasmInstanceObject::kCEntryStubOffset));
......
@@ -2262,7 +2262,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   // The function index was put in t0 by the jump table trampoline.
   // Convert to Smi for the runtime call
-  __ SmiTag(t0);
+  __ SmiTag(kWasmCompileLazyFuncIndexRegister);
   {
     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
     FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2279,7 +2279,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     // Pass instance and function index as an explicit arguments to the runtime
     // function.
-    __ Push(kWasmInstanceRegister, t0);
+    __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
     // Load the correct CEntry builtin from the instance object.
     __ Ld(a2, FieldMemOperand(kWasmInstanceRegister,
                               WasmInstanceObject::kCEntryStubOffset));
......
@@ -2269,9 +2269,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
 }
 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
-  // The function index was put in r15 by the jump table trampoline.
+  // The function index was put in a register by the jump table trampoline.
   // Convert to Smi for the runtime call.
-  __ SmiTag(r15, r15);
+  __ SmiTag(kWasmCompileLazyFuncIndexRegister,
+            kWasmCompileLazyFuncIndexRegister);
   {
     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
     FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2288,7 +2289,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
     // Pass instance and function index as explicit arguments to the runtime
     // function.
-    __ Push(kWasmInstanceRegister, r15);
+    __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
     // Load the correct CEntry builtin from the instance object.
     __ LoadP(r5, FieldMemOperand(kWasmInstanceRegister,
                                  WasmInstanceObject::kCEntryStubOffset));
......
@@ -45,6 +45,7 @@ constexpr Register kRuntimeCallFunctionRegister = edx;
 constexpr Register kRuntimeCallArgCountRegister = eax;
 constexpr Register kRuntimeCallArgvRegister = ecx;
 constexpr Register kWasmInstanceRegister = esi;
+constexpr Register kWasmCompileLazyFuncIndexRegister = edi;
 // TODO(v8:6666): Implement full support.
 constexpr Register kRootRegister = ebx;
......
@@ -37,6 +37,7 @@ constexpr Register kRuntimeCallFunctionRegister = a1;
 constexpr Register kRuntimeCallArgCountRegister = a0;
 constexpr Register kRuntimeCallArgvRegister = a2;
 constexpr Register kWasmInstanceRegister = a0;
+constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
 // Forward declarations
 enum class AbortReason : uint8_t;
......
@@ -37,6 +37,7 @@ constexpr Register kRuntimeCallFunctionRegister = a1;
 constexpr Register kRuntimeCallArgCountRegister = a0;
 constexpr Register kRuntimeCallArgvRegister = a2;
 constexpr Register kWasmInstanceRegister = a0;
+constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
 // Forward declarations.
 enum class AbortReason : uint8_t;
......
@@ -39,6 +39,7 @@ constexpr Register kRuntimeCallFunctionRegister = r4;
 constexpr Register kRuntimeCallArgCountRegister = r3;
 constexpr Register kRuntimeCallArgvRegister = r5;
 constexpr Register kWasmInstanceRegister = r10;
+constexpr Register kWasmCompileLazyFuncIndexRegister = r15;
 // ----------------------------------------------------------------------------
 // Static helper functions
......
@@ -53,7 +53,7 @@ void JumpTableAssembler::NopBytes(int bytes) {
 #elif V8_TARGET_ARCH_IA32
 void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                  Address lazy_compile_target) {
-  mov(edi, func_index);                       // 5 bytes
+  mov(kWasmCompileLazyFuncIndexRegister, func_index);  // 5 bytes
   jmp(lazy_compile_target, RelocInfo::NONE);  // 5 bytes
 }
@@ -69,10 +69,10 @@ void JumpTableAssembler::NopBytes(int bytes) {
 #elif V8_TARGET_ARCH_ARM
 void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                  Address lazy_compile_target) {
-  // Load function index to r4.
+  // Load function index to a register.
   // This generates [movw, movt] on ARMv7 and later, [ldr, constant pool marker,
   // constant] on ARMv6.
-  Move32BitImmediate(r4, Operand(func_index));
+  Move32BitImmediate(kWasmLazyCompileFuncIndexRegister, Operand(func_index));
   // EmitJumpSlot emits either [b], [movw, movt, mov] (ARMv7+), or [ldr,
   // constant].
   // In total, this is <=5 instructions on all architectures.
@@ -99,7 +99,7 @@ void JumpTableAssembler::NopBytes(int bytes) {
 #elif V8_TARGET_ARCH_ARM64
 void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                  Address lazy_compile_target) {
-  Mov(w8, func_index);                         // max. 2 instr
+  Mov(kWasmCompileLazyFuncIndexRegister.W(), func_index);  // max. 2 instr
   Jump(lazy_compile_target, RelocInfo::NONE);  // 1 instr
 }
@@ -144,7 +144,7 @@ void JumpTableAssembler::NopBytes(int bytes) {
 #elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
 void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                  Address lazy_compile_target) {
-  li(t0, func_index);  // max. 2 instr
+  li(kWasmCompileLazyFuncIndexRegister, func_index);  // max. 2 instr
   // Jump produces max. 4 instructions for 32-bit platform
   // and max. 6 instructions for 64-bit platform.
   Jump(lazy_compile_target, RelocInfo::NONE);
@@ -165,8 +165,8 @@ void JumpTableAssembler::NopBytes(int bytes) {
 #elif V8_TARGET_ARCH_PPC
 void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                  Address lazy_compile_target) {
-  // Load function index to r8. max 5 instrs
-  mov(r15, Operand(func_index));
+  // Load function index to register. max 5 instrs
+  mov(kWasmCompileLazyFuncIndexRegister, Operand(func_index));
   // Jump to {lazy_compile_target}. max 5 instrs
   mov(r0, Operand(lazy_compile_target));
   mtctr(r0);
......