Commit e3be96d6 authored by Joey Gouly, committed by Commit Bot

[liftoff] Introduce emit_{i64,i32}_add with immediate

This allows immediates to be encoded directly into instructions, rather than
mov-ing constants to registers first.

This patch only changes emit_{i64,i32}_add; other emit_ functions will be
changed once this approach has been approved.

Bug: v8:9038

Change-Id: I0c7306c2da0dae26f1c6e2465a9565adbf0bda84
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1524482
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60506}
parent 45689a1c
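To make the effect concrete, here is an illustrative sketch (not part of the
commit; the registers and constant are made up, shown for ia32): for a wasm
sequence like local.get 0 / i32.const 64 / i32.add, Liftoff previously had to
materialize the constant before adding, whereas the new overloads fold it into
the instruction encoding.

  // Before: the constant is mov-ed to a scratch register first.
  //   mov ecx, 64
  //   add eax, ecx
  // After: emit_i32_add(dst, lhs, imm) encodes the immediate directly.
  //   add eax, 64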
@@ -139,6 +139,26 @@ inline void I64Binop(LiftoffAssembler* assm, LiftoffRegister dst,
  }
}
template <void (Assembler::*op)(Register, Register, const Operand&, SBit,
                                Condition),
          void (Assembler::*op_with_carry)(Register, Register, const Operand&,
                                           SBit, Condition)>
inline void I64BinopI(LiftoffAssembler* assm, LiftoffRegister dst,
                      LiftoffRegister lhs, int32_t imm) {
  UseScratchRegisterScope temps(assm);
  Register scratch = dst.low_gp();
  bool can_use_dst = dst.low_gp() != lhs.high_gp();
  if (!can_use_dst) {
    scratch = temps.Acquire();
  }
  (assm->*op)(scratch, lhs.low_gp(), Operand(imm), SetCC, al);
  // Top half of the immediate is 0.
  (assm->*op_with_carry)(dst.high_gp(), lhs.high_gp(), Operand(0), LeaveCC, al);
  if (!can_use_dst) {
    assm->mov(dst.low_gp(), scratch);
  }
}
template <void (TurboAssembler::*op)(Register, Register, Register, Register,
                                     Register),
          bool is_left_shift>
@@ -658,6 +678,10 @@ FP64_UNOP(f64_sqrt, vsqrt)
#undef FP64_UNOP
#undef FP64_BINOP
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
  add(dst, lhs, Operand(imm));
}
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
  clz(dst, src);
  return true;
@@ -790,6 +814,11 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
  liftoff::I64Binop<&Assembler::add, &Assembler::adc>(this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
                                    int32_t imm) {
  liftoff::I64BinopI<&Assembler::add, &Assembler::adc>(this, dst, lhs, imm);
}
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  liftoff::I64Binop<&Assembler::sub, &Assembler::sbc>(this, dst, lhs, rhs);
...
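An aside on the arm I64BinopI above: the immediate is only ever 32 bits wide,
so it is added to the low word with flags set (SetCC), and the high word then
adds Operand(0) plus the carry. A minimal C++ sketch of the same arithmetic,
assuming a non-negative immediate as the "top half of the immediate is 0"
comment presumes (lhs_lo/lhs_hi are hypothetical names for the two halves):

  // Mirrors the adds/adc pair: the low word takes the immediate, the high
  // word takes only the carry out of the low-word addition.
  uint32_t lo = lhs_lo + static_cast<uint32_t>(imm);
  uint32_t carry = lo < lhs_lo ? 1u : 0u;  // unsigned overflow == carry
  uint32_t hi = lhs_hi + 0u + carry;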
@@ -580,6 +580,15 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
  Msub(dst_w, scratch, rhs_w, lhs_w);
}
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
                                    int32_t imm) {
  Add(dst.gp().X(), lhs.gp().X(), Immediate(imm));
}

void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
  Add(dst.W(), lhs.W(), Immediate(imm));
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero,
...
@@ -523,6 +523,14 @@ void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
  }
}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
  if (lhs != dst) {
    lea(dst, Operand(lhs, imm));
  } else {
    add(dst, Immediate(imm));
  }
}
void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
  if (dst != rhs) {
    // Default path.
@@ -793,6 +801,36 @@ inline void OpWithCarry(LiftoffAssembler* assm, LiftoffRegister dst,
  LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
  if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64);
}
template <void (Assembler::*op)(Register, const Immediate&),
          void (Assembler::*op_with_carry)(Register, int32_t)>
inline void OpWithCarryI(LiftoffAssembler* assm, LiftoffRegister dst,
                         LiftoffRegister lhs, int32_t imm) {
  // First, compute the low half of the result, potentially into a temporary
  // dst register if {dst.low_gp()} equals any register we need to keep alive
  // for computing the upper half.
  LiftoffRegList keep_alive = LiftoffRegList::ForRegs(lhs.high_gp());
  Register dst_low = keep_alive.has(dst.low_gp())
                         ? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
                         : dst.low_gp();

  if (dst_low != lhs.low_gp()) assm->mov(dst_low, lhs.low_gp());
  (assm->*op)(dst_low, Immediate(imm));

  // Now compute the upper half, while keeping alive the previous result.
  keep_alive = LiftoffRegList::ForRegs(dst_low);
  Register dst_high = keep_alive.has(dst.high_gp())
                          ? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
                          : dst.high_gp();

  if (dst_high != lhs.high_gp()) assm->mov(dst_high, lhs.high_gp());
  // Top half of the immediate is 0.
  (assm->*op_with_carry)(dst_high, 0);

  // If necessary, move result into the right registers.
  LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
  if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64);
}
}  // namespace liftoff

void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -800,6 +838,11 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
  liftoff::OpWithCarry<&Assembler::add, &Assembler::adc>(this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
                                    int32_t imm) {
  liftoff::OpWithCarryI<&Assembler::add, &Assembler::adc>(this, dst, lhs, imm);
}
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  liftoff::OpWithCarry<&Assembler::sub, &Assembler::sbb>(this, dst, lhs, rhs);
...
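The keep-alive dance in OpWithCarryI above exists because ia32 models i64
values as pairs of 32-bit registers, and dst may alias parts of lhs. A compact
illustration (hypothetical register assignment):

  // dst = (low: eax, high: ecx), lhs = (low: ecx, high: eax).
  // keep_alive = {eax}; dst.low_gp() == eax is in keep_alive, so the low
  // result is computed into an unused register instead -- writing eax first
  // would clobber lhs.high_gp() before the carried high-half op runs -- and
  // the pair is moved into dst at the end.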
@@ -389,6 +389,7 @@ class LiftoffAssembler : public TurboAssembler {
  // i32 binops.
  inline void emit_i32_add(Register dst, Register lhs, Register rhs);
  inline void emit_i32_add(Register dst, Register lhs, int32_t imm);
  inline void emit_i32_sub(Register dst, Register lhs, Register rhs);
  inline void emit_i32_mul(Register dst, Register lhs, Register rhs);
  inline void emit_i32_divs(Register dst, Register lhs, Register rhs,
@@ -419,6 +420,8 @@ class LiftoffAssembler : public TurboAssembler {
  // i64 binops.
  inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
                           int32_t imm);
  inline void emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -481,6 +484,14 @@ class LiftoffAssembler : public TurboAssembler {
    }
  }
  inline void emit_ptrsize_add(Register dst, Register lhs, int32_t imm) {
    if (kSystemPointerSize == 8) {
      emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs), imm);
    } else {
      emit_i32_add(dst, lhs, imm);
    }
  }
  // f32 binops.
  inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
...
@@ -785,6 +785,37 @@ class LiftoffCompiler {
#undef CASE_TYPE_CONVERSION
  }
  template <ValueType src_type, ValueType result_type, typename EmitFn,
            typename EmitFnImm>
  void EmitBinOpImm(EmitFn fn, EmitFnImm fnImm) {
    static constexpr RegClass src_rc = reg_class_for(src_type);
    static constexpr RegClass result_rc = reg_class_for(result_type);

    LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back();
    // Check if the RHS is an immediate.
    if (rhs_slot.loc() == LiftoffAssembler::VarState::kIntConst) {
      __ cache_state()->stack_state.pop_back();
      int32_t imm = rhs_slot.i32_const();

      LiftoffRegister lhs = __ PopToRegister();
      LiftoffRegister dst = src_rc == result_rc
                                ? __ GetUnusedRegister(result_rc, {lhs})
                                : __ GetUnusedRegister(result_rc);

      fnImm(dst, lhs, imm);
      __ PushRegister(result_type, dst);
    } else {
      // The RHS was not an immediate.
      LiftoffRegister rhs = __ PopToRegister();
      LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
      LiftoffRegister dst = src_rc == result_rc
                                ? __ GetUnusedRegister(result_rc, {lhs, rhs})
                                : __ GetUnusedRegister(result_rc);
      fn(dst, lhs, rhs);
      __ PushRegister(result_type, dst);
    }
  }
  template <ValueType src_type, ValueType result_type, typename EmitFn>
  void EmitBinOp(EmitFn fn) {
    static constexpr RegClass src_rc = reg_class_for(src_type);
@@ -830,12 +861,30 @@ class LiftoffCompiler {
        [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
          __ emit_##fn(dst.gp(), lhs.gp(), rhs.gp());                        \
        });
#define CASE_I32_BINOPI(opcode, fn)                                          \
  case WasmOpcode::kExpr##opcode:                                            \
    return EmitBinOpImm<kWasmI32, kWasmI32>(                                 \
        [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
          __ emit_##fn(dst.gp(), lhs.gp(), rhs.gp());                        \
        },                                                                   \
        [=](LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) {         \
          __ emit_##fn(dst.gp(), lhs.gp(), imm);                             \
        });
#define CASE_I64_BINOP(opcode, fn)                                           \
  case WasmOpcode::kExpr##opcode:                                            \
    return EmitBinOp<kWasmI64, kWasmI64>(                                    \
        [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
          __ emit_##fn(dst, lhs, rhs);                                       \
        });
#define CASE_I64_BINOPI(opcode, fn)                                          \
  case WasmOpcode::kExpr##opcode:                                            \
    return EmitBinOpImm<kWasmI64, kWasmI64>(                                 \
        [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
          __ emit_##fn(dst, lhs, rhs);                                       \
        },                                                                   \
        [=](LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) {         \
          __ emit_##fn(dst, lhs, imm);                                       \
        });
#define CASE_FLOAT_BINOP(opcode, type, fn)                                   \
  case WasmOpcode::kExpr##opcode:                                            \
    return EmitBinOp<kWasm##type, kWasm##type>(                              \
@@ -891,7 +940,7 @@ class LiftoffCompiler {
          GenerateCCall(&dst, &sig_i_ii, kWasmStmt, args, ext_ref);          \
        });
    switch (opcode) {
      CASE_I32_BINOPI(I32Add, i32_add)
      CASE_I32_BINOP(I32Sub, i32_sub)
      CASE_I32_BINOP(I32Mul, i32_mul)
      CASE_I32_BINOP(I32And, i32_and)
@@ -910,7 +959,7 @@ class LiftoffCompiler {
      CASE_I32_CMPOP(I32LeU, kUnsignedLessEqual)
      CASE_I32_CMPOP(I32GeS, kSignedGreaterEqual)
      CASE_I32_CMPOP(I32GeU, kUnsignedGreaterEqual)
      CASE_I64_BINOPI(I64Add, i64_add)
      CASE_I64_BINOP(I64Sub, i64_sub)
      CASE_I64_BINOP(I64Mul, i64_mul)
      CASE_I64_CMPOP(I64Eq, kEqual)
@@ -1060,7 +1109,9 @@ class LiftoffCompiler {
        return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
    }
#undef CASE_I32_BINOP
#undef CASE_I32_BINOPI
#undef CASE_I64_BINOP
#undef CASE_I64_BINOPI
#undef CASE_FLOAT_BINOP
#undef CASE_I32_CMPOP
#undef CASE_I64_CMPOP
@@ -1553,8 +1604,7 @@ class LiftoffCompiler {
      if (index != old_index) __ Move(index, old_index, kWasmI32);
    }
    Register tmp = __ GetUnusedRegister(kGpReg, pinned).gp();
    __ emit_ptrsize_add(index, index, *offset);
    LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize);
    __ emit_ptrsize_and(index, index, tmp);
    *offset = 0;
...
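Note the effect at this first call site: the bounds check previously loaded
the memory offset as a constant and then added it by register, i.e.
(reconstructed from the left-hand side of the diff)

  __ LoadConstant(LiftoffRegister(tmp), WasmValue(*offset));
  __ emit_ptrsize_add(index, index, tmp);

which now collapses to a single

  __ emit_ptrsize_add(index, index, *offset);

saving one constant load per memory access with a static offset.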
@@ -434,6 +434,14 @@ void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
  }
}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
  if (lhs != dst) {
    leal(dst, Operand(lhs, imm));
  } else {
    addl(dst, Immediate(imm));
  }
}
void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
  if (dst != rhs) {
    // Default path.
@@ -704,6 +712,15 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
  }
}
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
                                    int32_t imm) {
  if (lhs.gp() != dst.gp()) {
    leaq(dst.gp(), Operand(lhs.gp(), imm));
  } else {
    addq(dst.gp(), Immediate(imm));
  }
}
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  if (dst.gp() == rhs.gp()) {
...
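One more detail in the x64 (and matching ia32) lowering above: when dst and
lhs differ, lea performs the add and the move in a single instruction; only
when they alias does the code fall back to an in-place add. Illustrative
encodings (registers made up):

  // dst != lhs:  leaq rdx, [rax + 64]   (add + move combined)
  // dst == lhs:  addq rax, 64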