Commit 403cac98 authored by Joey Gouly, committed by Commit Bot

[liftoff] Optimise {i32,i64}_{and,or,xor} with immediates

This is an extension of 138d2dfc.

Change-Id: Icb10aab6e6799ab4f45dcbd26fc69206dbef29bb
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1588430
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61131}
parent 074cf1c3
......@@ -153,8 +153,9 @@ inline void I64BinopI(LiftoffAssembler* assm, LiftoffRegister dst,
}
(assm->*op)(scratch, lhs.low_gp(), Operand(imm), SetCC, al);
// Top half of the immediate sign extended, either 0 or -1.
(assm->*op_with_carry)(dst.high_gp(), lhs.high_gp(),
Operand(imm < 0 ? -1 : 0), LeaveCC, al);
int32_t sign_extend = imm < 0 ? -1 : 0;
(assm->*op_with_carry)(dst.high_gp(), lhs.high_gp(), Operand(sign_extend),
LeaveCC, al);
if (!can_use_dst) {
assm->mov(dst.low_gp(), scratch);
}
......@@ -618,6 +619,12 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
Register rhs) { \
instruction(dst, lhs, rhs); \
}
// Generates both variants of a 32-bit binop: the register-register form (via
// I32_BINOP) and a register-immediate form, where the immediate is passed to
// the underlying instruction wrapped in an Operand.
#define I32_BINOP_I(name, instruction) \
I32_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
int32_t imm) { \
instruction(dst, lhs, Operand(imm)); \
}
#define I32_SHIFTOP(name, instruction) \
void LiftoffAssembler::emit_##name(Register dst, Register src, \
Register amount, LiftoffRegList pinned) { \
......@@ -648,12 +655,12 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
instruction(dst, lhs, rhs); \
}
I32_BINOP(i32_add, add)
I32_BINOP_I(i32_add, add)
I32_BINOP(i32_sub, sub)
I32_BINOP(i32_mul, mul)
I32_BINOP(i32_and, and_)
I32_BINOP(i32_or, orr)
I32_BINOP(i32_xor, eor)
I32_BINOP_I(i32_and, and_)
I32_BINOP_I(i32_or, orr)
I32_BINOP_I(i32_xor, eor)
I32_SHIFTOP(i32_shl, lsl)
I32_SHIFTOP(i32_sar, asr)
I32_SHIFTOP(i32_shr, lsr)
......@@ -679,10 +686,6 @@ FP64_UNOP(f64_sqrt, vsqrt)
#undef FP64_UNOP
#undef FP64_BINOP
// i32.add with an immediate right-hand side: dst = lhs + imm.
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
add(dst, lhs, Operand(imm));
}
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
clz(dst, src);
return true;
......
......@@ -382,11 +382,23 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
Register rhs) { \
instruction(dst.W(), lhs.W(), rhs.W()); \
}
// Generates both variants of a 32-bit binop: the register-register form (via
// I32_BINOP) and a register-immediate form. Operands use the 32-bit (W) views
// of the registers, and the immediate is wrapped in an Immediate.
#define I32_BINOP_I(name, instruction) \
I32_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
int32_t imm) { \
instruction(dst.W(), lhs.W(), Immediate(imm)); \
}
// Generates a 64-bit register-register binop using the full-width (X) views
// of the operands' general-purpose registers.
#define I64_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
instruction(dst.gp().X(), lhs.gp().X(), rhs.gp().X()); \
}
// Generates both variants of a 64-bit binop: the register-register form (via
// I64_BINOP) and a register-immediate form. The 32-bit immediate is passed
// through to the assembler instruction directly.
#define I64_BINOP_I(name, instruction) \
I64_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
int32_t imm) { \
instruction(dst.gp().X(), lhs.gp().X(), imm); \
}
#define FP32_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
......@@ -439,21 +451,21 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
instruction(dst.gp().X(), src.gp().X(), amount); \
}
I32_BINOP(i32_add, Add)
I32_BINOP_I(i32_add, Add)
I32_BINOP(i32_sub, Sub)
I32_BINOP(i32_mul, Mul)
I32_BINOP(i32_and, And)
I32_BINOP(i32_or, Orr)
I32_BINOP(i32_xor, Eor)
I32_BINOP_I(i32_and, And)
I32_BINOP_I(i32_or, Orr)
I32_BINOP_I(i32_xor, Eor)
I32_SHIFTOP(i32_shl, Lsl)
I32_SHIFTOP(i32_sar, Asr)
I32_SHIFTOP_I(i32_shr, Lsr)
I64_BINOP(i64_add, Add)
I64_BINOP_I(i64_add, Add)
I64_BINOP(i64_sub, Sub)
I64_BINOP(i64_mul, Mul)
I64_BINOP(i64_and, And)
I64_BINOP(i64_or, Orr)
I64_BINOP(i64_xor, Eor)
I64_BINOP_I(i64_and, And)
I64_BINOP_I(i64_or, Orr)
I64_BINOP_I(i64_xor, Eor)
I64_SHIFTOP(i64_shl, Lsl)
I64_SHIFTOP(i64_sar, Asr)
I64_SHIFTOP_I(i64_shr, Lsr)
......@@ -580,15 +592,6 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Msub(dst_w, scratch, rhs_w, lhs_w);
}
// i64.add with an immediate right-hand side, on the full-width (X) registers.
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm) {
Add(dst.gp().X(), lhs.gp().X(), Immediate(imm));
}
// i32.add with an immediate right-hand side, on the 32-bit (W) registers.
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
Add(dst.W(), lhs.W(), Immediate(imm));
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
......
......@@ -557,6 +557,13 @@ void EmitCommutativeBinOp(LiftoffAssembler* assm, Register dst, Register lhs,
(assm->*op)(dst, rhs);
}
}
// Emits the commutative binary operation {op} of {lhs} and the immediate
// {imm}, writing the result to {dst}. If {dst} and {lhs} differ, {lhs} is
// first copied into {dst}; {op} then updates {dst} in place.
template <void (Assembler::*op)(Register, int32_t)>
void EmitCommutativeBinOpImm(LiftoffAssembler* assm, Register dst, Register lhs,
                             int32_t imm) {
  if (lhs != dst) {
    assm->mov(dst, lhs);
  }
  (assm->*op)(dst, imm);
}
} // namespace liftoff
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
......@@ -659,14 +666,26 @@ void LiftoffAssembler::emit_i32_and(Register dst, Register lhs, Register rhs) {
liftoff::EmitCommutativeBinOp<&Assembler::and_>(this, dst, lhs, rhs);
}
// i32.and with an immediate rhs, lowered via the commutative-binop helper.
void LiftoffAssembler::emit_i32_and(Register dst, Register lhs, int32_t imm) {
liftoff::EmitCommutativeBinOpImm<&Assembler::and_>(this, dst, lhs, imm);
}
// i32.or of two registers, lowered via the commutative-binop helper.
void LiftoffAssembler::emit_i32_or(Register dst, Register lhs, Register rhs) {
liftoff::EmitCommutativeBinOp<&Assembler::or_>(this, dst, lhs, rhs);
}
// i32.or with an immediate rhs, lowered via the commutative-binop helper.
void LiftoffAssembler::emit_i32_or(Register dst, Register lhs, int32_t imm) {
liftoff::EmitCommutativeBinOpImm<&Assembler::or_>(this, dst, lhs, imm);
}
// i32.xor of two registers, lowered via the commutative-binop helper.
void LiftoffAssembler::emit_i32_xor(Register dst, Register lhs, Register rhs) {
liftoff::EmitCommutativeBinOp<&Assembler::xor_>(this, dst, lhs, rhs);
}
// i32.xor with an immediate rhs, lowered via the commutative-binop helper.
void LiftoffAssembler::emit_i32_xor(Register dst, Register lhs, int32_t imm) {
liftoff::EmitCommutativeBinOpImm<&Assembler::xor_>(this, dst, lhs, imm);
}
namespace liftoff {
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
Register src, Register amount,
......@@ -825,7 +844,8 @@ inline void OpWithCarryI(LiftoffAssembler* assm, LiftoffRegister dst,
if (dst_high != lhs.high_gp()) assm->mov(dst_high, lhs.high_gp());
// Top half of the immediate sign extended, either 0 or -1.
(assm->*op_with_carry)(dst_high, imm < 0 ? -1 : 0);
int32_t sign_extend = imm < 0 ? -1 : 0;
(assm->*op_with_carry)(dst_high, sign_extend);
// If necessary, move result into the right registers.
LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
......
......@@ -402,8 +402,11 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_rem_by_zero);
inline void emit_i32_and(Register dst, Register lhs, Register rhs);
inline void emit_i32_and(Register dst, Register lhs, int32_t imm);
inline void emit_i32_or(Register dst, Register lhs, Register rhs);
inline void emit_i32_or(Register dst, Register lhs, int32_t imm);
inline void emit_i32_xor(Register dst, Register lhs, Register rhs);
inline void emit_i32_xor(Register dst, Register lhs, int32_t imm);
inline void emit_i32_shl(Register dst, Register src, Register amount,
LiftoffRegList pinned = {});
inline void emit_i32_sar(Register dst, Register src, Register amount,
......@@ -437,10 +440,16 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs, Label* trap_rem_by_zero);
inline void emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm);
inline void emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm);
inline void emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm);
inline void emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
Register amount, LiftoffRegList pinned = {});
inline void emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
......@@ -686,6 +695,34 @@ void EmitI64IndependentHalfOperation(LiftoffAssembler* assm,
(assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
assm->Move(dst.low_gp(), tmp, kWasmI32);
}
// Applies the 32-bit immediate operation {op} independently to both halves of
// the i64 register pair {lhs}: the low half is combined with {imm}, the high
// half with the sign extension of {imm}. The order of the two applications is
// chosen so that no half of {lhs} is clobbered before it has been read.
template <void (LiftoffAssembler::*op)(Register, Register, int32_t)>
void EmitI64IndependentHalfOperationImm(LiftoffAssembler* assm,
LiftoffRegister dst,
LiftoffRegister lhs, int32_t imm) {
// Top half of the immediate sign extended, either 0 or -1.
int32_t sign_extend = imm < 0 ? -1 : 0;
// If {dst.low_gp()} does not overlap with {lhs.high_gp()},
// just first compute the lower half, then the upper half.
if (dst.low() != lhs.high()) {
(assm->*op)(dst.low_gp(), lhs.low_gp(), imm);
(assm->*op)(dst.high_gp(), lhs.high_gp(), sign_extend);
return;
}
// If {dst.high_gp()} does not overlap with {lhs.low_gp()},
// we can compute this the other way around.
if (dst.high() != lhs.low()) {
(assm->*op)(dst.high_gp(), lhs.high_gp(), sign_extend);
(assm->*op)(dst.low_gp(), lhs.low_gp(), imm);
return;
}
// Otherwise, we need a temporary register. {lhs} is pinned so the scratch
// register cannot alias either of its halves.
Register tmp =
assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs)).gp();
(assm->*op)(tmp, lhs.low_gp(), imm);
(assm->*op)(dst.high_gp(), lhs.high_gp(), sign_extend);
assm->Move(dst.low_gp(), tmp, kWasmI32);
}
} // namespace liftoff
void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
......@@ -694,18 +731,36 @@ void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
// i64.and with an immediate rhs on 32-bit platforms: i32.and is applied to
// each half independently (the helper sign-extends {imm} for the high half).
void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm) {
liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_and>(
this, dst, lhs, imm);
}
// i64.or of two register pairs: i32.or applied to each half independently.
void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_or>(
this, dst, lhs, rhs);
}
// i64.or with an immediate rhs on 32-bit platforms: i32.or is applied to
// each half independently (the helper sign-extends {imm} for the high half).
void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm) {
liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_or>(
this, dst, lhs, imm);
}
// i64.xor of two register pairs: i32.xor applied to each half independently.
void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_xor>(
this, dst, lhs, rhs);
}
// i64.xor with an immediate rhs on 32-bit platforms: i32.xor is applied to
// each half independently (the helper sign-extends {imm} for the high half).
void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm) {
liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_xor>(
this, dst, lhs, imm);
}
#endif // V8_TARGET_ARCH_32_BIT
// End of the partially platform-independent implementations of the
......
......@@ -943,12 +943,12 @@ class LiftoffCompiler {
CASE_I32_BINOPI(I32Add, i32_add)
CASE_I32_BINOP(I32Sub, i32_sub)
CASE_I32_BINOP(I32Mul, i32_mul)
CASE_I32_BINOP(I32And, i32_and)
CASE_I32_BINOP(I32Ior, i32_or)
CASE_I32_BINOP(I32Xor, i32_xor)
CASE_I64_BINOP(I64And, i64_and)
CASE_I64_BINOP(I64Ior, i64_or)
CASE_I64_BINOP(I64Xor, i64_xor)
CASE_I32_BINOPI(I32And, i32_and)
CASE_I32_BINOPI(I32Ior, i32_or)
CASE_I32_BINOPI(I32Xor, i32_xor)
CASE_I64_BINOPI(I64And, i64_and)
CASE_I64_BINOPI(I64Ior, i64_or)
CASE_I64_BINOPI(I64Xor, i64_xor)
CASE_I32_CMPOP(I32Eq, kEqual)
CASE_I32_CMPOP(I32Ne, kUnequal)
CASE_I32_CMPOP(I32LtS, kSignedLessThan)
......
......@@ -469,6 +469,14 @@ void EmitCommutativeBinOp(LiftoffAssembler* assm, Register dst, Register lhs,
(assm->*op)(dst, rhs);
}
}
// Emits the commutative binary operation {op} of {lhs} and the immediate
// {imm}, writing the result to {dst}. If {dst} and {lhs} differ, {lhs} is
// first copied into {dst} via {mov}; {op} then updates {dst} in place.
template <void (Assembler::*op)(Register, Immediate),
          void (Assembler::*mov)(Register, Register)>
void EmitCommutativeBinOpImm(LiftoffAssembler* assm, Register dst, Register lhs,
                             int32_t imm) {
  if (lhs != dst) {
    (assm->*mov)(dst, lhs);
  }
  const Immediate rhs(imm);
  (assm->*op)(dst, rhs);
}
} // namespace liftoff
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
......@@ -592,16 +600,31 @@ void LiftoffAssembler::emit_i32_and(Register dst, Register lhs, Register rhs) {
lhs, rhs);
}
// i32.and with an immediate rhs, using the 32-bit andl/movl instructions.
void LiftoffAssembler::emit_i32_and(Register dst, Register lhs, int32_t imm) {
liftoff::EmitCommutativeBinOpImm<&Assembler::andl, &Assembler::movl>(
this, dst, lhs, imm);
}
// i32.or of two registers, using the 32-bit orl/movl instructions.
void LiftoffAssembler::emit_i32_or(Register dst, Register lhs, Register rhs) {
liftoff::EmitCommutativeBinOp<&Assembler::orl, &Assembler::movl>(this, dst,
lhs, rhs);
}
// i32.or with an immediate rhs, using the 32-bit orl/movl instructions.
void LiftoffAssembler::emit_i32_or(Register dst, Register lhs, int32_t imm) {
liftoff::EmitCommutativeBinOpImm<&Assembler::orl, &Assembler::movl>(this, dst,
lhs, imm);
}
// i32.xor of two registers, using the 32-bit xorl/movl instructions.
void LiftoffAssembler::emit_i32_xor(Register dst, Register lhs, Register rhs) {
liftoff::EmitCommutativeBinOp<&Assembler::xorl, &Assembler::movl>(this, dst,
lhs, rhs);
}
// i32.xor with an immediate rhs, using the 32-bit xorl/movl instructions.
void LiftoffAssembler::emit_i32_xor(Register dst, Register lhs, int32_t imm) {
liftoff::EmitCommutativeBinOpImm<&Assembler::xorl, &Assembler::movl>(
this, dst, lhs, imm);
}
namespace liftoff {
template <ValueType type>
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
......@@ -778,18 +801,36 @@ void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
this, dst.gp(), lhs.gp(), rhs.gp());
}
// i64.and with an immediate rhs, using the 64-bit andq/movq instructions.
void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm) {
liftoff::EmitCommutativeBinOpImm<&Assembler::andq, &Assembler::movq>(
this, dst.gp(), lhs.gp(), imm);
}
// i64.or of two registers, using the 64-bit orq/movq instructions.
void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitCommutativeBinOp<&Assembler::orq, &Assembler::movq>(
this, dst.gp(), lhs.gp(), rhs.gp());
}
// i64.or with an immediate rhs, using the 64-bit orq/movq instructions.
void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm) {
liftoff::EmitCommutativeBinOpImm<&Assembler::orq, &Assembler::movq>(
this, dst.gp(), lhs.gp(), imm);
}
// i64.xor of two registers, using the 64-bit xorq/movq instructions.
void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitCommutativeBinOp<&Assembler::xorq, &Assembler::movq>(
this, dst.gp(), lhs.gp(), rhs.gp());
}
// i64.xor with an immediate rhs, using the 64-bit xorq/movq instructions.
void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm) {
liftoff::EmitCommutativeBinOpImm<&Assembler::xorq, &Assembler::movq>(
this, dst.gp(), lhs.gp(), imm);
}
void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
Register amount, LiftoffRegList pinned) {
liftoff::EmitShiftOperation<kWasmI64>(this, dst.gp(), src.gp(), amount,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment