Commit 9474b540 authored by Clemens Hammacher, committed by Commit Bot

Revert "[liftoff] Introduce emit_{i64,i32}_add with immediate"

This reverts commit e3be96d6.

Reason for revert: Fails on ia32: https://ci.chromium.org/p/v8/builders/ci/V8%20Linux/31041

Original change's description:
> [liftoff] Introduce emit_{i64,i32}_add with immediate
> 
> This allows immediates to be encoded directly into instructions, rather than
> mov-ing constants to registers first.
> 
> This patch only changes emit_{i64,i32}_add; other emit_ functions will be changed once
> this approach has been approved.
> 
> Bug: v8:9038
> 
> Change-Id: I0c7306c2da0dae26f1c6e2465a9565adbf0bda84
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1524482
> Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
> Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#60506}

TBR=clemensh@chromium.org,martyn.capewell@arm.com,joey.gouly@arm.com

Change-Id: I131b13dc7178e31919fc2fffacec72c0697d93a6
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:9038
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1543354
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60508}
parent a7fa1ae2
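
For context on what is being reverted: with an emit_*_add overload that takes an immediate, a constant right-hand side can be encoded directly into the add instruction instead of being materialized with a mov first. Below is a minimal sketch of that difference in standalone C++, with a hypothetical SketchAssembler standing in for the Liftoff assembler (not V8 code):

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for a Liftoff-style assembler; "emitting" an
// instruction here just prints it.
struct SketchAssembler {
  void mov_imm(const char* dst, int32_t imm) {
    std::printf("  mov %s, #%d\n", dst, imm);
  }
  void add_reg(const char* dst, const char* lhs, const char* rhs) {
    std::printf("  add %s, %s, %s\n", dst, lhs, rhs);
  }
  void add_imm(const char* dst, const char* lhs, int32_t imm) {
    std::printf("  add %s, %s, #%d\n", dst, lhs, imm);
  }
};

int main() {
  SketchAssembler assm;
  std::puts("without the immediate overload:");
  assm.mov_imm("r1", 42);          // materialize the constant first
  assm.add_reg("r0", "r0", "r1");  // then a register-register add
  std::puts("with it:");
  assm.add_imm("r0", "r0", 42);    // constant encoded in the instruction
}

The reverted patch wired this up for i32 and i64 add only; the diff below removes those paths again.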
@@ -139,26 +139,6 @@ inline void I64Binop(LiftoffAssembler* assm, LiftoffRegister dst,
}
}
template <void (Assembler::*op)(Register, Register, const Operand&, SBit,
Condition),
void (Assembler::*op_with_carry)(Register, Register, const Operand&,
SBit, Condition)>
inline void I64BinopI(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister lhs, int32_t imm) {
UseScratchRegisterScope temps(assm);
Register scratch = dst.low_gp();
bool can_use_dst = dst.low_gp() != lhs.high_gp();
if (!can_use_dst) {
scratch = temps.Acquire();
}
(assm->*op)(scratch, lhs.low_gp(), Operand(imm), SetCC, al);
// Top half of the immediate is 0.
(assm->*op_with_carry)(dst.high_gp(), lhs.high_gp(), Operand(0), LeaveCC, al);
if (!can_use_dst) {
assm->mov(dst.low_gp(), scratch);
}
}
template <void (TurboAssembler::*op)(Register, Register, Register, Register,
Register),
bool is_left_shift>
@@ -678,10 +658,6 @@ FP64_UNOP(f64_sqrt, vsqrt)
#undef FP64_UNOP
#undef FP64_BINOP
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
add(dst, lhs, Operand(imm));
}
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
clz(dst, src);
return true;
@@ -814,11 +790,6 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::I64Binop<&Assembler::add, &Assembler::adc>(this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm) {
liftoff::I64BinopI<&Assembler::add, &Assembler::adc>(this, dst, lhs, imm);
}
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::I64Binop<&Assembler::sub, &Assembler::sbc>(this, dst, lhs, rhs);
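The ARM helper I64BinopI removed above splits the 64-bit addition across a register pair: an add on the low words that sets the carry flag, then an adc of zero into the high word (per the code's comment, the top half of the immediate is taken as 0). A minimal standalone C++ sketch of that carry propagation, not V8 code:

#include <cstdint>
#include <cstdio>

// Sketch of the add/adc pairing in I64BinopI: a 64-bit value lives in two
// 32-bit halves, and the immediate's top half is treated as 0.
int main() {
  uint32_t lo = 0xFFFFFFFFu, hi = 0x00000001u;   // the i64 value 0x1FFFFFFFF
  int32_t imm = 1;
  uint64_t wide = (uint64_t)lo + (uint32_t)imm;  // "add ..., SetCC"
  uint32_t new_lo = (uint32_t)wide;
  uint32_t carry = (uint32_t)(wide >> 32);       // the carry flag
  uint32_t new_hi = hi + 0u + carry;             // "adc ..., Operand(0)"
  std::printf("0x%08X%08X\n", new_hi, new_lo);   // prints 0x0000000200000000
}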
@@ -580,15 +580,6 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Msub(dst_w, scratch, rhs_w, lhs_w);
}
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm) {
Add(dst.gp().X(), lhs.gp().X(), Immediate(imm));
}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
Add(dst.W(), lhs.W(), Immediate(imm));
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
@@ -523,14 +523,6 @@ void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
}
}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
if (lhs != dst) {
lea(dst, Operand(lhs, imm));
} else {
add(dst, Immediate(imm));
}
}
void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
if (dst != rhs) {
// Default path.
@@ -801,36 +793,6 @@ inline void OpWithCarry(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64);
}
template <void (Assembler::*op)(Register, const Immediate&),
void (Assembler::*op_with_carry)(Register, int32_t)>
inline void OpWithCarryI(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister lhs, int32_t imm) {
// First, compute the low half of the result, potentially into a temporary dst
// register if {dst.low_gp()} equals any register we need to
// keep alive for computing the upper half.
LiftoffRegList keep_alive = LiftoffRegList::ForRegs(lhs.high_gp());
Register dst_low = keep_alive.has(dst.low_gp())
? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
: dst.low_gp();
if (dst_low != lhs.low_gp()) assm->mov(dst_low, lhs.low_gp());
(assm->*op)(dst_low, Immediate(imm));
// Now compute the upper half, while keeping alive the previous result.
keep_alive = LiftoffRegList::ForRegs(dst_low);
Register dst_high = keep_alive.has(dst.high_gp())
? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
: dst.high_gp();
if (dst_high != lhs.high_gp()) assm->mov(dst_high, lhs.high_gp());
// Top half of the immediate is 0.
(assm->*op_with_carry)(dst_high, 0);
// If necessary, move result into the right registers.
LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64);
}
} // namespace liftoff
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -838,11 +800,6 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::OpWithCarry<&Assembler::add, &Assembler::adc>(this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm) {
liftoff::OpWithCarryI<&Assembler::add, &Assembler::adc>(this, dst, lhs, imm);
}
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::OpWithCarry<&Assembler::sub, &Assembler::sbb>(this, dst, lhs, rhs);
@@ -389,7 +389,6 @@ class LiftoffAssembler : public TurboAssembler {
// i32 binops.
inline void emit_i32_add(Register dst, Register lhs, Register rhs);
inline void emit_i32_add(Register dst, Register lhs, int32_t imm);
inline void emit_i32_sub(Register dst, Register lhs, Register rhs);
inline void emit_i32_mul(Register dst, Register lhs, Register rhs);
inline void emit_i32_divs(Register dst, Register lhs, Register rhs,
@@ -420,8 +419,6 @@ class LiftoffAssembler : public TurboAssembler {
// i64 binops.
inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm);
inline void emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -484,14 +481,6 @@ class LiftoffAssembler : public TurboAssembler {
}
}
inline void emit_ptrsize_add(Register dst, Register lhs, int32_t imm) {
if (kSystemPointerSize == 8) {
emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs), imm);
} else {
emit_i32_add(dst, lhs, imm);
}
}
// f32 binops.
inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
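The emit_ptrsize_add overload removed above let platform-independent callers add a constant to a pointer-sized value, dispatching on kSystemPointerSize to the i64 or i32 emitter. A rough portable-C++ analogue of that dispatch (not V8 code), using sizeof(void*) as a stand-in for kSystemPointerSize:

#include <cstdint>
#include <cstdio>

// Sketch: compile-time pointer-size dispatch in the spirit of
// emit_ptrsize_add, choosing the 64-bit or 32-bit add.
uintptr_t ptrsize_add(uintptr_t lhs, int32_t imm) {
  if constexpr (sizeof(void*) == 8) {
    return lhs + (uintptr_t)(int64_t)imm;               // i64 path
  } else {
    return (uintptr_t)(uint32_t)(lhs + (uint32_t)imm);  // i32 path, wraps
  }
}

int main() {
  std::printf("%zu\n", (size_t)ptrsize_add(100, 28));   // 128 on any target
}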
@@ -785,37 +785,6 @@ class LiftoffCompiler {
#undef CASE_TYPE_CONVERSION
}
template <ValueType src_type, ValueType result_type, typename EmitFn,
typename EmitFnImm>
void EmitBinOpImm(EmitFn fn, EmitFnImm fnImm) {
static constexpr RegClass src_rc = reg_class_for(src_type);
static constexpr RegClass result_rc = reg_class_for(result_type);
LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back();
// Check if the RHS is an immediate.
if (rhs_slot.loc() == LiftoffAssembler::VarState::kIntConst) {
__ cache_state()->stack_state.pop_back();
int32_t imm = rhs_slot.i32_const();
LiftoffRegister lhs = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {lhs})
: __ GetUnusedRegister(result_rc);
fnImm(dst, lhs, imm);
__ PushRegister(result_type, dst);
} else {
// The RHS was not an immediate.
LiftoffRegister rhs = __ PopToRegister();
LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {lhs, rhs})
: __ GetUnusedRegister(result_rc);
fn(dst, lhs, rhs);
__ PushRegister(result_type, dst);
}
}
template <ValueType src_type, ValueType result_type, typename EmitFn>
void EmitBinOp(EmitFn fn) {
static constexpr RegClass src_rc = reg_class_for(src_type);
@@ -861,30 +830,12 @@
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst.gp(), lhs.gp(), rhs.gp()); \
});
#define CASE_I32_BINOPI(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
return EmitBinOpImm<kWasmI32, kWasmI32>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst.gp(), lhs.gp(), rhs.gp()); \
}, \
[=](LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
__ emit_##fn(dst.gp(), lhs.gp(), imm); \
});
#define CASE_I64_BINOP(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
return EmitBinOp<kWasmI64, kWasmI64>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst, lhs, rhs); \
});
#define CASE_I64_BINOPI(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
return EmitBinOpImm<kWasmI64, kWasmI64>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst, lhs, rhs); \
}, \
[=](LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
__ emit_##fn(dst, lhs, imm); \
});
#define CASE_FLOAT_BINOP(opcode, type, fn) \
case WasmOpcode::kExpr##opcode: \
return EmitBinOp<kWasm##type, kWasm##type>( \
@@ -940,7 +891,7 @@
GenerateCCall(&dst, &sig_i_ii, kWasmStmt, args, ext_ref); \
});
switch (opcode) {
CASE_I32_BINOPI(I32Add, i32_add)
CASE_I32_BINOP(I32Add, i32_add)
CASE_I32_BINOP(I32Sub, i32_sub)
CASE_I32_BINOP(I32Mul, i32_mul)
CASE_I32_BINOP(I32And, i32_and)
@@ -959,7 +910,7 @@
CASE_I32_CMPOP(I32LeU, kUnsignedLessEqual)
CASE_I32_CMPOP(I32GeS, kSignedGreaterEqual)
CASE_I32_CMPOP(I32GeU, kUnsignedGreaterEqual)
CASE_I64_BINOPI(I64Add, i64_add)
CASE_I64_BINOP(I64Add, i64_add)
CASE_I64_BINOP(I64Sub, i64_sub)
CASE_I64_BINOP(I64Mul, i64_mul)
CASE_I64_CMPOP(I64Eq, kEqual)
@@ -1109,9 +1060,7 @@
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
}
#undef CASE_I32_BINOP
#undef CASE_I32_BINOPI
#undef CASE_I64_BINOP
#undef CASE_I64_BINOPI
#undef CASE_FLOAT_BINOP
#undef CASE_I32_CMPOP
#undef CASE_I64_CMPOP
@@ -1604,7 +1553,8 @@
if (index != old_index) __ Move(index, old_index, kWasmI32);
}
Register tmp = __ GetUnusedRegister(kGpReg, pinned).gp();
__ emit_ptrsize_add(index, index, *offset);
__ LoadConstant(LiftoffRegister(tmp), WasmValue(*offset));
__ emit_ptrsize_add(index, index, tmp);
LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize);
__ emit_ptrsize_and(index, index, tmp);
*offset = 0;
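The EmitBinOpImm helper removed above is the compiler-side half of the patch: it inspects the top-of-stack VarState and, if the RHS is still a compile-time constant (kIntConst), pops it as an immediate and calls the immediate emitter; otherwise it falls back to the register-register path. (The last hunk above shows the fallback this revert reinstates: LoadConstant into a scratch register followed by a register add.) A simplified standalone sketch of that dispatch, with a hypothetical Slot type standing in for VarState and the LHS assumed to already be in a register:

#include <cstdint>
#include <cstdio>
#include <vector>

// Sketch of EmitBinOpImm's dispatch, not V8 code. A value on the Liftoff
// stack is either already in a register or still a compile-time constant.
struct Slot {
  enum Kind { kRegister, kIntConst } kind;
  int32_t value;  // register number or constant, depending on kind
};

void EmitAdd(std::vector<Slot>& stack) {
  Slot rhs = stack.back();
  stack.pop_back();
  Slot lhs = stack.back();
  stack.pop_back();
  if (rhs.kind == Slot::kIntConst) {
    // Constant RHS: fold it into the instruction as an immediate.
    std::printf("add r%d, r%d, #%d\n", lhs.value, lhs.value, rhs.value);
  } else {
    // General case: both operands in registers.
    std::printf("add r%d, r%d, r%d\n", lhs.value, lhs.value, rhs.value);
  }
  stack.push_back(lhs);
}

int main() {
  std::vector<Slot> stack = {{Slot::kRegister, 0}, {Slot::kIntConst, 42}};
  EmitAdd(stack);  // prints: add r0, r0, #42
}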
@@ -434,14 +434,6 @@ void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
}
}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
if (lhs != dst) {
leal(dst, Operand(lhs, imm));
} else {
addl(dst, Immediate(imm));
}
}
void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
if (dst != rhs) {
// Default path.
@@ -712,15 +704,6 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
}
}
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm) {
if (lhs.gp() != dst.gp()) {
leaq(dst.gp(), Operand(lhs.gp(), imm));
} else {
addq(dst.gp(), Immediate(imm));
}
}
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
if (dst.gp() == rhs.gp()) {
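On ia32 and x64, the removed emitters pick between lea and add: when dst differs from lhs, lea computes lhs + imm into dst in one instruction without clobbering lhs; when they are the same register, a plain add with an immediate has the shorter encoding. A sketch of that choice (not V8 code), again just printing the instruction it would emit:

#include <cstdint>
#include <cstdio>

// Sketch of the lea-vs-add selection in the removed ia32/x64 emitters.
void emit_i32_add(int dst, int lhs, int32_t imm) {
  if (lhs != dst) {
    std::printf("lea r%d, [r%d + %d]\n", dst, lhs, imm);  // non-destructive
  } else {
    std::printf("add r%d, %d\n", dst, imm);  // in-place, shorter encoding
  }
}

int main() {
  emit_i32_add(1, 0, 8);  // lea r1, [r0 + 8]
  emit_i32_add(0, 0, 8);  // add r0, 8
}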