Commit 04c90edb authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Implement f32 comparisons

This implements the full set of f32 comparisons (f32.eq, f32.ne,
f32.lt, f32.gt, f32.le, f32.ge) on ia32 and x64.

R=titzer@chromium.org

Bug: v8:6600
Change-Id: I2be786431d01b4ed540b70f3e4a27c19b7d2649e
Reviewed-on: https://chromium-review.googlesource.com/928982
Reviewed-by: Ben Titzer <titzer@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51545}
parent 903144f3
......@@ -157,6 +157,12 @@ void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
BAILOUT("emit_i32_set_cond");
}
// f32 comparisons are not implemented for this architecture yet; abort
// Liftoff code generation for the current function via BAILOUT.
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
                                         DoubleRegister lhs,
                                         DoubleRegister rhs) {
  BAILOUT("emit_f32_set_cond");
}
// Stack checks are not implemented for this architecture yet; bail out.
void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
void LiftoffAssembler::CallTrapCallbackForTesting() {
......@@ -218,7 +224,7 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
}
// Indirect calls are not implemented for this architecture yet; bail out.
// NOTE: the source showed both the old parameter name {call_desc} and the
// renamed {call_descriptor} as two separate parameter lines — unrendered diff
// residue. Only the post-rename parameter is kept.
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
                                    compiler::CallDescriptor* call_descriptor,
                                    Register target) {
  BAILOUT("CallIndirect");
}
......
......@@ -157,6 +157,12 @@ void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
BAILOUT("emit_i32_set_cond");
}
// f32 comparisons are not implemented for this architecture yet; abort
// Liftoff code generation for the current function via BAILOUT.
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
                                         DoubleRegister lhs,
                                         DoubleRegister rhs) {
  BAILOUT("emit_f32_set_cond");
}
// Stack checks are not implemented for this architecture yet; bail out.
void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
void LiftoffAssembler::CallTrapCallbackForTesting() {
......@@ -218,7 +224,7 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
}
// Indirect calls are not implemented for this architecture yet; bail out.
// NOTE: the source showed both the old parameter name {call_desc} and the
// renamed {call_descriptor} as two separate parameter lines — unrendered diff
// residue. Only the post-rename parameter is kept.
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
                                    compiler::CallDescriptor* call_descriptor,
                                    Register target) {
  BAILOUT("CallIndirect");
}
......
......@@ -649,6 +649,22 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
j(cond, label);
}
namespace liftoff {
// Materializes the condition {cond} as 0/1 into the 32-bit register {dst}.
// setcc writes a single byte, so a byte-addressable register is required;
// on ia32 only the low four GP registers (eax..ebx) qualify.
inline void setcc_32(LiftoffAssembler* assm, Condition cond, Register dst) {
  // Pick a scratch byte register when {dst} itself cannot take setcc.
  // {GetUnusedRegister()} may insert move instructions to spill registers to
  // the stack. This is OK because {mov} does not change the status flags.
  Register byte_reg =
      dst.is_byte_register()
          ? dst
          : assm->GetUnusedRegister(liftoff::kByteRegs,
                                    LiftoffRegList::ForRegs(dst))
                .gp();
  assm->setcc(cond, byte_reg);
  // Zero-extend the written byte into the full 32-bit destination.
  assm->movzx_b(dst, byte_reg);
}
}  // namespace liftoff
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
Register lhs, Register rhs) {
if (rhs != no_reg) {
......@@ -656,18 +672,29 @@ void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
} else {
test(lhs, lhs);
}
liftoff::setcc_32(this, cond, dst);
}
Register tmp_byte_reg = dst;
// Only the lower 4 registers can be addressed as 8-bit registers.
if (!dst.is_byte_register()) {
LiftoffRegList pinned = LiftoffRegList::ForRegs(dst);
// {mov} does not change the status flags, so calling {GetUnusedRegister}
// should be fine here.
tmp_byte_reg = GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
// Sets {dst} to 1 if {lhs} {cond} {rhs} holds for the two f32 inputs,
// 0 otherwise, with wasm NaN semantics: any NaN operand yields 1 only
// for f32.ne and 0 for every other comparison.
// NOTE: the source interleaved two stray removed-diff lines here
// (setcc/movzx_b on an undeclared {tmp_byte_reg}) before the
// {liftoff::setcc_32} call; the residue is dropped and the helper call kept.
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
                                         DoubleRegister lhs,
                                         DoubleRegister rhs) {
  Label cont;
  Label not_nan;

  ucomiss(lhs, rhs);
  // If PF is one, one of the operands was NaN. This needs special handling.
  j(parity_odd, &not_nan, Label::kNear);
  // Return 1 for f32.ne, 0 for all other cases.
  if (cond == not_equal) {
    mov(dst, Immediate(1));
  } else {
    xor_(dst, dst);
  }
  jmp(&cont, Label::kNear);
  bind(&not_nan);
  // No NaN involved: materialize the condition flag into {dst}.
  liftoff::setcc_32(this, cond, dst);
  bind(&cont);
}
void LiftoffAssembler::StackCheck(Label* ool_code) {
......
......@@ -392,6 +392,8 @@ class LiftoffAssembler : public TurboAssembler {
// Set {dst} to 1 if condition holds, 0 otherwise.
inline void emit_i32_set_cond(Condition, Register dst, Register lhs,
Register rhs = no_reg);
inline void emit_f32_set_cond(Condition, Register dst, DoubleRegister lhs,
DoubleRegister rhs);
inline void StackCheck(Label* ool_code);
......
......@@ -554,8 +554,8 @@ class LiftoffCompiler {
}
template <ValueType type, typename EmitFn>
void EmitBinOp(EmitFn fn) {
constexpr RegClass rc = reg_class_for(type);
void EmitMonomorphicBinOp(EmitFn fn) {
static constexpr RegClass rc = reg_class_for(type);
LiftoffRegList pinned;
LiftoffRegister dst = pinned.set(__ GetBinaryOpTargetRegister(rc));
LiftoffRegister rhs = pinned.set(__ PopToRegister(rc, pinned));
......@@ -564,35 +564,51 @@ class LiftoffCompiler {
__ PushRegister(type, dst);
}
// Emits a binary operation whose result type differs from its operand type
// (e.g. an f32 comparison producing an i32). Pops both operands — register
// class {src_rc} — off the wasm value stack, allocates an unused register of
// the result's class, invokes {fn} to emit the operation, and pushes the
// result of type {result_type}.
template <ValueType result_type, RegClass src_rc, typename EmitFn>
void EmitBinOpWithDifferentResultType(EmitFn fn) {
  LiftoffRegList pinned;
  // The right-hand operand is on top of the value stack, so pop rhs first;
  // each popped register is pinned so the next pop picks a different one.
  LiftoffRegister rhs = pinned.set(__ PopToRegister(src_rc, pinned));
  LiftoffRegister lhs = pinned.set(__ PopToRegister(src_rc, pinned));
  LiftoffRegister dst = __ GetUnusedRegister(reg_class_for(result_type));
  fn(dst, lhs, rhs);
  __ PushRegister(result_type, dst);
}
void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& lhs, const Value& rhs, Value* result) {
#define CASE_I32_BINOP(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
return EmitBinOp<kWasmI32>( \
return EmitMonomorphicBinOp<kWasmI32>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst.gp(), lhs.gp(), rhs.gp()); \
});
#define CASE_FLOAT_BINOP(opcode, type, fn) \
case WasmOpcode::kExpr##opcode: \
return EmitBinOp<kWasm##type>( \
return EmitMonomorphicBinOp<kWasm##type>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst.fp(), lhs.fp(), rhs.fp()); \
});
#define CASE_CMPOP(opcode, cond) \
#define CASE_I32_CMPOP(opcode, cond) \
case WasmOpcode::kExpr##opcode: \
return EmitBinOp<kWasmI32>( \
return EmitMonomorphicBinOp<kWasmI32>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_i32_set_cond(cond, dst.gp(), lhs.gp(), rhs.gp()); \
});
#define CASE_F32_CMPOP(opcode, cond) \
case WasmOpcode::kExpr##opcode: \
return EmitBinOpWithDifferentResultType<kWasmI32, kFpReg>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_f32_set_cond(cond, dst.gp(), lhs.fp(), rhs.fp()); \
});
#define CASE_SHIFTOP(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
return EmitBinOp<kWasmI32>( \
return EmitMonomorphicBinOp<kWasmI32>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst.gp(), lhs.gp(), rhs.gp(), {}); \
});
#define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \
case WasmOpcode::kExpr##opcode: \
return EmitBinOp<kWasmI32>( \
return EmitMonomorphicBinOp<kWasmI32>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
Register args[] = {lhs.gp(), rhs.gp()}; \
auto ext_ref = ExternalReference::ext_ref_fn(__ isolate()); \
......@@ -605,16 +621,22 @@ class LiftoffCompiler {
CASE_I32_BINOP(I32And, i32_and)
CASE_I32_BINOP(I32Ior, i32_or)
CASE_I32_BINOP(I32Xor, i32_xor)
CASE_CMPOP(I32Eq, kEqual)
CASE_CMPOP(I32Ne, kUnequal)
CASE_CMPOP(I32LtS, kSignedLessThan)
CASE_CMPOP(I32LtU, kUnsignedLessThan)
CASE_CMPOP(I32GtS, kSignedGreaterThan)
CASE_CMPOP(I32GtU, kUnsignedGreaterThan)
CASE_CMPOP(I32LeS, kSignedLessEqual)
CASE_CMPOP(I32LeU, kUnsignedLessEqual)
CASE_CMPOP(I32GeS, kSignedGreaterEqual)
CASE_CMPOP(I32GeU, kUnsignedGreaterEqual)
CASE_I32_CMPOP(I32Eq, kEqual)
CASE_I32_CMPOP(I32Ne, kUnequal)
CASE_I32_CMPOP(I32LtS, kSignedLessThan)
CASE_I32_CMPOP(I32LtU, kUnsignedLessThan)
CASE_I32_CMPOP(I32GtS, kSignedGreaterThan)
CASE_I32_CMPOP(I32GtU, kUnsignedGreaterThan)
CASE_I32_CMPOP(I32LeS, kSignedLessEqual)
CASE_I32_CMPOP(I32LeU, kUnsignedLessEqual)
CASE_I32_CMPOP(I32GeS, kSignedGreaterEqual)
CASE_I32_CMPOP(I32GeU, kUnsignedGreaterEqual)
CASE_F32_CMPOP(F32Eq, kEqual)
CASE_F32_CMPOP(F32Ne, kUnequal)
CASE_F32_CMPOP(F32Lt, kUnsignedLessThan)
CASE_F32_CMPOP(F32Gt, kUnsignedGreaterThan)
CASE_F32_CMPOP(F32Le, kUnsignedLessEqual)
CASE_F32_CMPOP(F32Ge, kUnsignedGreaterEqual)
CASE_SHIFTOP(I32Shl, i32_shl)
CASE_SHIFTOP(I32ShrS, i32_sar)
CASE_SHIFTOP(I32ShrU, i32_shr)
......@@ -631,8 +653,9 @@ class LiftoffCompiler {
}
#undef CASE_I32_BINOP
#undef CASE_FLOAT_BINOP
#undef CASE_I32_CMPOP
#undef CASE_F32_CMPOP
#undef CASE_SHIFTOP
#undef CASE_CMPOP
#undef CASE_CCALL_BINOP
}
......
......@@ -243,6 +243,12 @@ void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
bind(&true_label);
}
// f32 comparisons are not implemented for this architecture yet; abort
// Liftoff code generation for the current function via BAILOUT.
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
                                         DoubleRegister lhs,
                                         DoubleRegister rhs) {
  BAILOUT("emit_f32_set_cond");
}
// Stack checks are not implemented for this architecture yet; bail out.
void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
void LiftoffAssembler::CallTrapCallbackForTesting() {
......
......@@ -238,6 +238,12 @@ void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
bind(&true_label);
}
// f32 comparisons are not implemented for this architecture yet; abort
// Liftoff code generation for the current function via BAILOUT.
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
                                         DoubleRegister lhs,
                                         DoubleRegister rhs) {
  BAILOUT("emit_f32_set_cond");
}
// Stack checks are not implemented for this architecture yet; bail out.
void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
void LiftoffAssembler::CallTrapCallbackForTesting() {
......
......@@ -157,6 +157,12 @@ void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
BAILOUT("emit_i32_set_cond");
}
// f32 comparisons are not implemented for this architecture yet; abort
// Liftoff code generation for the current function via BAILOUT.
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
                                         DoubleRegister lhs,
                                         DoubleRegister rhs) {
  BAILOUT("emit_f32_set_cond");
}
// Stack checks are not implemented for this architecture yet; bail out.
void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
void LiftoffAssembler::CallTrapCallbackForTesting() {
......
......@@ -157,6 +157,12 @@ void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
BAILOUT("emit_i32_set_cond");
}
// f32 comparisons are not implemented for this architecture yet; abort
// Liftoff code generation for the current function via BAILOUT.
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
                                         DoubleRegister lhs,
                                         DoubleRegister rhs) {
  BAILOUT("emit_f32_set_cond");
}
// Stack checks are not implemented for this architecture yet; bail out.
void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
void LiftoffAssembler::CallTrapCallbackForTesting() {
......
......@@ -604,6 +604,29 @@ void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
movzxbl(dst, dst);
}
// Sets {dst} to 1 if {lhs} {cond} {rhs} holds for the two f32 inputs,
// 0 otherwise, with wasm NaN semantics: any NaN operand yields 1 only
// for f32.ne and 0 for every other comparison.
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
                                         DoubleRegister lhs,
                                         DoubleRegister rhs) {
  Label cont;
  Label not_nan;

  // Unordered compare; PF is set iff at least one operand is NaN.
  Ucomiss(lhs, rhs);
  // If PF is one, one of the operands was NaN. This needs special handling.
  j(parity_odd, &not_nan, Label::kNear);
  // Return 1 for f32.ne, 0 for all other cases.
  if (cond == not_equal) {
    movl(dst, Immediate(1));
  } else {
    xorl(dst, dst);
  }
  jmp(&cont, Label::kNear);
  bind(&not_nan);
  // No NaN involved: set the low byte of {dst} from the flag, then
  // zero-extend it to 32 bits.
  setcc(cond, dst);
  movzxbl(dst, dst);
  bind(&cont);
}
void LiftoffAssembler::StackCheck(Label* ool_code) {
Register limit = GetUnusedRegister(kGpReg).gp();
LoadAddress(limit, ExternalReference::address_of_stack_limit(isolate()));
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment