Commit b61b715c authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Add f64 support

This CL adds support for f64.const, f64.add, f64.sub and f64.mul.

R=ahaas@chromium.org

Bug: v8:6600
Change-Id: I7374ede800db83303c8fa647a183fdda53a151cd
Reviewed-on: https://chromium-review.googlesource.com/913613
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51263}
parent da4f9119
......@@ -129,6 +129,9 @@ UNIMPLEMENTED_GP_BINOP(ptrsize_add)
 UNIMPLEMENTED_FP_BINOP(f32_add)
 UNIMPLEMENTED_FP_BINOP(f32_sub)
 UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_BINOP(f64_add)
+UNIMPLEMENTED_FP_BINOP(f64_sub)
+UNIMPLEMENTED_FP_BINOP(f64_mul)
 
 #undef UNIMPLEMENTED_GP_BINOP
 #undef UNIMPLEMENTED_GP_UNOP
......
......@@ -129,6 +129,9 @@ UNIMPLEMENTED_GP_BINOP(ptrsize_add)
 UNIMPLEMENTED_FP_BINOP(f32_add)
 UNIMPLEMENTED_FP_BINOP(f32_sub)
 UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_BINOP(f64_add)
+UNIMPLEMENTED_FP_BINOP(f64_sub)
+UNIMPLEMENTED_FP_BINOP(f64_mul)
 
 #undef UNIMPLEMENTED_GP_BINOP
 #undef UNIMPLEMENTED_GP_UNOP
......
......@@ -71,12 +71,12 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
       TurboAssembler::Move(reg.high_gp(), Immediate(high_word));
       break;
     }
-    case kWasmF32: {
-      Register tmp = GetUnusedRegister(kGpReg).gp();
-      mov(tmp, Immediate(value.to_f32_boxed().get_bits()));
-      movd(reg.fp(), tmp);
+    case kWasmF32:
+      TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
       break;
+    case kWasmF64:
+      TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+      break;
-    }
     default:
       UNREACHABLE();
   }
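A side note on the new constant loads: both the f32 and f64 cases materialize the constant from its boxed bit pattern (get_bits()) rather than from the C++ float/double value, which keeps NaN payloads bit-exact as WebAssembly semantics require. A standalone sketch of what loading by bit pattern means (plain C++ for illustration; MaterializeF64 is a made-up name, not V8 API):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical illustration, not V8 code: rebuild a double from its exact
// IEEE-754 bits, the role TurboAssembler::Move(fp_reg, bits) plays above.
double MaterializeF64(uint64_t bits) {
  double d;
  std::memcpy(&d, &bits, sizeof d);  // bit-for-bit reinterpretation
  return d;
}

int main() {
  std::printf("%g\n", MaterializeF64(0x4000000000000000ull));  // prints 2
  // A NaN with a custom payload survives the round trip bit-exactly,
  // which is why the raw bits (not the double value) are what gets loaded.
  uint64_t nan_bits = 0x7ff8deadbeef0000ull;
  double nan = MaterializeF64(nan_bits);
  uint64_t back;
  std::memcpy(&back, &nan, sizeof back);
  std::printf("%d\n", back == nan_bits ? 1 : 0);  // prints 1
}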
......@@ -568,6 +568,47 @@ void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
   }
 }
 
+void LiftoffAssembler::emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vaddsd(dst, lhs, rhs);
+  } else if (dst == rhs) {
+    addsd(dst, lhs);
+  } else {
+    if (dst != lhs) movsd(dst, lhs);
+    addsd(dst, rhs);
+  }
+}
+
+void LiftoffAssembler::emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vsubsd(dst, lhs, rhs);
+  } else if (dst == rhs) {
+    movsd(kScratchDoubleReg, rhs);
+    movsd(dst, lhs);
+    subsd(dst, kScratchDoubleReg);
+  } else {
+    if (dst != lhs) movsd(dst, lhs);
+    subsd(dst, rhs);
+  }
+}
+
+void LiftoffAssembler::emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vmulsd(dst, lhs, rhs);
+  } else if (dst == rhs) {
+    mulsd(dst, lhs);
+  } else {
+    if (dst != lhs) movsd(dst, lhs);
+    mulsd(dst, rhs);
+  }
+}
+
 void LiftoffAssembler::emit_i32_test(Register reg) { test(reg, reg); }
 
 void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
......
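A note on the non-AVX paths in the hunk above: SSE2 has only two-operand forms, so the result must be built in place in dst, and each emitter has to handle dst aliasing either input. For the commutative add and mul, dst == rhs is handled by simply operating with lhs (dst = rhs + lhs). Subtraction is not commutative, so when dst aliases rhs the subtrahend is first saved in kScratchDoubleReg. A standalone C++ model of that case analysis (Reg, the printing movsd/subsd, and kScratch are stand-ins, not the real assembler):

#include <cstdio>
#include <string>

// Hypothetical stand-ins for DoubleRegister and the SSE2 emitters; the real
// code emits movsd/subsd instructions instead of printing them.
using Reg = std::string;
void movsd(const Reg& d, const Reg& s) { std::printf("movsd %s, %s\n", d.c_str(), s.c_str()); }
void subsd(const Reg& d, const Reg& s) { std::printf("subsd %s, %s\n", d.c_str(), s.c_str()); }
const Reg kScratch = "xmm7";  // plays the role of kScratchDoubleReg

// Mirrors the non-AVX branch of emit_f64_sub: dst = lhs - rhs.
void EmitF64Sub(const Reg& dst, const Reg& lhs, const Reg& rhs) {
  if (dst == rhs) {
    // subsd(dst, lhs) would compute rhs - lhs; save rhs first instead.
    movsd(kScratch, rhs);
    movsd(dst, lhs);
    subsd(dst, kScratch);
  } else {
    if (dst != lhs) movsd(dst, lhs);
    subsd(dst, rhs);  // in place: dst = dst - rhs
  }
}

int main() {
  EmitF64Sub("xmm0", "xmm1", "xmm0");  // the aliased case that needs the scratch
}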
......@@ -373,6 +373,12 @@ class LiftoffAssembler : public TurboAssembler {
                            DoubleRegister rhs);
   inline void emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
                            DoubleRegister rhs);
+  inline void emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
+                           DoubleRegister rhs);
+  inline void emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
+                           DoubleRegister rhs);
+  inline void emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
+                           DoubleRegister rhs);
   inline void emit_i32_test(Register);
   inline void emit_i32_compare(Register, Register);
......
......@@ -599,9 +599,9 @@ class LiftoffCompiler {
     __ PushRegister(kWasmI32, dst_reg);
   }
 
-  void F32BinOp(void (LiftoffAssembler::*emit_fn)(DoubleRegister,
-                                                  DoubleRegister,
-                                                  DoubleRegister)) {
+  void FloatBinOp(void (LiftoffAssembler::*emit_fn)(DoubleRegister,
+                                                    DoubleRegister,
+                                                    DoubleRegister)) {
     LiftoffRegList pinned;
     LiftoffRegister target_reg =
         pinned.set(__ GetBinaryOpTargetRegister(kFpReg));
......@@ -648,9 +648,12 @@ class LiftoffCompiler {
       CASE_SHIFTOP(I32ShrU, i32_shr)
       CASE_CCALL_BINOP(I32Rol, I32, wasm_word32_rol)
       CASE_CCALL_BINOP(I32Ror, I32, wasm_word32_ror)
-      CASE_BINOP(F32Add, F32, f32_add)
-      CASE_BINOP(F32Sub, F32, f32_sub)
-      CASE_BINOP(F32Mul, F32, f32_mul)
+      CASE_BINOP(F32Add, Float, f32_add)
+      CASE_BINOP(F32Sub, Float, f32_sub)
+      CASE_BINOP(F32Mul, Float, f32_mul)
+      CASE_BINOP(F64Add, Float, f64_add)
+      CASE_BINOP(F64Sub, Float, f64_sub)
+      CASE_BINOP(F64Mul, Float, f64_mul)
       default:
         return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
     }
......@@ -689,7 +692,10 @@ class LiftoffCompiler {
   }
 
   void F64Const(Decoder* decoder, Value* result, double value) {
-    unsupported(decoder, "f64.const");
+    LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
+    __ LoadConstant(reg, WasmValue(value));
+    __ PushRegister(kWasmF64, reg);
+    CheckStackSizeLimit(decoder);
   }
 
   void Drop(Decoder* decoder, const Value& value) {
......
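The F32BinOp to FloatBinOp rename above works because f32 and f64 binary operations look identical at this level: both take three DoubleRegisters, so one helper serves both once the emit function is passed in. The CASE_BINOP macro is defined elsewhere in liftoff-compiler.cc; the following self-contained sketch is only a guess at its shape, not the verbatim macro:

#include <cstdio>

// Hypothetical, simplified model of the CASE_BINOP dispatch; the real macro
// lives in liftoff-compiler.cc, outside the lines shown in this diff.
struct Asm {
  void emit_f32_add() { std::printf("emit f32.add\n"); }
  void emit_f64_add() { std::printf("emit f64.add\n"); }
};

struct Compiler {
  Asm assembler;
  enum Opcode { kExprF32Add, kExprF64Add };

  // One helper now serves f32 and f64 binops; the emitter pointer is the
  // only thing that varies. This is what the FloatBinOp rename enables.
  void FloatBinOp(void (Asm::*emit_fn)()) { (assembler.*emit_fn)(); }

#define CASE_BINOP(opcode, type, fn) \
  case kExpr##opcode:                \
    return type##BinOp(&Asm::emit_##fn);

  void DecodeBinOp(Opcode opcode) {
    switch (opcode) {
      CASE_BINOP(F32Add, Float, f32_add)
      CASE_BINOP(F64Add, Float, f64_add)
    }
  }
#undef CASE_BINOP
};

int main() {
  Compiler c;
  c.DecodeBinOp(Compiler::kExprF64Add);  // prints "emit f64.add"
}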
......@@ -169,6 +169,9 @@ UNIMPLEMENTED_GP_BINOP(ptrsize_add)
 UNIMPLEMENTED_FP_BINOP(f32_add)
 UNIMPLEMENTED_FP_BINOP(f32_sub)
 UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_BINOP(f64_add)
+UNIMPLEMENTED_FP_BINOP(f64_sub)
+UNIMPLEMENTED_FP_BINOP(f64_mul)
 
 #undef UNIMPLEMENTED_GP_BINOP
 #undef UNIMPLEMENTED_GP_UNOP
......
......@@ -164,6 +164,9 @@ UNIMPLEMENTED_GP_BINOP(ptrsize_add)
 UNIMPLEMENTED_FP_BINOP(f32_add)
 UNIMPLEMENTED_FP_BINOP(f32_sub)
 UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_BINOP(f64_add)
+UNIMPLEMENTED_FP_BINOP(f64_sub)
+UNIMPLEMENTED_FP_BINOP(f64_mul)
 
 #undef UNIMPLEMENTED_GP_BINOP
 #undef UNIMPLEMENTED_GP_UNOP
......
......@@ -129,6 +129,9 @@ UNIMPLEMENTED_GP_BINOP(ptrsize_add)
 UNIMPLEMENTED_FP_BINOP(f32_add)
 UNIMPLEMENTED_FP_BINOP(f32_sub)
 UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_BINOP(f64_add)
+UNIMPLEMENTED_FP_BINOP(f64_sub)
+UNIMPLEMENTED_FP_BINOP(f64_mul)
 
 #undef UNIMPLEMENTED_GP_BINOP
 #undef UNIMPLEMENTED_GP_UNOP
......
......@@ -129,6 +129,9 @@ UNIMPLEMENTED_GP_BINOP(ptrsize_add)
 UNIMPLEMENTED_FP_BINOP(f32_add)
 UNIMPLEMENTED_FP_BINOP(f32_sub)
 UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_BINOP(f64_add)
+UNIMPLEMENTED_FP_BINOP(f64_sub)
+UNIMPLEMENTED_FP_BINOP(f64_mul)
 
 #undef UNIMPLEMENTED_GP_BINOP
 #undef UNIMPLEMENTED_GP_UNOP
......
......@@ -58,8 +58,8 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
       }
       break;
     case kWasmI64:
-      if (value.to_i64() == 0 && RelocInfo::IsNone(rmode)) {
-        xorq(reg.gp(), reg.gp());
+      if (RelocInfo::IsNone(rmode)) {
+        TurboAssembler::Set(reg.gp(), value.to_i64());
       } else {
         movq(reg.gp(), value.to_i64(), rmode);
       }
......@@ -67,6 +67,9 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
     case kWasmF32:
       TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
       break;
+    case kWasmF64:
+      TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+      break;
     default:
       UNREACHABLE();
   }
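The kWasmI64 change above folds the hand-written zero shortcut into TurboAssembler::Set, which already selects a compact encoding per constant (an xor for zero, a 32-bit move when the value fits, a full 64-bit move otherwise). A standalone sketch of that selection logic (illustrative only; the real implementation lives in V8's x64 macro assembler):

#include <cstdint>
#include <cstdio>

// Hypothetical model of constant materialization on x64, not V8 code.
// Writing a 32-bit register zero-extends into the full 64 bits, which is
// what makes the xorl/movl shortcuts valid for 64-bit constants.
void Set(const char* reg, int64_t value) {
  if (value == 0) {
    std::printf("xorl %s, %s\n", reg, reg);  // shortest encoding
  } else if (value > 0 && value <= UINT32_MAX) {
    std::printf("movl %s, %lld\n", reg, (long long)value);  // zero-extends
  } else {
    std::printf("movabsq %s, %lld\n", reg, (long long)value);  // full 64-bit
  }
}

int main() {
  Set("rax", 0);   // xorl rax, rax
  Set("rax", 42);  // movl rax, 42
  Set("rax", -1);  // movabsq rax, -1
}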
......@@ -505,6 +508,47 @@ void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
   }
 }
 
+void LiftoffAssembler::emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vaddsd(dst, lhs, rhs);
+  } else if (dst == rhs) {
+    addsd(dst, lhs);
+  } else {
+    if (dst != lhs) movsd(dst, lhs);
+    addsd(dst, rhs);
+  }
+}
+
+void LiftoffAssembler::emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vsubsd(dst, lhs, rhs);
+  } else if (dst == rhs) {
+    movsd(kScratchDoubleReg, rhs);
+    movsd(dst, lhs);
+    subsd(dst, kScratchDoubleReg);
+  } else {
+    if (dst != lhs) movsd(dst, lhs);
+    subsd(dst, rhs);
+  }
+}
+
+void LiftoffAssembler::emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vmulsd(dst, lhs, rhs);
+  } else if (dst == rhs) {
+    mulsd(dst, lhs);
+  } else {
+    if (dst != lhs) movsd(dst, lhs);
+    mulsd(dst, rhs);
+  }
+}
+
 void LiftoffAssembler::emit_i32_test(Register reg) { testl(reg, reg); }
 
 void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
......