Commit be3a1df9 authored by Predrag Rudic, committed by Commit Bot

MIPS: Call C runtime function for Round, Ceil, Trunc in Liftoff when in FP32 mode

Change-Id: I1815de5bc5fc955014cba8099e8c704a23a2e9be
Reviewed-on: https://chromium-review.googlesource.com/1044187
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53148}
parent 1b11d98f
......@@ -130,6 +130,11 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
BAILOUT("fp unop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(name) \
bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
BAILOUT("fp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_I32_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register src, \
Register amount, LiftoffRegList pinned) { \
......@@ -180,10 +185,10 @@ UNIMPLEMENTED_FP_BINOP(f64_min)
UNIMPLEMENTED_FP_BINOP(f64_max)
UNIMPLEMENTED_FP_UNOP(f64_abs)
UNIMPLEMENTED_FP_UNOP(f64_neg)
UNIMPLEMENTED_FP_UNOP(f64_ceil)
UNIMPLEMENTED_FP_UNOP(f64_floor)
UNIMPLEMENTED_FP_UNOP(f64_trunc)
UNIMPLEMENTED_FP_UNOP(f64_nearest_int)
UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_ceil)
UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_floor)
UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_trunc)
UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_nearest_int)
UNIMPLEMENTED_FP_UNOP(f64_sqrt)
#undef UNIMPLEMENTED_GP_BINOP
......@@ -191,6 +196,7 @@ UNIMPLEMENTED_FP_UNOP(f64_sqrt)
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
#undef UNIMPLEMENTED_FP_UNOP
#undef UNIMPLEMENTED_FP_UNOP_RETURN_TRUE
#undef UNIMPLEMENTED_I32_SHIFTOP
#undef UNIMPLEMENTED_I64_SHIFTOP
......
......@@ -391,6 +391,11 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
instruction(dst.D(), src.D()); \
}
#define FP64_UNOP_RETURN_TRUE(name, instruction) \
bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
instruction(dst.D(), src.D()); \
return true; \
}
#define I32_SHIFTOP(name, instruction) \
void LiftoffAssembler::emit_##name(Register dst, Register src, \
Register amount, LiftoffRegList pinned) { \
......@@ -441,10 +446,10 @@ FP64_BINOP(f64_min, Fmin)
FP64_BINOP(f64_max, Fmax)
FP64_UNOP(f64_abs, Fabs)
FP64_UNOP(f64_neg, Fneg)
FP64_UNOP(f64_ceil, Frintp)
FP64_UNOP(f64_floor, Frintm)
FP64_UNOP(f64_trunc, Frintz)
FP64_UNOP(f64_nearest_int, Frintn)
FP64_UNOP_RETURN_TRUE(f64_ceil, Frintp)
FP64_UNOP_RETURN_TRUE(f64_floor, Frintm)
FP64_UNOP_RETURN_TRUE(f64_trunc, Frintz)
FP64_UNOP_RETURN_TRUE(f64_nearest_int, Frintn)
FP64_UNOP(f64_sqrt, Fsqrt)
#undef I32_BINOP
......@@ -453,6 +458,7 @@ FP64_UNOP(f64_sqrt, Fsqrt)
#undef FP32_UNOP
#undef FP64_BINOP
#undef FP64_UNOP
#undef FP64_UNOP_RETURN_TRUE
#undef I32_SHIFTOP
#undef I64_SHIFTOP
......
......@@ -14,8 +14,11 @@ namespace v8 {
namespace internal {
namespace wasm {
#define REQUIRE_CPU_FEATURE(name) \
if (!CpuFeatures::IsSupported(name)) return bailout("no " #name); \
#define REQUIRE_CPU_FEATURE(name, ...) \
if (!CpuFeatures::IsSupported(name)) { \
bailout("no " #name); \
return __VA_ARGS__; \
} \
CpuFeatureScope feature(this, name);
namespace liftoff {
......@@ -1121,25 +1124,29 @@ void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
}
}
void LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
REQUIRE_CPU_FEATURE(SSE4_1);
bool LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
REQUIRE_CPU_FEATURE(SSE4_1, true);
roundsd(dst, src, kRoundUp);
return true;
}
void LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) {
REQUIRE_CPU_FEATURE(SSE4_1);
bool LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) {
REQUIRE_CPU_FEATURE(SSE4_1, true);
roundsd(dst, src, kRoundDown);
return true;
}
void LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) {
REQUIRE_CPU_FEATURE(SSE4_1);
bool LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) {
REQUIRE_CPU_FEATURE(SSE4_1, true);
roundsd(dst, src, kRoundToZero);
return true;
}
void LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
bool LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
DoubleRegister src) {
REQUIRE_CPU_FEATURE(SSE4_1);
REQUIRE_CPU_FEATURE(SSE4_1, true);
roundsd(dst, src, kRoundToNearest);
return true;
}
void LiftoffAssembler::emit_f64_sqrt(DoubleRegister dst, DoubleRegister src) {
......
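The REQUIRE_CPU_FEATURE change above swaps the old `return bailout(...)` for a block ending in `return __VA_ARGS__;`, so the same macro serves both the void-returning emitters (empty argument list) and the new bool-returning ones (`REQUIRE_CPU_FEATURE(SSE4_1, true)`), presumably returning true so that a bailed-out compilation does not also try the C fallback. A minimal standalone sketch of that macro trick, with hypothetical IsSupported/bailout stand-ins rather than the real CpuFeatures API:

#include <cstdio>

// Hypothetical stand-ins for CpuFeatures::IsSupported and the assembler bailout.
static bool IsSupported(const char* /*feature*/) { return false; }  // pretend: missing
static void bailout(const char* reason) { std::printf("bailout: %s\n", reason); }

// "return __VA_ARGS__;" expands to a plain "return;" when no extra argument is
// given, and to "return true;" when invoked as REQUIRE_FEATURE(name, true).
#define REQUIRE_FEATURE(name, ...)  \
  if (!IsSupported(#name)) {        \
    bailout("no " #name);           \
    return __VA_ARGS__;             \
  }

void EmitVoidOp() {
  REQUIRE_FEATURE(SSE4_1);        // bails out with "return;"
  std::puts("emitted void op");
}

bool EmitBoolOp() {
  REQUIRE_FEATURE(SSE4_1, true);  // bails out with "return true;"
  std::puts("emitted bool op");
  return true;
}

int main() {
  EmitVoidOp();
  return EmitBoolOp() ? 0 : 1;    // returns 0: the bool op reported "handled"
}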
......@@ -489,10 +489,10 @@ class LiftoffAssembler : public TurboAssembler {
// f64 unops.
inline void emit_f64_abs(DoubleRegister dst, DoubleRegister src);
inline void emit_f64_neg(DoubleRegister dst, DoubleRegister src);
inline void emit_f64_ceil(DoubleRegister dst, DoubleRegister src);
inline void emit_f64_floor(DoubleRegister dst, DoubleRegister src);
inline void emit_f64_trunc(DoubleRegister dst, DoubleRegister src);
inline void emit_f64_nearest_int(DoubleRegister dst, DoubleRegister src);
inline bool emit_f64_ceil(DoubleRegister dst, DoubleRegister src);
inline bool emit_f64_floor(DoubleRegister dst, DoubleRegister src);
inline bool emit_f64_trunc(DoubleRegister dst, DoubleRegister src);
inline bool emit_f64_nearest_int(DoubleRegister dst, DoubleRegister src);
inline void emit_f64_sqrt(DoubleRegister dst, DoubleRegister src);
// type conversions.
......
......@@ -624,6 +624,20 @@ class LiftoffCompiler {
EmitUnOp<kWasmI32, kWasmI32>(emit_with_c_fallback);
}
template <ValueType type>
void EmitFloatUnOpWithCFallback(
bool (LiftoffAssembler::*emit_fn)(DoubleRegister, DoubleRegister),
ExternalReference (*fallback_fn)()) {
auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
if ((asm_->*emit_fn)(dst.fp(), src.fp())) return;
ExternalReference ext_ref = fallback_fn();
ValueType sig_reps[] = {type};
FunctionSig sig(0, 1, sig_reps);
GenerateCCall(&dst, &sig, type, &src, ext_ref);
};
EmitUnOp<type, type>(emit_with_c_fallback);
}
enum TypeConversionTrapping : bool { kCanTrap = true, kNoTrap = false };
template <ValueType dst_type, ValueType src_type,
TypeConversionTrapping can_trap>
......@@ -676,6 +690,9 @@ class LiftoffCompiler {
__ emit_##fn(dst.fp(), src.fp()); \
}); \
break;
#define CASE_FLOAT_UNOP_WITH_CFALLBACK(type, fn) \
EmitFloatUnOpWithCFallback<kWasm##type>(&LiftoffAssembler::emit_##fn, \
&ExternalReference::wasm_##fn);
#define CASE_TYPE_CONVERSION(opcode, dst_type, src_type, ext_ref, can_trap) \
case WasmOpcode::kExpr##opcode: \
EmitTypeConversion<kWasm##dst_type, kWasm##src_type, can_trap>( \
......@@ -694,10 +711,10 @@ class LiftoffCompiler {
CASE_FLOAT_UNOP(F32Sqrt, F32, f32_sqrt)
CASE_FLOAT_UNOP(F64Abs, F64, f64_abs)
CASE_FLOAT_UNOP(F64Neg, F64, f64_neg)
CASE_FLOAT_UNOP(F64Ceil, F64, f64_ceil)
CASE_FLOAT_UNOP(F64Floor, F64, f64_floor)
CASE_FLOAT_UNOP(F64Trunc, F64, f64_trunc)
CASE_FLOAT_UNOP(F64NearestInt, F64, f64_nearest_int)
CASE_FLOAT_UNOP_WITH_CFALLBACK(F64, f64_ceil)
CASE_FLOAT_UNOP_WITH_CFALLBACK(F64, f64_floor)
CASE_FLOAT_UNOP_WITH_CFALLBACK(F64, f64_trunc)
CASE_FLOAT_UNOP_WITH_CFALLBACK(F64, f64_nearest_int)
CASE_FLOAT_UNOP(F64Sqrt, F64, f64_sqrt)
CASE_TYPE_CONVERSION(I32ConvertI64, I32, I64, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(I32SConvertF32, I32, F32, nullptr, kCanTrap)
......@@ -747,6 +764,7 @@ class LiftoffCompiler {
}
#undef CASE_I32_UNOP
#undef CASE_FLOAT_UNOP
#undef CASE_FLOAT_UNOP_WITH_CFALLBACK
#undef CASE_TYPE_CONVERSION
}
......
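For orientation, here is a hedged, self-contained sketch of the dispatch pattern that the new EmitFloatUnOpWithCFallback template above sets up. Plain doubles stand in for Liftoff registers, and FakeAssembler / wasm_f64_ceil_wrapper are hypothetical stand-ins, not the real V8 classes or external references: the compiler first asks the assembler to emit the operation inline; a false return means no suitable instruction was emitted, and the value is instead routed through a C function (roughly the role GenerateCCall plays here).

#include <cmath>
#include <cstdio>

// Stand-in for LiftoffAssembler: emitters return true when the operation was
// emitted inline, false when the caller must provide a C fallback.
struct FakeAssembler {
  bool has_rounding = false;                 // pretend: no rounding instruction
  bool emit_f64_ceil(double& dst, double src) {
    if (!has_rounding) return false;         // ask for the C fallback
    dst = std::ceil(src);                    // "inline instruction"
    return true;
  }
};

// Stand-in for the C runtime function reached via an ExternalReference.
static void wasm_f64_ceil_wrapper(double* value) { *value = std::ceil(*value); }

// Stand-in for LiftoffCompiler::EmitFloatUnOpWithCFallback.
static void EmitUnOpWithCFallback(FakeAssembler* asm_,
                                  bool (FakeAssembler::*emit_fn)(double&, double),
                                  void (*fallback_fn)(double*),
                                  double& dst, double src) {
  if ((asm_->*emit_fn)(dst, src)) return;    // handled inline
  double buffer = src;                       // round-trip the value through the
  fallback_fn(&buffer);                      // C function instead
  dst = buffer;
}

int main() {
  FakeAssembler assembler;                   // rounding unsupported here
  double result = 0;
  EmitUnOpWithCFallback(&assembler, &FakeAssembler::emit_f64_ceil,
                        &wasm_f64_ceil_wrapper, result, 2.25);
  std::printf("f64.ceil(2.25) -> %g\n", result);   // prints 3
}

On this reading, the MIPS32 emitters in the next hunk take the false path when FP64 mode is unavailable, while the SSE4.1-gated emitters above either emit roundsd or return true after bailing out.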
......@@ -823,41 +823,41 @@ FP_UNOP(f64_sqrt, sqrt_d)
#undef FP_BINOP
#undef FP_UNOP
void LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
bool LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode()) {
Ceil_d_d(dst, src);
} else {
BAILOUT("emit_f64_ceil");
return true;
}
return false;
}
void LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) {
bool LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) {
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode()) {
Floor_d_d(dst, src);
} else {
BAILOUT("emit_f64_floor");
return true;
}
return false;
}
void LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) {
bool LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) {
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode()) {
Trunc_d_d(dst, src);
} else {
BAILOUT("emit_f64_trunc");
return true;
}
return false;
}
void LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
bool LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
DoubleRegister src) {
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode()) {
Round_d_d(dst, src);
} else {
BAILOUT("emit_f64_nearest_int");
return true;
}
return false;
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
......
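A side note on the four rounding flavours wired up here (Ceil_d_d, Floor_d_d, Trunc_d_d, and Round_d_d for f64_nearest_int): wasm's f64.nearest rounds to the nearest integer with ties to even, which std::nearbyint provides in the default rounding mode. The snippet below is plain standard C++ for illustration only, not V8 code:

#include <cfenv>
#include <cmath>
#include <cstdio>

int main() {
  std::fesetround(FE_TONEAREST);  // default mode; ties go to the even integer
  const double values[] = {2.5, -2.5, 2.25};
  for (double v : values) {
    std::printf("v=%5.2f ceil=%4g floor=%4g trunc=%4g nearest=%4g\n", v,
                std::ceil(v), std::floor(v), std::trunc(v), std::nearbyint(v));
  }
  // Expected: nearest(2.5) == 2 and nearest(-2.5) == -2 (ties to even),
  // while ceil, floor, and trunc behave as their names suggest.
}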
......@@ -694,6 +694,11 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
instruction(dst, src); \
}
#define FP_UNOP_RETURN_TRUE(name, instruction) \
bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
instruction(dst, src); \
return true; \
}
FP_BINOP(f32_add, add_s)
FP_BINOP(f32_sub, sub_s)
......@@ -710,14 +715,15 @@ FP_BINOP(f64_sub, sub_d)
FP_BINOP(f64_mul, mul_d)
FP_BINOP(f64_div, div_d)
FP_UNOP(f64_abs, abs_d)
FP_UNOP(f64_ceil, Ceil_d_d)
FP_UNOP(f64_floor, Floor_d_d)
FP_UNOP(f64_trunc, Trunc_d_d)
FP_UNOP(f64_nearest_int, Round_d_d)
FP_UNOP_RETURN_TRUE(f64_ceil, Ceil_d_d)
FP_UNOP_RETURN_TRUE(f64_floor, Floor_d_d)
FP_UNOP_RETURN_TRUE(f64_trunc, Trunc_d_d)
FP_UNOP_RETURN_TRUE(f64_nearest_int, Round_d_d)
FP_UNOP(f64_sqrt, sqrt_d)
#undef FP_BINOP
#undef FP_UNOP
#undef FP_UNOP_RETURN_TRUE
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
......
......@@ -130,6 +130,11 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
BAILOUT("fp unop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(name) \
bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
BAILOUT("fp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_I32_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register src, \
Register amount, LiftoffRegList pinned) { \
......@@ -185,10 +190,10 @@ UNIMPLEMENTED_FP_BINOP(f64_min)
UNIMPLEMENTED_FP_BINOP(f64_max)
UNIMPLEMENTED_FP_UNOP(f64_abs)
UNIMPLEMENTED_FP_UNOP(f64_neg)
UNIMPLEMENTED_FP_UNOP(f64_ceil)
UNIMPLEMENTED_FP_UNOP(f64_floor)
UNIMPLEMENTED_FP_UNOP(f64_trunc)
UNIMPLEMENTED_FP_UNOP(f64_nearest_int)
UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_ceil)
UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_floor)
UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_trunc)
UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_nearest_int)
UNIMPLEMENTED_FP_UNOP(f64_sqrt)
#undef UNIMPLEMENTED_GP_BINOP
......@@ -196,6 +201,7 @@ UNIMPLEMENTED_FP_UNOP(f64_sqrt)
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
#undef UNIMPLEMENTED_FP_UNOP
#undef UNIMPLEMENTED_FP_UNOP_RETURN_TRUE
#undef UNIMPLEMENTED_I32_SHIFTOP
#undef UNIMPLEMENTED_I64_SHIFTOP
......
......@@ -130,6 +130,11 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
BAILOUT("fp unop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(name) \
bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
BAILOUT("fp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_I32_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register src, \
Register amount, LiftoffRegList pinned) { \
......@@ -185,10 +190,10 @@ UNIMPLEMENTED_FP_BINOP(f64_min)
UNIMPLEMENTED_FP_BINOP(f64_max)
UNIMPLEMENTED_FP_UNOP(f64_abs)
UNIMPLEMENTED_FP_UNOP(f64_neg)
UNIMPLEMENTED_FP_UNOP(f64_ceil)
UNIMPLEMENTED_FP_UNOP(f64_floor)
UNIMPLEMENTED_FP_UNOP(f64_trunc)
UNIMPLEMENTED_FP_UNOP(f64_nearest_int)
UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_ceil)
UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_floor)
UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_trunc)
UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_nearest_int)
UNIMPLEMENTED_FP_UNOP(f64_sqrt)
#undef UNIMPLEMENTED_GP_BINOP
......@@ -196,6 +201,7 @@ UNIMPLEMENTED_FP_UNOP(f64_sqrt)
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
#undef UNIMPLEMENTED_FP_UNOP
#undef UNIMPLEMENTED_FP_UNOP_RETURN_TRUE
#undef UNIMPLEMENTED_I32_SHIFTOP
#undef UNIMPLEMENTED_I64_SHIFTOP
......
......@@ -1015,25 +1015,29 @@ void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
}
}
void LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
REQUIRE_CPU_FEATURE(SSE4_1);
bool LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
REQUIRE_CPU_FEATURE(SSE4_1, true);
Roundsd(dst, src, kRoundUp);
return true;
}
void LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) {
REQUIRE_CPU_FEATURE(SSE4_1);
bool LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) {
REQUIRE_CPU_FEATURE(SSE4_1, true);
Roundsd(dst, src, kRoundDown);
return true;
}
void LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) {
REQUIRE_CPU_FEATURE(SSE4_1);
bool LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) {
REQUIRE_CPU_FEATURE(SSE4_1, true);
Roundsd(dst, src, kRoundToZero);
return true;
}
void LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
bool LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
DoubleRegister src) {
REQUIRE_CPU_FEATURE(SSE4_1);
REQUIRE_CPU_FEATURE(SSE4_1, true);
Roundsd(dst, src, kRoundToNearest);
return true;
}
void LiftoffAssembler::emit_f64_sqrt(DoubleRegister dst, DoubleRegister src) {
......