Commit 96c90f6c authored by ahaas, committed by Commit bot

[wasm] Use the Float64Max/Min machine operators to implement F64Max/Min.

I had to adjust Float64Max/Min on x64 slightly to return the default
wasm NaN (0x7FF8000000000000) instead of the all-ones NaN
(0xFFFFFFFFFFFFFFFF).
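
[Editor's illustration, not part of this CL: a minimal standalone C++ sketch of the two encodings involved. 0xFFFFFFFFFFFFFFFF, the value `pcmpeqd reg, reg` materializes, is a quiet NaN with every payload bit set, while 0x7FF8000000000000 carries only the top fraction bit, the payload the commit message calls the default wasm NaN. The helper name FromBits is illustrative only; the 0.0/0.0 division mirrors what the new xorpd/divsd (and xorps/divss) out-of-line code computes and yields a quiet NaN with that default payload, with the sign bit left to the hardware's default-NaN convention.]

// Illustration only; not V8 code. Shows the NaN encodings discussed above.
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

static double FromBits(uint64_t bits) {
  double d;
  std::memcpy(&d, &bits, sizeof d);  // portable bit_cast
  return d;
}

int main() {
  const uint64_t kAllOnesNaN = 0xFFFFFFFFFFFFFFFFull;      // pcmpeqd result
  const uint64_t kDefaultWasmNaN = 0x7FF8000000000000ull;  // quiet bit only
  std::printf("all-ones is NaN: %d\n", std::isnan(FromBits(kAllOnesNaN)));
  std::printf("default is NaN:  %d\n", std::isnan(FromBits(kDefaultWasmNaN)));

  // What the new out-of-line code computes: 0.0 / 0.0. The division is done
  // at run time (volatile) and produces a quiet NaN with the default payload;
  // only the sign bit may differ between environments.
  volatile double zero = 0.0;
  double generated = zero / zero;
  uint64_t bits;
  std::memcpy(&bits, &generated, sizeof bits);
  std::printf("0.0/0.0 bits: 0x%016llx\n",
              static_cast<unsigned long long>(bits));
  return 0;
}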

R=titzer@chromium.org, bmeurer@chromium.org

Review-Url: https://codereview.chromium.org/2204963002
Cr-Commit-Position: refs/heads/master@{#38410}
parent 265399ed
......@@ -197,17 +197,33 @@ class OutOfLineLoadInteger final : public OutOfLineCode {
Register const result_;
};
class OutOfLineLoadNaN final : public OutOfLineCode {
class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
public:
OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final { __ pcmpeqd(result_, result_); }
void Generate() final {
__ xorps(result_, result_);
__ divss(result_, result_);
}
private:
XMMRegister const result_;
};
class OutOfLineLoadFloat64NaN final : public OutOfLineCode {
public:
OutOfLineLoadFloat64NaN(CodeGenerator* gen, XMMRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
__ xorpd(result_, result_);
__ divsd(result_, result_);
}
private:
XMMRegister const result_;
};
class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
......@@ -270,7 +286,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} // namespace
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN) \
do { \
auto result = i.OutputDoubleRegister(); \
auto offset = i.InputRegister(0); \
......@@ -1039,7 +1055,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool = new (zone()) OutOfLineLoadNaN(this, i.OutputDoubleRegister());
auto ool =
new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
__ j(parity_even, ool->entry());
__ j(above, &done_compare, Label::kNear);
__ j(below, &compare_swap, Label::kNear);
......@@ -1063,7 +1080,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool = new (zone()) OutOfLineLoadNaN(this, i.OutputDoubleRegister());
auto ool =
new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
__ j(parity_even, ool->entry());
__ j(below, &done_compare, Label::kNear);
__ j(above, &compare_swap, Label::kNear);
......@@ -1496,10 +1514,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
break;
case kCheckedLoadFloat32:
ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
ASSEMBLE_CHECKED_LOAD_FLOAT(movss, OutOfLineLoadFloat32NaN);
break;
case kCheckedLoadFloat64:
ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
ASSEMBLE_CHECKED_LOAD_FLOAT(movsd, OutOfLineLoadFloat64NaN);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
......
......@@ -605,11 +605,13 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
case wasm::kExprF32Min:
return BuildF32Min(left, right);
case wasm::kExprF64Min:
return BuildF64Min(left, right);
op = m->Float64Min();
break;
case wasm::kExprF32Max:
return BuildF32Max(left, right);
case wasm::kExprF64Max:
return BuildF64Max(left, right);
op = m->Float64Max();
break;
case wasm::kExprF64Pow:
return BuildF64Pow(left, right);
case wasm::kExprF64Atan2:
......@@ -1230,46 +1232,6 @@ Node* WasmGraphBuilder::BuildF32Max(Node* left, Node* right) {
Binop(wasm::kExprF32Mul, left, Float32Constant(1.0)))));
}
Node* WasmGraphBuilder::BuildF64Min(Node* left, Node* right) {
Diamond left_le_right(graph(), jsgraph()->common(),
Binop(wasm::kExprF64Le, left, right));
Diamond right_lt_left(graph(), jsgraph()->common(),
Binop(wasm::kExprF64Lt, right, left));
Diamond left_is_not_nan(graph(), jsgraph()->common(),
Binop(wasm::kExprF64Eq, left, left));
return left_le_right.Phi(
wasm::kAstF64, left,
right_lt_left.Phi(
wasm::kAstF64, right,
left_is_not_nan.Phi(
wasm::kAstF64,
Binop(wasm::kExprF64Mul, right, Float64Constant(1.0)),
Binop(wasm::kExprF64Mul, left, Float64Constant(1.0)))));
}
Node* WasmGraphBuilder::BuildF64Max(Node* left, Node* right) {
Diamond left_ge_right(graph(), jsgraph()->common(),
Binop(wasm::kExprF64Ge, left, right));
Diamond right_gt_left(graph(), jsgraph()->common(),
Binop(wasm::kExprF64Lt, right, left));
Diamond left_is_not_nan(graph(), jsgraph()->common(),
Binop(wasm::kExprF64Eq, left, left));
return left_ge_right.Phi(
wasm::kAstF64, left,
right_gt_left.Phi(
wasm::kAstF64, right,
left_is_not_nan.Phi(
wasm::kAstF64,
Binop(wasm::kExprF64Mul, right, Float64Constant(1.0)),
Binop(wasm::kExprF64Mul, left, Float64Constant(1.0)))));
}
Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input,
wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
......
......@@ -252,8 +252,6 @@ class WasmGraphBuilder {
Node* BuildF64CopySign(Node* left, Node* right);
Node* BuildF32Min(Node* left, Node* right);
Node* BuildF32Max(Node* left, Node* right);
Node* BuildF64Min(Node* left, Node* right);
Node* BuildF64Max(Node* left, Node* right);
Node* BuildI32SConvertF32(Node* input, wasm::WasmCodePosition position);
Node* BuildI32SConvertF64(Node* input, wasm::WasmCodePosition position);
Node* BuildI32UConvertF32(Node* input, wasm::WasmCodePosition position);
......
......@@ -164,18 +164,33 @@ class OutOfLineLoadZero final : public OutOfLineCode {
Register const result_;
};
class OutOfLineLoadNaN final : public OutOfLineCode {
class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
public:
OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final { __ Pcmpeqd(result_, result_); }
void Generate() final {
__ Xorps(result_, result_);
__ Divss(result_, result_);
}
private:
XMMRegister const result_;
};
class OutOfLineLoadFloat64NaN final : public OutOfLineCode {
public:
OutOfLineLoadFloat64NaN(CodeGenerator* gen, XMMRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
__ Xorpd(result_, result_);
__ Divsd(result_, result_);
}
private:
XMMRegister const result_;
};
class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
......@@ -381,7 +396,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} \
} while (0)
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN) \
do { \
auto result = i.OutputDoubleRegister(); \
auto buffer = i.InputRegister(0); \
......@@ -1289,7 +1304,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ Ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool = new (zone()) OutOfLineLoadNaN(this, i.OutputDoubleRegister());
auto ool =
new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
__ j(parity_even, ool->entry());
__ j(above, &done_compare, Label::kNear);
__ j(below, &compare_swap, Label::kNear);
......@@ -1313,7 +1329,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ Ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool = new (zone()) OutOfLineLoadNaN(this, i.OutputDoubleRegister());
auto ool =
new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
__ j(parity_even, ool->entry());
__ j(below, &done_compare, Label::kNear);
__ j(above, &compare_swap, Label::kNear);
......@@ -1965,10 +1982,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_CHECKED_LOAD_INTEGER(movq);
break;
case kCheckedLoadFloat32:
ASSEMBLE_CHECKED_LOAD_FLOAT(Movss);
ASSEMBLE_CHECKED_LOAD_FLOAT(Movss, OutOfLineLoadFloat32NaN);
break;
case kCheckedLoadFloat64:
ASSEMBLE_CHECKED_LOAD_FLOAT(Movsd);
ASSEMBLE_CHECKED_LOAD_FLOAT(Movsd, OutOfLineLoadFloat64NaN);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(movb);
......
......@@ -4400,7 +4400,8 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
DeoptimizeIf(not_equal, instr,
DeoptimizeReason::kNotAHeapNumberUndefined);
__ pcmpeqd(result_reg, result_reg);
__ xorpd(result_reg, result_reg);
__ divsd(result_reg, result_reg);
__ jmp(&done, Label::kNear);
}
} else {
......
......@@ -4693,7 +4693,8 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
DeoptimizeIf(not_equal, instr,
DeoptimizeReason::kNotAHeapNumberUndefined);
__ Pcmpeqd(result_reg, result_reg);
__ Xorpd(result_reg, result_reg);
__ Divsd(result_reg, result_reg);
__ jmp(&done, Label::kNear);
}
} else {
......
......@@ -352,13 +352,23 @@ static inline double ExecuteF64Sub(double a, double b, TrapReason* trap) {
static inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
if (std::isnan(a)) return quiet(a);
if (std::isnan(b)) return quiet(b);
return std::min(a, b);
if ((a == 0.0) && (b == 0.0) && (copysign(1.0, a) != copysign(1.0, b))) {
// a and b are zero, and the sign differs: return -0.0.
return -0.0;
} else {
return (a < b) ? a : b;
}
}
static inline double ExecuteF64Max(double a, double b, TrapReason* trap) {
if (std::isnan(a)) return quiet(a);
if (std::isnan(b)) return quiet(b);
return std::max(a, b);
if ((a == 0.0) && (b == 0.0) && (copysign(1.0, a) != copysign(1.0, b))) {
// a and b are zero, and the sign differs: return 0.0.
return 0.0;
} else {
return (a > b) ? a : b;
}
}
static inline double ExecuteF64CopySign(double a, double b, TrapReason* trap) {
......
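
[Editor's sketch, not part of this CL: the interpreter hunk above spells out the wasm min rule instead of calling std::min because the standard function returns the wrong zero for operands of opposite sign and is order-dependent for NaNs. The snippet below assumes plain C++ doubles; WasmF64Min is a hypothetical helper, not the interpreter function, and unlike ExecuteF64Min it returns a fresh quiet NaN instead of quieting the input NaN. std::signbit is used where the interpreter compares copysign(1.0, a) != copysign(1.0, b); the two are equivalent for distinguishing the zeros here.]

#include <algorithm>
#include <cassert>
#include <cmath>
#include <limits>

// Hypothetical helper mirroring the rules ExecuteF64Min implements above.
double WasmF64Min(double a, double b) {
  if (std::isnan(a) || std::isnan(b)) {
    return std::numeric_limits<double>::quiet_NaN();  // NaN propagates
  }
  if (a == 0.0 && b == 0.0 && std::signbit(a) != std::signbit(b)) {
    return -0.0;  // +0.0 vs -0.0: wasm min picks the negative zero
  }
  return a < b ? a : b;
}

int main() {
  // std::min(0.0, -0.0) returns its first argument (+0.0) because
  // -0.0 < 0.0 is false; wasm requires -0.0 here.
  assert(!std::signbit(std::min(0.0, -0.0)));
  assert(std::signbit(WasmF64Min(0.0, -0.0)));
  // std::min is order-dependent for NaNs; wasm always yields a NaN.
  assert(std::min(1.0, std::nan("")) == 1.0);
  assert(std::isnan(WasmF64Min(1.0, std::nan(""))));
  return 0;
}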
......@@ -956,6 +956,7 @@ class MacroAssembler: public Assembler {
AVX_OP2_XO(Addsd, addsd)
AVX_OP2_XO(Subsd, subsd)
AVX_OP2_XO(Mulsd, mulsd)
AVX_OP2_XO(Divss, divss)
AVX_OP2_XO(Divsd, divsd)
AVX_OP2_XO(Andps, andps)
AVX_OP2_XO(Andpd, andpd)
......
......@@ -2571,19 +2571,19 @@ WASM_EXEC_TEST(F64Min) {
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) {
double expected;
if (*i < *j) {
expected = *i;
} else if (*j < *i) {
expected = *j;
} else if (*i != *i) {
// If *i or *j is NaN, then the result is NaN.
expected = *i;
double result = r.Call(*i, *j);
if (std::isnan(*i) || std::isnan(*j)) {
// If one of the inputs is nan, the result should be nan.
CHECK(std::isnan(result));
} else if ((*i == 0.0) && (*j == 0.0) &&
(copysign(1.0, *i) != copysign(1.0, *j))) {
// If one input is +0.0 and the other input is -0.0, the result should
// be -0.0.
CHECK_EQ(bit_cast<uint64_t>(-0.0), bit_cast<uint64_t>(result));
} else {
expected = *j;
double expected = *i < *j ? *i : *j;
CHECK_DOUBLE_EQ(expected, result);
}
CHECK_DOUBLE_EQ(expected, r.Call(*i, *j));
}
}
}
......@@ -2619,92 +2619,23 @@ WASM_EXEC_TEST(F64Max) {
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) {
double expected;
if (*i > *j) {
expected = *i;
} else if (*j > *i) {
expected = *j;
} else if (*i != *i) {
// If *i or *j is NaN, then the result is NaN.
expected = *i;
double result = r.Call(*i, *j);
if (std::isnan(*i) || std::isnan(*j)) {
// If one of the inputs is nan, the result should be nan.
CHECK(std::isnan(result));
} else if ((*i == 0.0) && (*j == 0.0) &&
(copysign(1.0, *i) != copysign(1.0, *j))) {
// If one input is +0.0 and the other input is -0.0, the result should
// be +0.0.
CHECK_EQ(bit_cast<uint64_t>(0.0), bit_cast<uint64_t>(result));
} else {
expected = *j;
double expected = *i > *j ? *i : *j;
CHECK_DOUBLE_EQ(expected, result);
}
CHECK_DOUBLE_EQ(expected, r.Call(*i, *j));
}
}
}
// TODO(ahaas): Fix on mips and reenable.
#if !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
WASM_EXEC_TEST(F32Min_Snan) {
// Test that the instruction does not return a signalling NaN.
{
WasmRunner<float> r(execution_mode);
BUILD(r,
WASM_F32_MIN(WASM_F32(bit_cast<float>(0xff80f1e2)), WASM_F32(57.67)));
CHECK_EQ(0xffc0f1e2, bit_cast<uint32_t>(r.Call()));
}
{
WasmRunner<float> r(execution_mode);
BUILD(r,
WASM_F32_MIN(WASM_F32(45.73), WASM_F32(bit_cast<float>(0x7f80f1e2))));
CHECK_EQ(0x7fc0f1e2, bit_cast<uint32_t>(r.Call()));
}
}
WASM_EXEC_TEST(F32Max_Snan) {
// Test that the instruction does not return a signalling NaN.
{
WasmRunner<float> r(execution_mode);
BUILD(r,
WASM_F32_MAX(WASM_F32(bit_cast<float>(0xff80f1e2)), WASM_F32(57.67)));
CHECK_EQ(0xffc0f1e2, bit_cast<uint32_t>(r.Call()));
}
{
WasmRunner<float> r(execution_mode);
BUILD(r,
WASM_F32_MAX(WASM_F32(45.73), WASM_F32(bit_cast<float>(0x7f80f1e2))));
CHECK_EQ(0x7fc0f1e2, bit_cast<uint32_t>(r.Call()));
}
}
WASM_EXEC_TEST(F64Min_Snan) {
// Test that the instruction does not return a signalling NaN.
{
WasmRunner<double> r(execution_mode);
BUILD(r, WASM_F64_MIN(WASM_F64(bit_cast<double>(0xfff000000000f1e2)),
WASM_F64(57.67)));
CHECK_EQ(0xfff800000000f1e2, bit_cast<uint64_t>(r.Call()));
}
{
WasmRunner<double> r(execution_mode);
BUILD(r, WASM_F64_MIN(WASM_F64(45.73),
WASM_F64(bit_cast<double>(0x7ff000000000f1e2))));
CHECK_EQ(0x7ff800000000f1e2, bit_cast<uint64_t>(r.Call()));
}
}
WASM_EXEC_TEST(F64Max_Snan) {
// Test that the instruction does not return a signalling NaN.
{
WasmRunner<double> r(execution_mode);
BUILD(r, WASM_F64_MAX(WASM_F64(bit_cast<double>(0xfff000000000f1e2)),
WASM_F64(57.67)));
CHECK_EQ(0xfff800000000f1e2, bit_cast<uint64_t>(r.Call()));
}
{
WasmRunner<double> r(execution_mode);
BUILD(r, WASM_F64_MAX(WASM_F64(45.73),
WASM_F64(bit_cast<double>(0x7ff000000000f1e2))));
CHECK_EQ(0x7ff800000000f1e2, bit_cast<uint64_t>(r.Call()));
}
}
#endif
WASM_EXEC_TEST(I32SConvertF32) {
WasmRunner<int32_t> r(execution_mode, MachineType::Float32());
BUILD(r, WASM_I32_SCONVERT_F32(WASM_GET_LOCAL(0)));
......