Commit eff959bb authored by balazs.kilvady's avatar balazs.kilvady Committed by Commit bot

MIPS: Followup '[turbofan] Introduce new operators Float32SubPreserveNan and...

MIPS: Followup '[turbofan] Introduce new operators Float32SubPreserveNan and Float64SubPreserveNan'.

Port 481502da

Float32SubMinusZero and Float64SubMinusZero tests are failing because MIPS does not preserve NaN payload according to Wasm spec. Implemented macro-assembler methods that check for NaN operands, and return the qNaN value with preserved payload and sign bits.

TEST=cctest/test-run-wasm/Run_WasmFloat32SubMinusZero, cctest/test-run-wasm/Run_WasmFloat64SubMinusZero

BUG=

patch from issue 2019693002 at patchset 140001 (http://crrev.com/2019693002#ps140001)

R=ahaas@chromium.org

Review-Url: https://codereview.chromium.org/2066483008
Cr-Commit-Position: refs/heads/master@{#37105}
parent 7d5969da
......@@ -1008,6 +1008,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMipsSubPreserveNanS:
__ SubNanPreservePayloadAndSign_s(i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMipsMulS:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
......@@ -1074,6 +1079,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMipsSubPreserveNanD:
__ SubNanPreservePayloadAndSign_d(i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMipsMulD:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
......
......@@ -46,6 +46,7 @@ namespace compiler {
V(MipsCmpS) \
V(MipsAddS) \
V(MipsSubS) \
V(MipsSubPreserveNanS) \
V(MipsMulS) \
V(MipsDivS) \
V(MipsModS) \
......@@ -56,6 +57,7 @@ namespace compiler {
V(MipsCmpD) \
V(MipsAddD) \
V(MipsSubD) \
V(MipsSubPreserveNanD) \
V(MipsMulD) \
V(MipsDivD) \
V(MipsModD) \
......
......@@ -755,7 +755,7 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
}
// Lowers a Float32SubPreserveNan node to the NaN-payload-preserving MIPS
// sequence (kMipsSubPreserveNanS -> SubNanPreservePayloadAndSign_s).
// Plain kMipsSubS must NOT be used here: hardware sub_s does not preserve
// the payload/sign of a NaN operand as the Wasm spec requires.  The
// superseded kMipsSubS dispatch line (a diff leftover that would have
// emitted the subtraction twice) is removed.
void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
  VisitRRR(this, kMipsSubPreserveNanS, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
......@@ -777,7 +777,7 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
}
// Lowers a Float64SubPreserveNan node to the NaN-payload-preserving MIPS
// sequence (kMipsSubPreserveNanD -> SubNanPreservePayloadAndSign_d).
// Plain kMipsSubD must NOT be used here: hardware sub_d does not preserve
// the payload/sign of a NaN operand as the Wasm spec requires.  The
// superseded kMipsSubD dispatch line (a diff leftover that would have
// emitted the subtraction twice) is removed.
void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
  VisitRRR(this, kMipsSubPreserveNanD, node);
}
void InstructionSelector::VisitFloat32Mul(Node* node) {
......
......@@ -1172,6 +1172,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMips64SubPreserveNanS:
__ SubNanPreservePayloadAndSign_s(i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMips64MulS:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
......@@ -1222,6 +1227,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMips64SubPreserveNanD:
__ SubNanPreservePayloadAndSign_d(i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMips64MulD:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
......
......@@ -61,6 +61,7 @@ namespace compiler {
V(Mips64CmpS) \
V(Mips64AddS) \
V(Mips64SubS) \
V(Mips64SubPreserveNanS) \
V(Mips64MulS) \
V(Mips64DivS) \
V(Mips64ModS) \
......@@ -71,6 +72,7 @@ namespace compiler {
V(Mips64CmpD) \
V(Mips64AddD) \
V(Mips64SubD) \
V(Mips64SubPreserveNanD) \
V(Mips64MulD) \
V(Mips64DivD) \
V(Mips64ModD) \
......
......@@ -1160,7 +1160,7 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
}
// Lowers a Float32SubPreserveNan node to the NaN-payload-preserving MIPS64
// sequence (kMips64SubPreserveNanS -> SubNanPreservePayloadAndSign_s).
// Plain kMips64SubS must NOT be used here: hardware sub_s does not preserve
// the payload/sign of a NaN operand as the Wasm spec requires.  The
// superseded kMips64SubS dispatch line (a diff leftover that would have
// emitted the subtraction twice) is removed.
void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
  VisitRRR(this, kMips64SubPreserveNanS, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
......@@ -1182,7 +1182,7 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
}
// Lowers a Float64SubPreserveNan node to the NaN-payload-preserving MIPS64
// sequence (kMips64SubPreserveNanD -> SubNanPreservePayloadAndSign_d).
// Plain kMips64SubD must NOT be used here: hardware sub_d does not preserve
// the payload/sign of a NaN operand as the Wasm spec requires.  The
// superseded kMips64SubD dispatch line (a diff leftover that would have
// emitted the subtraction twice) is removed.
void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
  VisitRRR(this, kMips64SubPreserveNanD, node);
}
void InstructionSelector::VisitFloat32Mul(Node* node) {
......
......@@ -18,6 +18,20 @@
namespace v8 {
namespace internal {
// Floating point constants.
// Masks and shifts for dissecting IEEE-754 bit patterns when rebuilding a
// NaN result with a preserved payload (see SubNanPreservePayloadAndSign_*).
// NOTE: in this 32-bit file the double-precision masks are uint32_t and are
// applied to the HIGH word of the double only — the sign, the exponent and
// the quiet-NaN bit all live in the upper 32 bits of a double.
const uint32_t kDoubleSignMask = HeapNumber::kSignMask;
const uint32_t kDoubleExponentShift = HeapNumber::kExponentShift;
// The quiet-NaN bit is the most-significant mantissa bit, one position
// below the exponent field.
const uint32_t kDoubleNaNShift = kDoubleExponentShift - 1;
const uint32_t kDoubleNaNMask =
kDoubleSignMask | HeapNumber::kExponentMask | (1 << kDoubleNaNShift);
// Single-precision equivalents, built from the binary32 constants.
const uint32_t kSingleSignMask = kBinary32SignMask;
const uint32_t kSingleExponentMask = kBinary32ExponentMask;
const uint32_t kSingleExponentShift = kBinary32ExponentShift;
const uint32_t kSingleNaNShift = kSingleExponentShift - 1;
const uint32_t kSingleNaNMask =
kSingleSignMask | kSingleExponentMask | (1 << kSingleNaNShift);
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
......@@ -4712,6 +4726,74 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&done);
}
// fd = fs - ft (single precision), but if an operand is a NaN the result is
// a quiet NaN carrying that operand's sign and payload bits, as the Wasm
// spec requires.  Hardware sub_s on MIPS does not preserve the payload, so
// the NaN case is patched up manually via the integer pipeline.
// Clobbers t8 and t9; uses kLithiumScratchDouble when fd aliases an input.
void MacroAssembler::SubNanPreservePayloadAndSign_s(FloatRegister fd,
FloatRegister fs,
FloatRegister ft) {
// Compute into a scratch register when fd overlaps an input, so both
// operands stay readable for the NaN fixup below.
FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
Label check_nan, save_payload, done;
Register scratch1 = t8;
Register scratch2 = t9;
sub_s(dest, fs, ft);
// Check if the result of subtraction is NaN: the second label of BranchF32
// is taken when fs and ft compare unordered, i.e. when an operand is NaN.
BranchF32(nullptr, &check_nan, eq, fs, ft);
Branch(USE_DELAY_SLOT, &done);
// Delay slot: move the plain result into fd (nop if dest already is fd).
dest.is(fd) ? nop() : mov_s(fd, dest);
bind(&check_nan);
// Check if first operand is a NaN (self-compare is unordered iff NaN);
// speculatively fetch its raw bits first.
mfc1(scratch1, fs);
BranchF32(nullptr, &save_payload, eq, fs, fs);
// Second operand must be a NaN.
mfc1(scratch1, ft);
bind(&save_payload);
// Preserve payload: keep the NaN operand's sign + payload bits (scratch1)
// and combine them with the exponent + quiet bit of the hardware result
// (scratch2 masked with kSingleNaNMask).
And(scratch1, scratch1,
Operand(kSingleSignMask | ((1 << kSingleNaNShift) - 1)));
mfc1(scratch2, dest);
And(scratch2, scratch2, Operand(kSingleNaNMask));
Or(scratch2, scratch2, scratch1);
mtc1(scratch2, fd);
bind(&done);
}
// fd = fs - ft (double precision), but if an operand is a NaN the result is
// a quiet NaN carrying that operand's sign and payload bits (Wasm spec).
// On MIPS32 a double lives in a register pair: mov_s/Move_s transfer the
// LOW word (low 32 payload bits) and Mfhc1/Mthc1 access the HIGH word
// (sign, exponent, quiet bit, upper payload bits).
// Clobbers t8 and t9; uses kLithiumScratchDouble when fd aliases an input.
void MacroAssembler::SubNanPreservePayloadAndSign_d(DoubleRegister fd,
DoubleRegister fs,
DoubleRegister ft) {
// Compute into a scratch register when fd overlaps an input, so both
// operands stay readable for the NaN fixup below.
FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
Label check_nan, save_payload, done;
Register scratch1 = t8;
Register scratch2 = t9;
sub_d(dest, fs, ft);
// Check if the result of subtraction is NaN: the second label of BranchF64
// is taken when fs and ft compare unordered, i.e. when an operand is NaN.
BranchF64(nullptr, &check_nan, eq, fs, ft);
Branch(USE_DELAY_SLOT, &done);
// Delay slot: move the plain result into fd (nop if dest already is fd).
dest.is(fd) ? nop() : mov_d(fd, dest);
bind(&check_nan);
// Check if first operand is a NaN (self-compare is unordered iff NaN).
// Speculatively grab fs's high word and copy its low payload word into
// dest's low register; only the HIGH word of dest is rebuilt below.
Mfhc1(scratch1, fs);
mov_s(dest, fs);
BranchF64(nullptr, &save_payload, eq, fs, fs);
// Second operand must be a NaN.
Mfhc1(scratch1, ft);
mov_s(dest, ft);
bind(&save_payload);
// Preserve payload: keep the NaN operand's sign + upper payload bits
// (scratch1) and combine them with the exponent + quiet bit of the
// hardware result's high word (scratch2 masked with kDoubleNaNMask).
And(scratch1, scratch1,
Operand(kDoubleSignMask | ((1 << kDoubleNaNShift) - 1)));
Mfhc1(scratch2, dest);
And(scratch2, scratch2, Operand(kDoubleNaNMask));
Or(scratch2, scratch2, scratch1);
// Low word of the NaN (payload low bits) travels via dest; then install
// the rebuilt high word.
Move_s(fd, dest);
Mthc1(scratch2, fd);
bind(&done);
}
void MacroAssembler::CompareMapAndBranch(Register obj,
Register scratch,
......
......@@ -871,6 +871,12 @@ class MacroAssembler: public Assembler {
void Floor_w_d(FPURegister fd, FPURegister fs);
void Ceil_w_d(FPURegister fd, FPURegister fs);
// Preserve value of a NaN operand
void SubNanPreservePayloadAndSign_s(FPURegister fd, FPURegister fs,
FPURegister ft);
void SubNanPreservePayloadAndSign_d(FPURegister fd, FPURegister fs,
FPURegister ft);
// FP32 mode: Move the general purpose register into
// the high part of the double-register pair.
// FP64 mode: Move the general-purpose register into
......
......@@ -17,6 +17,20 @@
namespace v8 {
namespace internal {
// Floating point constants.
// Masks and shifts for dissecting IEEE-754 bit patterns when rebuilding a
// NaN result with a preserved payload (see SubNanPreservePayloadAndSign_*).
// On MIPS64 the double-precision masks are full 64-bit values (the whole
// double fits in one FPU register).
const uint64_t kDoubleSignMask = Double::kSignMask;
// The exponent field starts right above the mantissa, so its shift equals
// the mantissa width (presumably 52 — defined by HeapNumber).
const uint32_t kDoubleExponentShift = HeapNumber::kMantissaBits;
// The quiet-NaN bit is the most-significant mantissa bit, one position
// below the exponent field.
const uint32_t kDoubleNaNShift = kDoubleExponentShift - 1;
const uint64_t kDoubleNaNMask =
kDoubleSignMask | Double::kExponentMask | (1L << kDoubleNaNShift);
// Single-precision equivalents, built from the binary32 constants.
const uint32_t kSingleSignMask = kBinary32SignMask;
const uint32_t kSingleExponentMask = kBinary32ExponentMask;
const uint32_t kSingleExponentShift = kBinary32ExponentShift;
const uint32_t kSingleNaNShift = kSingleExponentShift - 1;
const uint32_t kSingleNaNMask =
kSingleSignMask | kSingleExponentMask | (1 << kSingleNaNShift);
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
......@@ -4851,6 +4865,72 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
sdc1(double_result, MemOperand(scratch1, 0));
}
// fd = fs - ft (single precision), but if an operand is a NaN the result is
// a quiet NaN carrying that operand's sign and payload bits, as the Wasm
// spec requires.  Hardware sub_s on MIPS does not preserve the payload, so
// the NaN case is patched up manually via the integer pipeline.
// Clobbers t8 and t9; uses kLithiumScratchDouble when fd aliases an input.
void MacroAssembler::SubNanPreservePayloadAndSign_s(FPURegister fd,
FPURegister fs,
FPURegister ft) {
// Compute into a scratch register when fd overlaps an input, so both
// operands stay readable for the NaN fixup below.
FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
Label check_nan, save_payload, done;
Register scratch1 = t8;
Register scratch2 = t9;
sub_s(dest, fs, ft);
// Check if the result of subtraction is NaN: the second label of BranchF32
// is taken when fs and ft compare unordered, i.e. when an operand is NaN.
BranchF32(nullptr, &check_nan, eq, fs, ft);
Branch(USE_DELAY_SLOT, &done);
// Delay slot: move the plain result into fd (nop if dest already is fd).
dest.is(fd) ? nop() : mov_s(fd, dest);
bind(&check_nan);
// Check if first operand is a NaN (self-compare is unordered iff NaN);
// speculatively fetch its raw bits first.
mfc1(scratch1, fs);
BranchF32(nullptr, &save_payload, eq, fs, fs);
// Second operand must be a NaN.
mfc1(scratch1, ft);
bind(&save_payload);
// Preserve payload: keep the NaN operand's sign + payload bits (scratch1)
// and combine them with the exponent + quiet bit of the hardware result
// (scratch2 masked with kSingleNaNMask).
And(scratch1, scratch1,
Operand(kSingleSignMask | ((1 << kSingleNaNShift) - 1)));
mfc1(scratch2, dest);
And(scratch2, scratch2, Operand(kSingleNaNMask));
Or(scratch2, scratch2, scratch1);
mtc1(scratch2, fd);
bind(&done);
}
// fd = fs - ft (double precision), but if an operand is a NaN the result is
// a quiet NaN carrying that operand's sign and payload bits (Wasm spec).
// On MIPS64 the full 64-bit pattern moves through dmfc1/dmtc1 in one piece.
// Clobbers t8, t9 and at; uses kLithiumScratchDouble when fd aliases an
// input.
void MacroAssembler::SubNanPreservePayloadAndSign_d(FPURegister fd,
FPURegister fs,
FPURegister ft) {
// Compute into a scratch register when fd overlaps an input, so both
// operands stay readable for the NaN fixup below.
FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
Label check_nan, save_payload, done;
Register scratch1 = t8;
Register scratch2 = t9;
sub_d(dest, fs, ft);
// Check if the result of subtraction is NaN: the second label of BranchF64
// is taken when fs and ft compare unordered, i.e. when an operand is NaN.
BranchF64(nullptr, &check_nan, eq, fs, ft);
Branch(USE_DELAY_SLOT, &done);
// Delay slot: move the plain result into fd (nop if dest already is fd).
dest.is(fd) ? nop() : mov_d(fd, dest);
bind(&check_nan);
// Check if first operand is a NaN (self-compare is unordered iff NaN);
// speculatively fetch its raw bits first.
dmfc1(scratch1, fs);
BranchF64(nullptr, &save_payload, eq, fs, fs);
// Second operand must be a NaN.
dmfc1(scratch1, ft);
bind(&save_payload);
// Preserve payload.  Build the mask sign | ((1 << NaNShift) - 1) in two
// steps because the 64-bit immediate does not fit directly: subtracting 1
// from sign | (1 << NaNShift) borrows only through the low bits, leaving
// the sign bit intact.
li(at, Operand(kDoubleSignMask | (1L << kDoubleNaNShift)));
Dsubu(at, at, Operand(1));
And(scratch1, scratch1, at);
// Combine the operand's sign + payload (scratch1) with the exponent +
// quiet bit of the hardware result (scratch2 masked with kDoubleNaNMask).
dmfc1(scratch2, dest);
And(scratch2, scratch2, Operand(kDoubleNaNMask));
Or(scratch2, scratch2, scratch1);
dmtc1(scratch2, fd);
bind(&done);
}
void MacroAssembler::CompareMapAndBranch(Register obj,
Register scratch,
......
......@@ -929,6 +929,12 @@ class MacroAssembler: public Assembler {
void Floor_w_d(FPURegister fd, FPURegister fs);
void Ceil_w_d(FPURegister fd, FPURegister fs);
// Preserve value of a NaN operand
void SubNanPreservePayloadAndSign_s(FPURegister fd, FPURegister fs,
FPURegister ft);
void SubNanPreservePayloadAndSign_d(FPURegister fd, FPURegister fs,
FPURegister ft);
void Madd_d(FPURegister fd,
FPURegister fr,
FPURegister fs,
......
......@@ -60,7 +60,6 @@ namespace wasm {
V(I64GtS, int64_t, >) \
V(I64GeS, int64_t, >=) \
V(F32Add, float, +) \
V(F32Sub, float, -) \
V(F32Mul, float, *) \
V(F32Div, float, /) \
V(F32Eq, float, ==) \
......@@ -70,7 +69,6 @@ namespace wasm {
V(F32Gt, float, >) \
V(F32Ge, float, >=) \
V(F64Add, double, +) \
V(F64Sub, double, -) \
V(F64Mul, double, *) \
V(F64Div, double, /) \
V(F64Eq, double, ==) \
......@@ -99,11 +97,13 @@ namespace wasm {
V(I32Rol, int32_t) \
V(I64Ror, int64_t) \
V(I64Rol, int64_t) \
V(F32Sub, float) \
V(F32Min, float) \
V(F32Max, float) \
V(F32CopySign, float) \
V(F64Min, double) \
V(F64Max, double) \
V(F64Sub, double) \
V(F64CopySign, double) \
V(I32AsmjsDivS, int32_t) \
V(I32AsmjsDivU, uint32_t) \
......@@ -311,6 +311,17 @@ static double quiet(double a) {
}
}
// Float32 subtraction with Wasm NaN semantics: when an operand is a NaN,
// the result must be that NaN quieted, with payload and sign preserved.
// Some architectures (e.g. MIPS) do not preserve the payload in hardware,
// so the interpreter reconstructs the result explicitly.
static inline float ExecuteF32Sub(float a, float b, TrapReason* trap) {
  float result = a - b;
  // Use std::isnan (consistent with ExecuteF32Min/Max below) instead of the
  // opaque `result - result != 0` probe.  A NaN result comes either from a
  // NaN operand — handled below — or from inf - inf, in which case neither
  // operand check fires and the default NaN is returned unchanged.
  if (std::isnan(result)) {
    if (std::isnan(a)) return quiet(a);
    if (std::isnan(b)) return quiet(b);
  }
  return result;
}
static inline float ExecuteF32Min(float a, float b, TrapReason* trap) {
if (std::isnan(a)) return quiet(a);
if (std::isnan(b)) return quiet(b);
......@@ -327,6 +338,17 @@ static inline float ExecuteF32CopySign(float a, float b, TrapReason* trap) {
return copysignf(a, b);
}
// Float64 subtraction with Wasm NaN semantics: when an operand is a NaN,
// the result must be that NaN quieted, with payload and sign preserved.
// Some architectures (e.g. MIPS) do not preserve the payload in hardware,
// so the interpreter reconstructs the result explicitly.
static inline double ExecuteF64Sub(double a, double b, TrapReason* trap) {
  double result = a - b;
  // Use std::isnan (consistent with ExecuteF64Min/Max below) instead of the
  // opaque `result - result != 0` probe.  A NaN result comes either from a
  // NaN operand — handled below — or from inf - inf, in which case neither
  // operand check fires and the default NaN is returned unchanged.
  if (std::isnan(result)) {
    if (std::isnan(a)) return quiet(a);
    if (std::isnan(b)) return quiet(b);
  }
  return result;
}
static inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
if (std::isnan(a)) return quiet(a);
if (std::isnan(b)) return quiet(b);
......
......@@ -609,15 +609,40 @@ WASM_EXEC_TEST(Float32SubMinusZero) {
WasmRunner<float> r(execution_mode, MachineType::Float32());
BUILD(r, WASM_F32_SUB(WASM_F32(-0.0), WASM_GET_LOCAL(0)));
CHECK_EQ(0x7fe00000, bit_cast<uint32_t>(r.Call(bit_cast<float>(0x7fa00000))));
uint32_t sNanValue =
bit_cast<uint32_t>(std::numeric_limits<float>::signaling_NaN());
uint32_t qNanValue =
bit_cast<uint32_t>(std::numeric_limits<float>::quiet_NaN());
uint32_t payload = 0x00200000;
uint32_t expected = (qNanValue & 0xffc00000) | payload;
uint32_t operand = (sNanValue & 0xffc00000) | payload;
CHECK_EQ(expected, bit_cast<uint32_t>(r.Call(bit_cast<float>(operand))));
// Change the sign of the NaN.
expected |= 0x80000000;
operand |= 0x80000000;
CHECK_EQ(expected, bit_cast<uint32_t>(r.Call(bit_cast<float>(operand))));
}
// Checks that -0.0 - x preserves the payload and sign of a NaN operand x
// through Float64Sub: a signaling NaN input must come back as a quiet NaN
// with the same payload, for both signs.
WASM_EXEC_TEST(Float64SubMinusZero) {
WasmRunner<double> r(execution_mode, MachineType::Float64());
BUILD(r, WASM_F64_SUB(WASM_F64(-0.0), WASM_GET_LOCAL(0)));
// A signaling NaN (quiet bit clear) comes back quieted (0x7ff8...) with the
// payload 0x0123456789abc intact.
CHECK_EQ(0x7ff8123456789abc,
bit_cast<uint64_t>(r.Call(bit_cast<double>(0x7ff0123456789abc))));
// Build signaling/quiet NaN templates from the platform's NaN values so
// the test is independent of their default payloads.
uint64_t sNanValue =
bit_cast<uint64_t>(std::numeric_limits<double>::signaling_NaN());
uint64_t qNanValue =
bit_cast<uint64_t>(std::numeric_limits<double>::quiet_NaN());
uint64_t payload = 0x0000123456789abc;
// 0xfff8000000000000 keeps sign + exponent + quiet bit; the payload is
// spliced in below it.
uint64_t expected = (qNanValue & 0xfff8000000000000) | payload;
uint64_t operand = (sNanValue & 0xfff8000000000000) | payload;
CHECK_EQ(expected, bit_cast<uint64_t>(r.Call(bit_cast<double>(operand))));
// Change the sign of the NaN.
expected |= 0x8000000000000000;
operand |= 0x8000000000000000;
CHECK_EQ(expected, bit_cast<uint64_t>(r.Call(bit_cast<double>(operand))));
}
WASM_EXEC_TEST(Float64Neg) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment