Commit 19741ac9 authored by ahaas, committed by Commit bot

[turbofan] Implemented the optional Float32RoundTruncate operator.

The Float32RoundTruncate operator rounds float32 numbers towards zero.
The operator is currently implemented on x64, ia32, arm, and arm64.

Additionally, I added support for the float32 vrintz, vrintn, and vrinta
instructions to the ARM simulator.

R=titzer@chromium.org

Review URL: https://codereview.chromium.org/1468303005

Cr-Commit-Position: refs/heads/master@{#32301}
parent 8d90b927
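As a quick illustration of the intended semantics (a standalone sketch, not part of the change): Float32RoundTruncate behaves like the C library's truncf, i.e. the fractional part is discarded and the value is rounded towards zero for both signs.

#include <cassert>
#include <cmath>

int main() {
  assert(std::truncf(1.7f) == 1.0f);
  assert(std::truncf(-1.7f) == -1.0f);  // towards zero, not towards -infinity
  assert(std::truncf(-0.4f) == 0.0f);   // result is -0.0f, which compares equal to 0.0f
  return 0;
}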
......@@ -3362,6 +3362,20 @@ void Assembler::vmrs(Register dst, Condition cond) {
}
void Assembler::vrinta(const SwVfpRegister dst, const SwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
// M(5) | 0(4) | Vm(3-0)
DCHECK(CpuFeatures::IsSupported(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | vd * B12 |
0x5 * B9 | B6 | m * B5 | vm);
}
void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
......@@ -3376,6 +3390,20 @@ void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
}
void Assembler::vrintn(const SwVfpRegister dst, const SwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
// M(5) | 0(4) | Vm(3-0)
DCHECK(CpuFeatures::IsSupported(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x1 * B16 |
vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
}
void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
......@@ -3446,6 +3474,20 @@ void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
}
void Assembler::vrintz(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond) {
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
// Vd(15-12) | 101(11-9) | sz=0(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
DCHECK(CpuFeatures::IsSupported(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x3 * B17 | vd * B12 |
0x5 * B9 | B7 | B6 | m * B5 | vm);
}
void Assembler::vrintz(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond) {
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
......
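As a rough worked example of the bit layout documented in the comment above (a standalone sketch that mirrors the emit() expression for the single-precision vrintz, using the same B23 = 1 << 23 style constants and the SwVfpRegister::split_code convention vd = code >> 1, d = code & 1; the registers s5/s6 are chosen arbitrarily):

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t al = 0xEu << 28;  // 'always' condition, the default cond
  // s5 -> vd = 2, d = 1; s6 -> vm = 3, m = 0.
  const uint32_t d = 1, vd = 2, m = 0, vm = 3;
  const uint32_t instr = al | (0x1Du << 23) | (d << 22) | (0x3u << 20) |
                         (0x3u << 17) | (vd << 12) | (0x5u << 9) | (1u << 7) |
                         (1u << 6) | (m << 5) | vm;
  // Per the layout above this should print 0xEEF62AC3 (vrintz.f32 s5, s6).
  std::printf("0x%08X\n", instr);
  return 0;
}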
......@@ -1211,12 +1211,16 @@ class Assembler : public AssemblerBase {
const Condition cond = al);
// ARMv8 rounding instructions.
void vrinta(const SwVfpRegister dst, const SwVfpRegister src);
void vrinta(const DwVfpRegister dst, const DwVfpRegister src);
void vrintn(const SwVfpRegister dst, const SwVfpRegister src);
void vrintn(const DwVfpRegister dst, const DwVfpRegister src);
void vrintm(const SwVfpRegister dst, const SwVfpRegister src);
void vrintm(const DwVfpRegister dst, const DwVfpRegister src);
void vrintp(const SwVfpRegister dst, const SwVfpRegister src);
void vrintp(const DwVfpRegister dst, const DwVfpRegister src);
void vrintz(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond = al);
void vrintz(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond = al);
......
......@@ -1781,28 +1781,28 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
if (dp_operation) {
Format(instr, "vrinta.f64.f64 'Dd, 'Dm");
} else {
Unknown(instr);
Format(instr, "vrinta.f32.f32 'Sd, 'Sm");
}
break;
case 0x1:
if (dp_operation) {
Format(instr, "vrintn.f64.f64 'Dd, 'Dm");
} else {
Unknown(instr);
Format(instr, "vrintn.f32.f32 'Sd, 'Sm");
}
break;
case 0x2:
if (dp_operation) {
Format(instr, "vrintp.f64.f64 'Dd, 'Dm");
} else {
Unknown(instr);
Format(instr, "vrintp.f32.f32 'Sd, 'Sm");
}
break;
case 0x3:
if (dp_operation) {
Format(instr, "vrintm.f64.f64 'Dd, 'Dm");
} else {
Unknown(instr);
Format(instr, "vrintm.f32.f32 'Sd, 'Sm");
}
break;
default:
......
......@@ -3178,10 +3178,17 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
}
} else if (((instr->Opc2Value() == 0x6)) && (instr->Opc3Value() == 0x3)) {
// vrintz - truncate
double dm_value = get_double_from_d_register(vm);
double dd_value = trunc(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
if (instr->SzValue() == 0x1) {
double dm_value = get_double_from_d_register(vm);
double dd_value = trunc(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sm_value = get_float_from_s_register(m);
float sd_value = truncf(sm_value);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
}
} else {
UNREACHABLE(); // Not used by V8.
}
......@@ -3882,25 +3889,14 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
dd_value = round(dm_value);
break;
case 0x1: { // vrintn - round with ties to even
dd_value = std::floor(dm_value);
double error = dm_value - dd_value;
// Take care of correctly handling the range [-0.5, -0.0], which
// must yield -0.0.
if ((-0.5 <= dm_value) && (dm_value < 0.0)) {
dd_value = -0.0;
// If the error is greater than 0.5, or is equal to 0.5 and the
// integer result is odd, round up.
} else if ((error > 0.5) ||
((error == 0.5) && (fmod(dd_value, 2) != 0))) {
dd_value++;
}
dd_value = nearbyint(dm_value);
break;
}
case 0x2: // vrintp - ceil
dd_value = std::ceil(dm_value);
dd_value = ceil(dm_value);
break;
case 0x3: // vrintm - floor
dd_value = std::floor(dm_value);
dd_value = floor(dm_value);
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
......
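The rewritten vrintn case above relies on nearbyint, which under the default FE_TONEAREST rounding mode rounds halfway cases to even, so the hand-rolled tie-breaking logic is no longer needed. A small standalone illustration (not simulator code):

#include <cassert>
#include <cfenv>
#include <cmath>

int main() {
  std::fesetround(FE_TONEAREST);          // round to nearest, ties to even
  assert(std::nearbyint(0.5) == 0.0);     // tie broken towards the even value
  assert(std::nearbyint(1.5) == 2.0);
  assert(std::nearbyint(-0.5) == 0.0);    // result is -0.0, which compares equal to 0.0
  assert(std::nearbyint(2.6) == 3.0);
  return 0;
}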
......@@ -803,6 +803,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArmVrintpF64:
__ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
case kArmVrintzF32:
__ vrintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArmVrintzF64:
__ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
......
......@@ -69,6 +69,7 @@ namespace compiler {
V(ArmVrintmF64) \
V(ArmVrintpF32) \
V(ArmVrintpF64) \
V(ArmVrintzF32) \
V(ArmVrintzF64) \
V(ArmVrintaF64) \
V(ArmVrintnF64) \
......
......@@ -1135,6 +1135,11 @@ void InstructionSelector::VisitFloat64RoundUp(Node* node) {
}
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
VisitRR(this, kArmVrintzF32, node);
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kArmVrintzF64, node);
}
......@@ -1579,6 +1584,7 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat32RoundUp |
MachineOperatorBuilder::kFloat64RoundUp |
MachineOperatorBuilder::kFloat32RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kFloat64RoundTiesEven;
......
......@@ -638,6 +638,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Float64RoundTiesAway:
__ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArm64Float32RoundTruncate:
__ Frintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArm64Float64RoundTruncate:
__ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
......
......@@ -104,6 +104,7 @@ namespace compiler {
V(Arm64Float32RoundUp) \
V(Arm64Float64RoundUp) \
V(Arm64Float64RoundTiesAway) \
V(Arm64Float32RoundTruncate) \
V(Arm64Float64RoundTruncate) \
V(Arm64Float64RoundTiesEven) \
V(Arm64Float32ToFloat64) \
......
......@@ -1493,6 +1493,11 @@ void InstructionSelector::VisitFloat64RoundUp(Node* node) {
}
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
VisitRR(this, kArm64Float32RoundTruncate, node);
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kArm64Float64RoundTruncate, node);
}
......@@ -2080,6 +2085,7 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat32RoundUp |
MachineOperatorBuilder::kFloat64RoundUp |
MachineOperatorBuilder::kFloat32RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kFloat64RoundTiesEven |
......
......@@ -871,6 +871,11 @@ void InstructionSelector::VisitFloat64RoundUp(Node* node) {
}
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
}
......@@ -1298,6 +1303,7 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat32RoundUp |
MachineOperatorBuilder::kFloat64RoundUp |
MachineOperatorBuilder::kFloat32RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesEven;
}
......
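On ia32 and x64 the kRoundToZero mode encoded into MiscField requests round-towards-zero behaviour from the SSE rounding instruction. As a rough, standalone illustration of that rounding mode via the SSE4.1 intrinsic (this is not V8's code generator, only the semantics being requested; it needs a compiler with SSE4.1 enabled, e.g. -msse4.1):

#include <cassert>
#include <smmintrin.h>  // SSE4.1 intrinsics

int main() {
  __m128 v = _mm_set_ss(-1.7f);
  __m128 r = _mm_round_ss(v, v, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
  assert(_mm_cvtss_f32(r) == -1.0f);  // truncated towards zero
  return 0;
}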
......@@ -931,6 +931,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat32(node), VisitFloat32RoundUp(node);
case IrOpcode::kFloat64RoundUp:
return MarkAsFloat64(node), VisitFloat64RoundUp(node);
case IrOpcode::kFloat32RoundTruncate:
return MarkAsFloat32(node), VisitFloat32RoundTruncate(node);
case IrOpcode::kFloat64RoundTruncate:
return MarkAsFloat64(node), VisitFloat64RoundTruncate(node);
case IrOpcode::kFloat64RoundTiesAway:
......
......@@ -190,6 +190,7 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(Float64RoundDown, Operator::kNoProperties, 1, 0, 1) \
V(Float32RoundUp, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundUp, Operator::kNoProperties, 1, 0, 1) \
V(Float32RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundTiesAway, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1)
......
......@@ -120,21 +120,23 @@ class MachineOperatorBuilder final : public ZoneObject {
kFloat64RoundDown = 1u << 5,
kFloat32RoundUp = 1u << 6,
kFloat64RoundUp = 1u << 7,
kFloat64RoundTruncate = 1u << 8,
kFloat64RoundTiesEven = 1u << 9,
kFloat64RoundTiesAway = 1u << 10,
kInt32DivIsSafe = 1u << 11,
kUint32DivIsSafe = 1u << 12,
kWord32ShiftIsSafe = 1u << 13,
kWord32Ctz = 1u << 14,
kWord64Ctz = 1u << 15,
kWord32Popcnt = 1u << 16,
kWord64Popcnt = 1u << 17,
kFloat32RoundTruncate = 1u << 8,
kFloat64RoundTruncate = 1u << 9,
kFloat64RoundTiesEven = 1u << 10,
kFloat64RoundTiesAway = 1u << 11,
kInt32DivIsSafe = 1u << 12,
kUint32DivIsSafe = 1u << 13,
kWord32ShiftIsSafe = 1u << 14,
kWord32Ctz = 1u << 15,
kWord64Ctz = 1u << 16,
kWord32Popcnt = 1u << 17,
kWord64Popcnt = 1u << 18,
kAllOptionalOps = kFloat32Max | kFloat32Min | kFloat64Max | kFloat64Min |
kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
kFloat64RoundUp | kFloat64RoundTruncate |
kFloat64RoundTiesAway | kFloat64RoundTiesEven |
kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt
kFloat64RoundUp | kFloat32RoundTruncate |
kFloat64RoundTruncate | kFloat64RoundTiesAway |
kFloat64RoundTiesEven | kWord32Ctz | kWord64Ctz |
kWord32Popcnt | kWord64Popcnt
};
typedef base::Flags<Flag, unsigned> Flags;
......@@ -274,6 +276,7 @@ class MachineOperatorBuilder final : public ZoneObject {
const OptionalOperator Float64RoundDown();
const OptionalOperator Float32RoundUp();
const OptionalOperator Float64RoundUp();
const OptionalOperator Float32RoundTruncate();
const OptionalOperator Float64RoundTruncate();
const OptionalOperator Float64RoundTiesAway();
const OptionalOperator Float64RoundTiesEven();
......
......@@ -663,6 +663,11 @@ void InstructionSelector::VisitFloat64RoundUp(Node* node) {
}
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
UNREACHABLE();
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kMipsFloat64RoundTruncate, node);
}
......
......@@ -997,6 +997,11 @@ void InstructionSelector::VisitFloat64RoundUp(Node* node) {
}
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
UNREACHABLE();
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kMips64Float64RoundTruncate, node);
}
......
......@@ -308,6 +308,7 @@
V(Float64RoundDown) \
V(Float32RoundUp) \
V(Float64RoundUp) \
V(Float32RoundTruncate) \
V(Float64RoundTruncate) \
V(Float64RoundTiesAway) \
V(Float64RoundTiesEven) \
......
......@@ -1144,6 +1144,11 @@ void InstructionSelector::VisitFloat64RoundUp(Node* node) {
}
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
UNREACHABLE();
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kPPC_TruncateDouble, node);
}
......
......@@ -495,6 +495,9 @@ class RawMachineAssembler {
Node* Float64RoundUp(Node* a) {
return AddNode(machine()->Float64RoundUp().op(), a);
}
Node* Float32RoundTruncate(Node* a) {
return AddNode(machine()->Float32RoundTruncate().op(), a);
}
Node* Float64RoundTruncate(Node* a) {
return AddNode(machine()->Float64RoundTruncate().op(), a);
}
......
......@@ -2294,6 +2294,12 @@ Type* Typer::Visitor::TypeFloat64RoundUp(Node* node) {
}
Type* Typer::Visitor::TypeFloat32RoundTruncate(Node* node) {
// TODO(sigurds): We could have a tighter bound here.
return Type::Number();
}
Type* Typer::Visitor::TypeFloat64RoundTruncate(Node* node) {
// TODO(sigurds): We could have a tighter bound here.
return Type::Number();
......
......@@ -893,6 +893,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kFloat64RoundDown:
case IrOpcode::kFloat32RoundUp:
case IrOpcode::kFloat64RoundUp:
case IrOpcode::kFloat32RoundTruncate:
case IrOpcode::kFloat64RoundTruncate:
case IrOpcode::kFloat64RoundTiesAway:
case IrOpcode::kFloat64RoundTiesEven:
......
......@@ -1158,6 +1158,11 @@ void InstructionSelector::VisitFloat64RoundUp(Node* node) {
}
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
}
......@@ -1705,6 +1710,7 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat32RoundUp |
MachineOperatorBuilder::kFloat64RoundUp |
MachineOperatorBuilder::kFloat32RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesEven;
}
......
......@@ -858,6 +858,11 @@ void InstructionSelector::VisitFloat32RoundUp(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat64RoundUp(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
UNREACHABLE();
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
X87OperandGenerator g(this);
Emit(kX87Float64Round | MiscField::encode(kRoundToZero),
......
......@@ -5160,7 +5160,7 @@ TEST(RunFloat64RoundDown1) {
m.Return(m.Float64RoundDown(m.Parameter(0)));
FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(std::floor(*i), m.Call(*i)); }
FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(floor(*i), m.Call(*i)); }
}
......@@ -5172,7 +5172,7 @@ TEST(RunFloat64RoundDown2) {
m.Parameter(0)))));
for (size_t i = 0; i < arraysize(kValues); ++i) {
CHECK_EQ(std::ceil(kValues[i]), m.Call(kValues[i]));
CHECK_EQ(ceil(kValues[i]), m.Call(kValues[i]));
}
}
......@@ -5182,7 +5182,7 @@ TEST(RunFloat32RoundUp) {
if (!m.machine()->Float32RoundUp().IsSupported()) return;
m.Return(m.Float32RoundUp(m.Parameter(0)));
FOR_FLOAT32_INPUTS(i) { CheckFloatEq(std::ceil(*i), m.Call(*i)); }
FOR_FLOAT32_INPUTS(i) { CheckFloatEq(ceilf(*i), m.Call(*i)); }
}
......@@ -5191,7 +5191,7 @@ TEST(RunFloat64RoundUp) {
if (!m.machine()->Float64RoundUp().IsSupported()) return;
m.Return(m.Float64RoundUp(m.Parameter(0)));
FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(std::ceil(*i), m.Call(*i)); }
FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(ceil(*i), m.Call(*i)); }
}
......@@ -5204,6 +5204,16 @@ TEST(RunFloat64RoundTiesEven) {
}
TEST(RunFloat32RoundTruncate) {
BufferedRawMachineAssemblerTester<float> m(kMachFloat32);
if (!m.machine()->Float32RoundTruncate().IsSupported()) return;
m.Return(m.Float32RoundTruncate(m.Parameter(0)));
FOR_FLOAT32_INPUTS(i) { CheckFloatEq(truncf(*i), m.Call(*i)); }
}
TEST(RunFloat64RoundTruncate) {
BufferedRawMachineAssemblerTester<double> m(kMachFloat64);
if (!m.machine()->Float64RoundTruncate().IsSupported()) return;
......
......@@ -1894,6 +1894,111 @@ TEST(code_relative_offset) {
}
TEST(ARMv8_float32_vrintX) {
// Test the vrintX floating point instructions.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
float input;
float ar;
float nr;
float mr;
float pr;
float zr;
} T;
T t;
// Create a function that accepts &t, and loads, manipulates, and stores
// the floats.
Assembler assm(isolate, NULL, 0);
Label L, C;
if (CpuFeatures::IsSupported(ARMv8)) {
CpuFeatureScope scope(&assm, ARMv8);
__ mov(ip, Operand(sp));
__ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
__ mov(r4, Operand(r0));
// Test vrinta
__ vldr(s6, r4, offsetof(T, input));
__ vrinta(s5, s6);
__ vstr(s5, r4, offsetof(T, ar));
// Test vrintn
__ vldr(s6, r4, offsetof(T, input));
__ vrintn(s5, s6);
__ vstr(s5, r4, offsetof(T, nr));
// Test vrintp
__ vldr(s6, r4, offsetof(T, input));
__ vrintp(s5, s6);
__ vstr(s5, r4, offsetof(T, pr));
// Test vrintm
__ vldr(s6, r4, offsetof(T, input));
__ vrintm(s5, s6);
__ vstr(s5, r4, offsetof(T, mr));
// Test vrintz
__ vldr(s6, r4, offsetof(T, input));
__ vrintz(s5, s6);
__ vstr(s5, r4, offsetof(T, zr));
__ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
#endif
F3 f = FUNCTION_CAST<F3>(code->entry());
Object* dummy = nullptr;
USE(dummy);
#define CHECK_VRINT(input_val, ares, nres, mres, pres, zres) \
t.input = input_val; \
dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0); \
CHECK_EQ(ares, t.ar); \
CHECK_EQ(nres, t.nr); \
CHECK_EQ(mres, t.mr); \
CHECK_EQ(pres, t.pr); \
CHECK_EQ(zres, t.zr);
CHECK_VRINT(-0.5, -1.0, -0.0, -1.0, -0.0, -0.0)
CHECK_VRINT(-0.6, -1.0, -1.0, -1.0, -0.0, -0.0)
CHECK_VRINT(-1.1, -1.0, -1.0, -2.0, -1.0, -1.0)
CHECK_VRINT(0.5, 1.0, 0.0, 0.0, 1.0, 0.0)
CHECK_VRINT(0.6, 1.0, 1.0, 0.0, 1.0, 0.0)
CHECK_VRINT(1.1, 1.0, 1.0, 1.0, 2.0, 1.0)
float inf = std::numeric_limits<float>::infinity();
CHECK_VRINT(inf, inf, inf, inf, inf, inf)
CHECK_VRINT(-inf, -inf, -inf, -inf, -inf, -inf)
CHECK_VRINT(-0.0, -0.0, -0.0, -0.0, -0.0, -0.0)
// Check NaN propagation.
float nan = std::numeric_limits<float>::quiet_NaN();
t.input = nan;
dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
CHECK_EQ(bit_cast<int32_t>(nan), bit_cast<int32_t>(t.ar));
CHECK_EQ(bit_cast<int32_t>(nan), bit_cast<int32_t>(t.nr));
CHECK_EQ(bit_cast<int32_t>(nan), bit_cast<int32_t>(t.mr));
CHECK_EQ(bit_cast<int32_t>(nan), bit_cast<int32_t>(t.pr));
CHECK_EQ(bit_cast<int32_t>(nan), bit_cast<int32_t>(t.zr));
#undef CHECK_VRINT
}
}
TEST(ARMv8_vrintX) {
// Test the vrintX floating point instructions.
CcTest::InitializeVM();
......