Commit 16df1dfa authored by Ilja Iskovs, committed by V8 LUCI CQ

[arm64][wasm-simd] Use Cm(0) for integer comparison with 0

Use an immediate zero operand for integer comparisons when possible. This
gives a ~1% runtime performance improvement in some benchmarks on
Neoverse N1.
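
As a rough illustration (registers chosen arbitrarily, not taken from the
generated code), an i32x4 compare against an all-zero constant previously
required materializing the zero vector in a register:

    movi v1.16b, #0           // zero materialized into a scratch register
    cmeq v0.4s, v2.4s, v1.4s

whereas the immediate form needs a single instruction:

    cmeq v0.4s, v2.4s, #0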

Change-Id: I727a8104f8e6ca3d122d6b5b8b3d38d7bdd76c47
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3158327
Reviewed-by: Zhi An Ng <zhin@chromium.org>
Commit-Queue: Martyn Capewell <martyn.capewell@arm.com>
Cr-Commit-Position: refs/heads/main@{#76847}
parent 6b578980
@@ -1341,6 +1341,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
DCHECK(allow_macro_instructions());
cmlt(vd, vn, imm);
}
void Cmle(const VRegister& vd, const VRegister& vn, int imm) {
DCHECK(allow_macro_instructions());
cmle(vd, vn, imm);
}
inline void Neg(const Register& rd, const Operand& operand);
inline void Negs(const Register& rd, const Operand& operand);
@@ -1607,11 +1611,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
inline void Umsubl(const Register& rd, const Register& rn, const Register& rm,
const Register& ra);
void Cmle(const VRegister& vd, const VRegister& vn, int imm) {
DCHECK(allow_macro_instructions());
cmle(vd, vn, imm);
}
void Ld1(const VRegister& vt, const MemOperand& src) {
DCHECK(allow_macro_instructions());
ld1(vt, src);
......
@@ -2099,6 +2099,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0).Format(f), +0.0); \
break; \
}
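// Compare-with-zero cases: kArm64ILtS/ILeS are only generated for compares
// against an immediate zero, so the L variant asserts a single input, while
// the G variant (eq/gt/ge) also handles the ordinary two-register form.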
#define SIMD_CM_L_CASE(Op, ImmOp) \
case Op: { \
VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
DCHECK_EQ(instr->InputCount(), 1); \
__ Cm##ImmOp(i.OutputSimd128Register().Format(f), \
i.InputSimd128Register(0).Format(f), 0); \
break; \
}
#define SIMD_CM_G_CASE(Op, CmOp) \
case Op: { \
VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
if (instr->InputCount() == 1) { \
__ Cm##CmOp(i.OutputSimd128Register().Format(f), \
i.InputSimd128Register(0).Format(f), 0); \
} else { \
__ Cm##CmOp(i.OutputSimd128Register().Format(f), \
i.InputSimd128Register(0).Format(f), \
i.InputSimd128Register(1).Format(f)); \
} \
break; \
}
#define SIMD_DESTRUCTIVE_BINOP_CASE(Op, Instr, FORMAT) \
case Op: { \
VRegister dst = i.OutputSimd128Register().V##FORMAT(); \
@@ -2378,17 +2399,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
SIMD_BINOP_LANE_SIZE_CASE(kArm64IEq, Cmeq);
SIMD_CM_G_CASE(kArm64IEq, eq);
case kArm64INe: {
VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
VRegister dst = i.OutputSimd128Register().Format(f);
__ Cmeq(dst, i.InputSimd128Register(0).Format(f),
i.InputSimd128Register(1).Format(f));
if (instr->InputCount() == 1) {
__ Cmeq(dst, i.InputSimd128Register(0).Format(f), 0);
} else {
__ Cmeq(dst, i.InputSimd128Register(0).Format(f),
i.InputSimd128Register(1).Format(f));
}
__ Mvn(dst, dst);
break;
}
SIMD_BINOP_LANE_SIZE_CASE(kArm64IGtS, Cmgt);
SIMD_BINOP_LANE_SIZE_CASE(kArm64IGeS, Cmge);
SIMD_CM_L_CASE(kArm64ILtS, lt);
SIMD_CM_L_CASE(kArm64ILeS, le);
SIMD_CM_G_CASE(kArm64IGtS, gt);
SIMD_CM_G_CASE(kArm64IGeS, ge);
case kArm64I64x2ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 6, V2D, Ushl, X);
break;
......
@@ -242,6 +242,8 @@ namespace compiler {
V(Arm64INe) \
V(Arm64IGtS) \
V(Arm64IGeS) \
V(Arm64ILtS) \
V(Arm64ILeS) \
V(Arm64I64x2ShrU) \
V(Arm64I64x2BitMask) \
V(Arm64I32x4SConvertF32x4) \
......
@@ -205,6 +205,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64INe:
case kArm64IGtS:
case kArm64IGeS:
case kArm64ILtS:
case kArm64ILeS:
case kArm64I64x2ShrU:
case kArm64I64x2BitMask:
case kArm64I32x4SConvertF32x4:
......
@@ -3544,14 +3544,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F32x4Sub, kArm64FSub, 32) \
V(F32x4Div, kArm64FDiv, 32) \
V(I64x2Sub, kArm64ISub, 64) \
V(I64x2Eq, kArm64IEq, 64) \
V(I64x2Ne, kArm64INe, 64) \
V(I64x2GtS, kArm64IGtS, 64) \
V(I64x2GeS, kArm64IGeS, 64) \
V(I32x4Eq, kArm64IEq, 32) \
V(I32x4Ne, kArm64INe, 32) \
V(I32x4GtS, kArm64IGtS, 32) \
V(I32x4GeS, kArm64IGeS, 32) \
V(I32x4GtU, kArm64IGtU, 32) \
V(I32x4GeU, kArm64IGeU, 32) \
V(I32x4MinS, kArm64IMinS, 32) \
@@ -3562,10 +3554,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8SubSatS, kArm64ISubSatS, 16) \
V(I16x8AddSatU, kArm64IAddSatU, 16) \
V(I16x8SubSatU, kArm64ISubSatU, 16) \
V(I16x8Eq, kArm64IEq, 16) \
V(I16x8Ne, kArm64INe, 16) \
V(I16x8GtS, kArm64IGtS, 16) \
V(I16x8GeS, kArm64IGeS, 16) \
V(I16x8GtU, kArm64IGtU, 16) \
V(I16x8GeU, kArm64IGeU, 16) \
V(I16x8RoundingAverageU, kArm64RoundingAverageU, 16) \
@@ -3579,10 +3567,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16SubSatS, kArm64ISubSatS, 8) \
V(I8x16AddSatU, kArm64IAddSatU, 8) \
V(I8x16SubSatU, kArm64ISubSatU, 8) \
V(I8x16Eq, kArm64IEq, 8) \
V(I8x16Ne, kArm64INe, 8) \
V(I8x16GtS, kArm64IGtS, 8) \
V(I8x16GeS, kArm64IGeS, 8) \
V(I8x16GtU, kArm64IGtU, 8) \
V(I8x16GeU, kArm64IGeU, 8) \
V(I8x16MinS, kArm64IMinS, 8) \
@@ -3954,32 +3938,49 @@ bool isSimdZero(Arm64OperandGenerator& g, Node* node) {
}
} // namespace
#define VISIT_SIMD_FCM(Type, CmOp, CmOpposite, LaneSize) \
void InstructionSelector::Visit##Type##CmOp(Node* node) { \
Arm64OperandGenerator g(this); \
Node* left = node->InputAt(0); \
Node* right = node->InputAt(1); \
if (isSimdZero(g, left)) { \
Emit(kArm64F##CmOpposite | LaneSizeField::encode(LaneSize), \
g.DefineAsRegister(node), g.UseRegister(right)); \
return; \
} else if (isSimdZero(g, right)) { \
Emit(kArm64F##CmOp | LaneSizeField::encode(LaneSize), \
g.DefineAsRegister(node), g.UseRegister(left)); \
return; \
} \
VisitRRR(this, kArm64F##CmOp | LaneSizeField::encode(LaneSize), node); \
}
VISIT_SIMD_FCM(F64x2, Eq, Eq, 64)
VISIT_SIMD_FCM(F64x2, Ne, Ne, 64)
VISIT_SIMD_FCM(F64x2, Lt, Gt, 64)
VISIT_SIMD_FCM(F64x2, Le, Ge, 64)
VISIT_SIMD_FCM(F32x4, Eq, Eq, 32)
VISIT_SIMD_FCM(F32x4, Ne, Ne, 32)
VISIT_SIMD_FCM(F32x4, Lt, Gt, 32)
VISIT_SIMD_FCM(F32x4, Le, Ge, 32)
#undef VISIT_SIMD_FCM
#define VISIT_SIMD_CM(Type, T, CmOp, CmOpposite, LaneSize) \
void InstructionSelector::Visit##Type##CmOp(Node* node) { \
Arm64OperandGenerator g(this); \
Node* left = node->InputAt(0); \
Node* right = node->InputAt(1); \
if (isSimdZero(g, left)) { \
Emit(kArm64##T##CmOpposite | LaneSizeField::encode(LaneSize), \
g.DefineAsRegister(node), g.UseRegister(right)); \
return; \
} else if (isSimdZero(g, right)) { \
Emit(kArm64##T##CmOp | LaneSizeField::encode(LaneSize), \
g.DefineAsRegister(node), g.UseRegister(left)); \
return; \
} \
VisitRRR(this, kArm64##T##CmOp | LaneSizeField::encode(LaneSize), node); \
}
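// When the zero vector is the left-hand operand, the comparison direction is
// reversed (0 > x is equivalent to x < 0), so CmOpposite is emitted on the
// remaining operand; with zero on the right, CmOp is used directly.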
VISIT_SIMD_CM(F64x2, F, Eq, Eq, 64)
VISIT_SIMD_CM(F64x2, F, Ne, Ne, 64)
VISIT_SIMD_CM(F64x2, F, Lt, Gt, 64)
VISIT_SIMD_CM(F64x2, F, Le, Ge, 64)
VISIT_SIMD_CM(F32x4, F, Eq, Eq, 32)
VISIT_SIMD_CM(F32x4, F, Ne, Ne, 32)
VISIT_SIMD_CM(F32x4, F, Lt, Gt, 32)
VISIT_SIMD_CM(F32x4, F, Le, Ge, 32)
VISIT_SIMD_CM(I64x2, I, Eq, Eq, 64)
VISIT_SIMD_CM(I64x2, I, Ne, Ne, 64)
VISIT_SIMD_CM(I64x2, I, GtS, LtS, 64)
VISIT_SIMD_CM(I64x2, I, GeS, LeS, 64)
VISIT_SIMD_CM(I32x4, I, Eq, Eq, 32)
VISIT_SIMD_CM(I32x4, I, Ne, Ne, 32)
VISIT_SIMD_CM(I32x4, I, GtS, LtS, 32)
VISIT_SIMD_CM(I32x4, I, GeS, LeS, 32)
VISIT_SIMD_CM(I16x8, I, Eq, Eq, 16)
VISIT_SIMD_CM(I16x8, I, Ne, Ne, 16)
VISIT_SIMD_CM(I16x8, I, GtS, LtS, 16)
VISIT_SIMD_CM(I16x8, I, GeS, LeS, 16)
VISIT_SIMD_CM(I8x16, I, Eq, Eq, 8)
VISIT_SIMD_CM(I8x16, I, Ne, Ne, 8)
VISIT_SIMD_CM(I8x16, I, GtS, LtS, 8)
VISIT_SIMD_CM(I8x16, I, GeS, LeS, 8)
#undef VISIT_SIMD_CM
void InstructionSelector::VisitS128Select(Node* node) {
Arm64OperandGenerator g(this);
......
@@ -656,6 +656,80 @@ WASM_SIMD_TEST(I64x2GeS) {
RunI64x2BinOpTest(execution_tier, kExprI64x2GeS, GreaterEqual);
}
namespace {
template <typename ScalarType>
void RunICompareOpConstImmTest(TestExecutionTier execution_tier,
WasmOpcode cmp_opcode, WasmOpcode splat_opcode,
ScalarType (*expected_op)(ScalarType,
ScalarType)) {
for (ScalarType x : compiler::ValueHelper::GetVector<ScalarType>()) {
WasmRunner<int32_t, ScalarType> r(execution_tier);
// Set up globals to hold mask output for the left and right cases.
ScalarType* g1 = r.builder().template AddGlobal<ScalarType>(kWasmS128);
ScalarType* g2 = r.builder().template AddGlobal<ScalarType>(kWasmS128);
// Build fn to splat test values, perform compare op on both sides, and
// write the result.
byte value = 0;
byte temp = r.AllocateLocal(kWasmS128);
uint8_t const_buffer[kSimd128Size];
for (size_t i = 0; i < kSimd128Size / sizeof(ScalarType); i++) {
WriteLittleEndianValue<ScalarType>(
bit_cast<ScalarType*>(&const_buffer[0]) + i, x);
}
BUILD(r,
WASM_LOCAL_SET(temp,
WASM_SIMD_OPN(splat_opcode, WASM_LOCAL_GET(value))),
WASM_GLOBAL_SET(
0, WASM_SIMD_BINOP(cmp_opcode, WASM_SIMD_CONSTANT(const_buffer),
WASM_LOCAL_GET(temp))),
WASM_GLOBAL_SET(1, WASM_SIMD_BINOP(cmp_opcode, WASM_LOCAL_GET(temp),
WASM_SIMD_CONSTANT(const_buffer))),
WASM_ONE);
for (ScalarType y : compiler::ValueHelper::GetVector<ScalarType>()) {
r.Call(y);
ScalarType expected1 = expected_op(x, y);
ScalarType expected2 = expected_op(y, x);
for (size_t i = 0; i < kSimd128Size / sizeof(ScalarType); i++) {
CHECK_EQ(expected1, LANE(g1, i));
CHECK_EQ(expected2, LANE(g2, i));
}
}
}
}
} // namespace
WASM_SIMD_TEST(I64x2EqZero) {
RunICompareOpConstImmTest<int64_t>(execution_tier, kExprI64x2Eq,
kExprI64x2Splat, Equal);
}
WASM_SIMD_TEST(I64x2NeZero) {
RunICompareOpConstImmTest<int64_t>(execution_tier, kExprI64x2Ne,
kExprI64x2Splat, NotEqual);
}
WASM_SIMD_TEST(I64x2GtZero) {
RunICompareOpConstImmTest<int64_t>(execution_tier, kExprI64x2GtS,
kExprI64x2Splat, Greater);
}
WASM_SIMD_TEST(I64x2GeZero) {
RunICompareOpConstImmTest<int64_t>(execution_tier, kExprI64x2GeS,
kExprI64x2Splat, GreaterEqual);
}
WASM_SIMD_TEST(I64x2LtZero) {
RunICompareOpConstImmTest<int64_t>(execution_tier, kExprI64x2LtS,
kExprI64x2Splat, Less);
}
WASM_SIMD_TEST(I64x2LeZero) {
RunICompareOpConstImmTest<int64_t>(execution_tier, kExprI64x2LeS,
kExprI64x2Splat, LessEqual);
}
WASM_SIMD_TEST(F64x2Splat) {
WasmRunner<int32_t, double> r(execution_tier);
// Set up a global to hold output vector.
@@ -1492,6 +1566,36 @@ WASM_SIMD_TEST(I32x4GeU) {
RunI32x4BinOpTest(execution_tier, kExprI32x4GeU, UnsignedGreaterEqual);
}
WASM_SIMD_TEST(I32x4EqZero) {
RunICompareOpConstImmTest<int32_t>(execution_tier, kExprI32x4Eq,
kExprI32x4Splat, Equal);
}
WASM_SIMD_TEST(I32x4NeZero) {
RunICompareOpConstImmTest<int32_t>(execution_tier, kExprI32x4Ne,
kExprI32x4Splat, NotEqual);
}
WASM_SIMD_TEST(I32x4GtZero) {
RunICompareOpConstImmTest<int32_t>(execution_tier, kExprI32x4GtS,
kExprI32x4Splat, Greater);
}
WASM_SIMD_TEST(I32x4GeZero) {
RunICompareOpConstImmTest<int32_t>(execution_tier, kExprI32x4GeS,
kExprI32x4Splat, GreaterEqual);
}
WASM_SIMD_TEST(I32x4LtZero) {
RunICompareOpConstImmTest<int32_t>(execution_tier, kExprI32x4LtS,
kExprI32x4Splat, Less);
}
WASM_SIMD_TEST(I32x4LeZero) {
RunICompareOpConstImmTest<int32_t>(execution_tier, kExprI32x4LeS,
kExprI32x4Splat, LessEqual);
}
WASM_SIMD_TEST(I32x4Shl) {
RunI32x4ShiftOpTest(execution_tier, kExprI32x4Shl, LogicalShiftLeft);
}
@@ -1673,6 +1777,36 @@ WASM_SIMD_TEST(I16x8LeU) {
RunI16x8BinOpTest(execution_tier, kExprI16x8LeU, UnsignedLessEqual);
}
WASM_SIMD_TEST(I16x8EqZero) {
RunICompareOpConstImmTest<int16_t>(execution_tier, kExprI16x8Eq,
kExprI16x8Splat, Equal);
}
WASM_SIMD_TEST(I16x8NeZero) {
RunICompareOpConstImmTest<int16_t>(execution_tier, kExprI16x8Ne,
kExprI16x8Splat, NotEqual);
}
WASM_SIMD_TEST(I16x8GtZero) {
RunICompareOpConstImmTest<int16_t>(execution_tier, kExprI16x8GtS,
kExprI16x8Splat, Greater);
}
WASM_SIMD_TEST(I16x8GeZero) {
RunICompareOpConstImmTest<int16_t>(execution_tier, kExprI16x8GeS,
kExprI16x8Splat, GreaterEqual);
}
WASM_SIMD_TEST(I16x8LtZero) {
RunICompareOpConstImmTest<int16_t>(execution_tier, kExprI16x8LtS,
kExprI16x8Splat, Less);
}
WASM_SIMD_TEST(I16x8LeZero) {
RunICompareOpConstImmTest<int16_t>(execution_tier, kExprI16x8LeS,
kExprI16x8Splat, LessEqual);
}
WASM_SIMD_TEST(I16x8RoundingAverageU) {
RunI16x8BinOpTest<uint16_t>(execution_tier, kExprI16x8RoundingAverageU,
RoundingAverageUnsigned);
@@ -2047,6 +2181,36 @@ WASM_SIMD_TEST(I8x16LeU) {
RunI8x16BinOpTest(execution_tier, kExprI8x16LeU, UnsignedLessEqual);
}
WASM_SIMD_TEST(I8x16EqZero) {
RunICompareOpConstImmTest<int8_t>(execution_tier, kExprI8x16Eq,
kExprI8x16Splat, Equal);
}
WASM_SIMD_TEST(I8x16NeZero) {
RunICompareOpConstImmTest<int8_t>(execution_tier, kExprI8x16Ne,
kExprI8x16Splat, NotEqual);
}
WASM_SIMD_TEST(I8x16GtZero) {
RunICompareOpConstImmTest<int8_t>(execution_tier, kExprI8x16GtS,
kExprI8x16Splat, Greater);
}
WASM_SIMD_TEST(I8x16GeZero) {
RunICompareOpConstImmTest<int8_t>(execution_tier, kExprI8x16GeS,
kExprI8x16Splat, GreaterEqual);
}
WASM_SIMD_TEST(I8x16LtZero) {
RunICompareOpConstImmTest<int8_t>(execution_tier, kExprI8x16LtS,
kExprI8x16Splat, Less);
}
WASM_SIMD_TEST(I8x16LeZero) {
RunICompareOpConstImmTest<int8_t>(execution_tier, kExprI8x16LeS,
kExprI8x16Splat, LessEqual);
}
WASM_SIMD_TEST(I8x16RoundingAverageU) {
RunI8x16BinOpTest<uint8_t>(execution_tier, kExprI8x16RoundingAverageU,
RoundingAverageUnsigned);
......
@@ -5454,16 +5454,48 @@ TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsSimd128) {
expected_poke_pair, expected_poke);
}
struct SIMDConstZeroFcmTest {
struct SIMDConstZeroCmTest {
const bool is_zero;
const uint8_t lane_size;
const Operator* (MachineOperatorBuilder::*fcm_operator)();
const Operator* (MachineOperatorBuilder::*cm_operator)();
const ArchOpcode expected_op_left;
const ArchOpcode expected_op_right;
const size_t size;
};
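// In each entry, is_zero selects an all-zeros vs. non-zero test constant,
// expected_op_left/expected_op_right are the opcodes expected with the
// constant as the left or right operand, and size is the expected number of
// emitted instructions (1 when the zero constant is folded into the compare,
// 2 when the constant must be materialized).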
static const SIMDConstZeroFcmTest SIMDConstZeroFcmTests[] = {
static const SIMDConstZeroCmTest SIMDConstZeroCmTests[] = {
{true, 8, &MachineOperatorBuilder::I8x16Eq, kArm64IEq, kArm64IEq, 1},
{true, 8, &MachineOperatorBuilder::I8x16Ne, kArm64INe, kArm64INe, 1},
{true, 8, &MachineOperatorBuilder::I8x16GeS, kArm64ILeS, kArm64IGeS, 1},
{true, 8, &MachineOperatorBuilder::I8x16GtS, kArm64ILtS, kArm64IGtS, 1},
{false, 8, &MachineOperatorBuilder::I8x16Eq, kArm64IEq, kArm64IEq, 2},
{false, 8, &MachineOperatorBuilder::I8x16Ne, kArm64INe, kArm64INe, 2},
{false, 8, &MachineOperatorBuilder::I8x16GeS, kArm64IGeS, kArm64IGeS, 2},
{false, 8, &MachineOperatorBuilder::I8x16GtS, kArm64IGtS, kArm64IGtS, 2},
{true, 16, &MachineOperatorBuilder::I16x8Eq, kArm64IEq, kArm64IEq, 1},
{true, 16, &MachineOperatorBuilder::I16x8Ne, kArm64INe, kArm64INe, 1},
{true, 16, &MachineOperatorBuilder::I16x8GeS, kArm64ILeS, kArm64IGeS, 1},
{true, 16, &MachineOperatorBuilder::I16x8GtS, kArm64ILtS, kArm64IGtS, 1},
{false, 16, &MachineOperatorBuilder::I16x8Eq, kArm64IEq, kArm64IEq, 2},
{false, 16, &MachineOperatorBuilder::I16x8Ne, kArm64INe, kArm64INe, 2},
{false, 16, &MachineOperatorBuilder::I16x8GeS, kArm64IGeS, kArm64IGeS, 2},
{false, 16, &MachineOperatorBuilder::I16x8GtS, kArm64IGtS, kArm64IGtS, 2},
{true, 32, &MachineOperatorBuilder::I32x4Eq, kArm64IEq, kArm64IEq, 1},
{true, 32, &MachineOperatorBuilder::I32x4Ne, kArm64INe, kArm64INe, 1},
{true, 32, &MachineOperatorBuilder::I32x4GeS, kArm64ILeS, kArm64IGeS, 1},
{true, 32, &MachineOperatorBuilder::I32x4GtS, kArm64ILtS, kArm64IGtS, 1},
{false, 32, &MachineOperatorBuilder::I32x4Eq, kArm64IEq, kArm64IEq, 2},
{false, 32, &MachineOperatorBuilder::I32x4Ne, kArm64INe, kArm64INe, 2},
{false, 32, &MachineOperatorBuilder::I32x4GeS, kArm64IGeS, kArm64IGeS, 2},
{false, 32, &MachineOperatorBuilder::I32x4GtS, kArm64IGtS, kArm64IGtS, 2},
{true, 64, &MachineOperatorBuilder::I64x2Eq, kArm64IEq, kArm64IEq, 1},
{true, 64, &MachineOperatorBuilder::I64x2Ne, kArm64INe, kArm64INe, 1},
{true, 64, &MachineOperatorBuilder::I64x2GeS, kArm64ILeS, kArm64IGeS, 1},
{true, 64, &MachineOperatorBuilder::I64x2GtS, kArm64ILtS, kArm64IGtS, 1},
{false, 64, &MachineOperatorBuilder::I64x2Eq, kArm64IEq, kArm64IEq, 2},
{false, 64, &MachineOperatorBuilder::I64x2Ne, kArm64INe, kArm64INe, 2},
{false, 64, &MachineOperatorBuilder::I64x2GeS, kArm64IGeS, kArm64IGeS, 2},
{false, 64, &MachineOperatorBuilder::I64x2GtS, kArm64IGtS, kArm64IGtS, 2},
{true, 64, &MachineOperatorBuilder::F64x2Eq, kArm64FEq, kArm64FEq, 1},
{true, 64, &MachineOperatorBuilder::F64x2Ne, kArm64FNe, kArm64FNe, 1},
{true, 64, &MachineOperatorBuilder::F64x2Lt, kArm64FGt, kArm64FLt, 1},
@@ -5482,11 +5514,11 @@ static const SIMDConstZeroFcmTest SIMDConstZeroFcmTests[] = {
{false, 32, &MachineOperatorBuilder::F32x4Le, kArm64FLe, kArm64FLe, 2},
};
using InstructionSelectorSIMDConstZeroFcmTest =
InstructionSelectorTestWithParam<SIMDConstZeroFcmTest>;
using InstructionSelectorSIMDConstZeroCmTest =
InstructionSelectorTestWithParam<SIMDConstZeroCmTest>;
TEST_P(InstructionSelectorSIMDConstZeroFcmTest, ConstZero) {
const SIMDConstZeroFcmTest param = GetParam();
TEST_P(InstructionSelectorSIMDConstZeroCmTest, ConstZero) {
const SIMDConstZeroCmTest param = GetParam();
byte data[16] = {};
if (!param.is_zero) data[0] = 0xff;
// Const node on the left
@@ -5494,7 +5526,7 @@ TEST_P(InstructionSelectorSIMDConstZeroFcmTest, ConstZero) {
StreamBuilder m(this, MachineType::Simd128(), MachineType::Simd128());
Node* cnst = m.S128Const(data);
Node* fcm =
m.AddNode((m.machine()->*param.fcm_operator)(), cnst, m.Parameter(0));
m.AddNode((m.machine()->*param.cm_operator)(), cnst, m.Parameter(0));
m.Return(fcm);
Stream s = m.Build();
ASSERT_EQ(param.size, s.size());
@@ -5516,7 +5548,7 @@ TEST_P(InstructionSelectorSIMDConstZeroFcmTest, ConstZero) {
StreamBuilder m(this, MachineType::Simd128(), MachineType::Simd128());
Node* cnst = m.S128Const(data);
Node* fcm =
m.AddNode((m.machine()->*param.fcm_operator)(), m.Parameter(0), cnst);
m.AddNode((m.machine()->*param.cm_operator)(), m.Parameter(0), cnst);
m.Return(fcm);
Stream s = m.Build();
ASSERT_EQ(param.size, s.size());
@@ -5536,8 +5568,8 @@ TEST_P(InstructionSelectorSIMDConstZeroFcmTest, ConstZero) {
}
INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorSIMDConstZeroFcmTest,
::testing::ValuesIn(SIMDConstZeroFcmTests));
InstructionSelectorSIMDConstZeroCmTest,
::testing::ValuesIn(SIMDConstZeroCmTests));
} // namespace
} // namespace compiler
......