Commit 9866cb59 authored by Ng Zhi An, committed by Commit Bot

[wasm-simd] Implement F64x2 eq ne gt ge lt le for arm64

Bug: v8:8460
Change-Id: If7fd1a497b2a3b74b921e175491233be884d574b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1749713
Reviewed-by: Bill Budge <bbudge@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63384}
parent 3e2830a1
@@ -1800,6 +1800,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       SIMD_BINOP_CASE(kArm64F64x2Div, Fdiv, 2D);
       SIMD_BINOP_CASE(kArm64F64x2Min, Fmin, 2D);
       SIMD_BINOP_CASE(kArm64F64x2Max, Fmax, 2D);
+      SIMD_BINOP_CASE(kArm64F64x2Eq, Fcmeq, 2D);
+    case kArm64F64x2Ne: {
+      VRegister dst = i.OutputSimd128Register().V2D();
+      __ Fcmeq(dst, i.InputSimd128Register(0).V2D(),
+               i.InputSimd128Register(1).V2D());
+      __ Mvn(dst, dst);
+      break;
+    }
+    case kArm64F64x2Lt: {
+      __ Fcmgt(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
+               i.InputSimd128Register(0).V2D());
+      break;
+    }
+    case kArm64F64x2Le: {
+      __ Fcmge(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
+               i.InputSimd128Register(0).V2D());
+      break;
+    }
     case kArm64F32x4Splat: {
       __ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0);
       break;
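Note on the code-generator hunk above (presumably src/compiler/backend/arm64/code-generator-arm64.cc): Eq maps directly to Fcmeq. AArch64 has no two-register vector Fcmne, so Ne is emitted as Fcmeq followed by Mvn, a bitwise NOT of the lane masks. Likewise, the two-register vector forms of FCMLT/FCMLE do not exist (only compare-against-zero variants do), so Lt and Le emit Fcmgt/Fcmge with the operands swapped: a < b is exactly b > a. A minimal scalar model of the per-lane semantics this produces (an illustrative sketch, not V8 code):

```cpp
// Each F64x2 compare writes an all-ones (-1) or all-zeros (0)
// 64-bit mask into each lane.
#include <cstdint>

int64_t LaneEq(double a, double b) { return a == b ? -1 : 0; }  // Fcmeq
int64_t LaneNe(double a, double b) { return ~LaneEq(a, b); }    // Fcmeq + Mvn
int64_t LaneLt(double a, double b) { return b > a ? -1 : 0; }   // Fcmgt, swapped
int64_t LaneLe(double a, double b) { return b >= a ? -1 : 0; }  // Fcmge, swapped
```

Note that the Mvn trick also gives the right NaN behavior: NaN lanes compare not-equal, so Eq yields 0 and Ne yields -1, while Lt/Le yield 0.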
@@ -181,6 +181,10 @@ namespace compiler {
   V(Arm64F64x2Div)                 \
   V(Arm64F64x2Min)                 \
   V(Arm64F64x2Max)                 \
+  V(Arm64F64x2Eq)                  \
+  V(Arm64F64x2Ne)                  \
+  V(Arm64F64x2Lt)                  \
+  V(Arm64F64x2Le)                  \
   V(Arm64F32x4Splat)               \
   V(Arm64F32x4ExtractLane)         \
   V(Arm64F32x4ReplaceLane)         \
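The hunk above extends the ARM64 opcode X-macro list (presumably in src/compiler/backend/arm64/instruction-codes-arm64.h); each V(Name) entry expands into an enum constant kName. A minimal, self-contained sketch of the pattern (demo macro name simplified, the real V8 list is much larger):

```cpp
// Each V(Name) entry becomes one enumerator kName.
#define DEMO_ARCH_OPCODE_LIST(V) \
  V(Arm64F64x2Eq)                \
  V(Arm64F64x2Ne)                \
  V(Arm64F64x2Lt)                \
  V(Arm64F64x2Le)

enum ArchOpcode {
#define DECLARE_OPCODE(Name) k##Name,
  DEMO_ARCH_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};
// Yields kArm64F64x2Eq ... kArm64F64x2Le, the values the code
// generator and scheduler hunks switch on.
```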
@@ -148,6 +148,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArm64F64x2Div:
     case kArm64F64x2Min:
     case kArm64F64x2Max:
+    case kArm64F64x2Eq:
+    case kArm64F64x2Ne:
+    case kArm64F64x2Lt:
+    case kArm64F64x2Le:
     case kArm64F32x4Splat:
     case kArm64F32x4ExtractLane:
     case kArm64F32x4ReplaceLane:
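The scheduler hunk (presumably src/compiler/backend/arm64/instruction-scheduler-arm64.cc) adds the new opcodes to the big case list of pure register-to-register operations, which, if memory serves, falls through to return kNoOpcodeFlags: no loads, stores, or traps, so the instruction scheduler may reorder them freely. A self-contained sketch of that structure (assumed shape, names mirror but simplify the real code):

```cpp
enum ArchOpcode { kArm64F64x2Eq, kArm64F64x2Ne, kArm64F64x2Lt, kArm64F64x2Le };
enum { kNoOpcodeFlags = 0 };

int GetTargetInstructionFlags(ArchOpcode opcode) {
  switch (opcode) {
    case kArm64F64x2Eq:
    case kArm64F64x2Ne:
    case kArm64F64x2Lt:
    case kArm64F64x2Le:
      // Pure SIMD register ops: no memory access, freely schedulable.
      return kNoOpcodeFlags;
  }
  return kNoOpcodeFlags;
}
```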
@@ -3105,6 +3105,10 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
   V(F64x2Div, kArm64F64x2Div) \
   V(F64x2Min, kArm64F64x2Min) \
   V(F64x2Max, kArm64F64x2Max) \
+  V(F64x2Eq, kArm64F64x2Eq) \
+  V(F64x2Ne, kArm64F64x2Ne) \
+  V(F64x2Lt, kArm64F64x2Lt) \
+  V(F64x2Le, kArm64F64x2Le) \
   V(F32x4Add, kArm64F32x4Add) \
   V(F32x4AddHoriz, kArm64F32x4AddHoriz) \
   V(F32x4Sub, kArm64F32x4Sub) \
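This hunk (presumably the SIMD binop list in src/compiler/backend/arm64/instruction-selector-arm64.cc) maps each machine-graph operation to its new ARM64 opcode; the same X-macro trick as above then generates one visitor per pair, each forwarding to a shared helper (VisitRRR in V8, as assumed here) that emits a single two-input, one-output instruction. A runnable toy version of that expansion:

```cpp
// Toy expansion: VisitRRR is a stand-in for V8's real emitter.
#include <iostream>

enum ArchOpcode { kArm64F64x2Eq, kArm64F64x2Ne, kArm64F64x2Lt, kArm64F64x2Le };

void VisitRRR(ArchOpcode opcode) {
  std::cout << "emit arch opcode " << opcode << " dst, src0, src1\n";
}

#define SIMD_BINOP_LIST(V)  \
  V(F64x2Eq, kArm64F64x2Eq) \
  V(F64x2Ne, kArm64F64x2Ne) \
  V(F64x2Lt, kArm64F64x2Lt) \
  V(F64x2Le, kArm64F64x2Le)

// Each list entry expands into a visitor that forwards to one opcode.
#define SIMD_VISIT_BINOP(Name, instruction) \
  void Visit##Name() { VisitRRR(instruction); }
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP

int main() { VisitF64x2Ne(); }  // prints: emit arch opcode 1 dst, src0, src1
```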
@@ -2600,10 +2600,6 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
 // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390
 #if !V8_TARGET_ARCH_X64
-void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Le(Node* node) { UNIMPLEMENTED(); }
 #if !V8_TARGET_ARCH_ARM64
 void InstructionSelector::VisitF64x2Splat(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitF64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
@@ -2616,6 +2612,10 @@ void InstructionSelector::VisitF64x2Mul(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitF64x2Div(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Le(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
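The two hunks above are presumably in the platform-independent src/compiler/backend/instruction-selector.cc: the four UNIMPLEMENTED() stubs move from the outer `#if !V8_TARGET_ARCH_X64` level into the nested `#if !V8_TARGET_ARCH_ARM64` block, so the fallback now fires only on targets that are neither x64 nor arm64. A self-contained toy demonstrating the resulting nesting (macro names match the real ones, the rest is a demo; real V8 defines these macros as 0/1 rather than testing defined()):

```cpp
#include <cstdio>

#if !defined(V8_TARGET_ARCH_X64)
#if !defined(V8_TARGET_ARCH_ARM64)
void VisitF64x2Eq() { std::puts("UNIMPLEMENTED"); }  // generic fallback only
#else
void VisitF64x2Eq() { std::puts("arm64 backend handles F64x2Eq"); }
#endif
#else
void VisitF64x2Eq() { std::puts("x64 backend handles F64x2Eq"); }
#endif

// Build with -DV8_TARGET_ARCH_ARM64 (or ..._X64) to exercise each branch.
int main() { VisitF64x2Eq(); }
```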
@@ -279,7 +279,7 @@ T Sqrt(T a) {
   return std::sqrt(a);
 }
-#if V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
 // only used for F64x2 tests below
 int64_t Equal(double a, double b) { return a == b ? -1 : 0; }
@@ -292,7 +292,7 @@ int64_t GreaterEqual(double a, double b) { return a >= b ? -1 : 0; }
 int64_t Less(double a, double b) { return a < b ? -1 : 0; }
 int64_t LessEqual(double a, double b) { return a <= b ? -1 : 0; }
-#endif  // V8_TARGET_ARCH_X64
+#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
 }  // namespace
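These scalar reference helpers (in the wasm SIMD cctest, presumably test/cctest/wasm/test-run-wasm-simd.cc) return int64_t -1 for "true" because that is the all-ones lane mask the vector compares write; widening the guard with `|| V8_TARGET_ARCH_ARM64` makes them available to the arm64 test runs. A self-contained check of the bit pattern:

```cpp
// int64_t(-1) is exactly 0xFFFFFFFFFFFFFFFF in two's complement,
// matching a SIMD "true" lane.
#include <cstdint>
#include <cstdio>

int main() {
  int64_t t = -1;  // lane "true", as returned by Equal()/Less()/...
  std::printf("%016llx\n", static_cast<unsigned long long>(t));
  // prints: ffffffffffffffff
  return 0;
}
```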
@@ -1183,10 +1183,7 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2Mul) {
 WASM_SIMD_TEST_NO_LOWERING(F64x2Div) {
   RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Div, Div);
 }
-#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
-#if V8_TARGET_ARCH_X64
 void RunF64x2CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
                            WasmOpcode opcode, DoubleCompareOp expected_op) {
   WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
@@ -1240,7 +1237,6 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2Lt) {
 WASM_SIMD_TEST_NO_LOWERING(F64x2Le) {
   RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Le, LessEqual);
 }
-#endif  // V8_TARGET_ARCH_X64
 WASM_SIMD_TEST_NO_LOWERING(F64x2Min) {
   RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Min, JSMin);
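With the X64-only guards dropped, the existing F64x2Eq/Ne/Lt/Le tests (shown in part above) now also run on arm64. RunF64x2CompareOpTest builds a wasm function that applies the SIMD compare to two inputs and checks each 64-bit lane against the scalar expected_op. A rough, self-contained model of that check (a sketch of the test's shape, not the actual cctest harness):

```cpp
#include <cassert>
#include <cstdint>

using DoubleCompareOp = int64_t (*)(double, double);

int64_t LessEqual(double a, double b) { return a <= b ? -1 : 0; }

// Stand-in for the generated wasm: an F64x2 compare on splatted a and b.
void F64x2CompareSplat(DoubleCompareOp op, double a, double b,
                       int64_t out_lanes[2]) {
  out_lanes[0] = op(a, b);
  out_lanes[1] = op(a, b);
}

int main() {
  int64_t lanes[2];
  F64x2CompareSplat(LessEqual, 1.0, 2.0, lanes);
  assert(lanes[0] == -1 && lanes[1] == -1);  // every lane must be "true"
  return 0;
}
```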