Commit 57406c3d authored by Ng Zhi An, committed by Commit Bot

[wasm simd] Implement F64x2 Gt Ge Lt Le on x64

Bug: v8:8460
Change-Id: I98ae0b9cf90201ddf61488104f4c49df4e73b8dc
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1690201
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Bill Budge <bbudge@chromium.org>
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62728}
parent 40d7e1a6
@@ -1821,6 +1821,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF64x2Eq(node);
case IrOpcode::kF64x2Ne:
return MarkAsSimd128(node), VisitF64x2Ne(node);
case IrOpcode::kF64x2Lt:
return MarkAsSimd128(node), VisitF64x2Lt(node);
case IrOpcode::kF64x2Le:
return MarkAsSimd128(node), VisitF64x2Le(node);
case IrOpcode::kF32x4Splat:
return MarkAsSimd128(node), VisitF32x4Splat(node);
case IrOpcode::kF32x4ExtractLane:
@@ -2544,6 +2548,8 @@ void InstructionSelector::VisitF64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Le(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
@@ -2279,6 +2279,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64F64x2Lt: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64F64x2Le: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
// TODO(gdeepti): Get rid of redundant moves for F32x4Splat/Extract below
case kX64F32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
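For reference, the SSE2 compares behind these opcodes are destructive: cmpltpd and cmplepd overwrite their first operand with a per-lane mask of all ones (predicate true) or all zeros (predicate false), which is why the code above asserts that the output register equals the first input. A minimal standalone illustration using the <emmintrin.h> intrinsics (not V8 code; values chosen for the example):

```cpp
#include <emmintrin.h>  // SSE2: _mm_cmplt_pd / _mm_cmple_pd
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  __m128d a = _mm_set_pd(1.0, -2.5);  // lanes: {-2.5, 1.0}
  __m128d b = _mm_set_pd(1.0, 3.0);   // lanes: {3.0, 1.0}
  __m128d lt = _mm_cmplt_pd(a, b);    // per-lane a < b -> all-ones/all-zeros
  __m128d le = _mm_cmple_pd(a, b);    // per-lane a <= b

  int64_t lt_lanes[2], le_lanes[2];
  std::memcpy(lt_lanes, &lt, sizeof lt_lanes);
  std::memcpy(le_lanes, &le, sizeof le_lanes);
  // -2.5 < 3.0 holds but 1.0 < 1.0 does not: prints "lt: -1 0"
  std::printf("lt: %lld %lld\n", (long long)lt_lanes[0], (long long)lt_lanes[1]);
  // 1.0 <= 1.0 holds: prints "le: -1 -1"
  std::printf("le: %lld %lld\n", (long long)le_lanes[0], (long long)le_lanes[1]);
  return 0;
}
```

A lane containing NaN yields an all-zeros mask for lt/le, matching the Wasm semantics exercised by the tests below.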
@@ -163,6 +163,8 @@ namespace compiler {
V(X64F64x2ReplaceLane) \
V(X64F64x2Eq) \
V(X64F64x2Ne) \
V(X64F64x2Lt) \
V(X64F64x2Le) \
V(X64F32x4Splat) \
V(X64F32x4ExtractLane) \
V(X64F32x4ReplaceLane) \
@@ -129,6 +129,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F64x2ReplaceLane:
case kX64F64x2Eq:
case kX64F64x2Ne:
case kX64F64x2Lt:
case kX64F64x2Le:
case kX64F32x4Splat:
case kX64F32x4ExtractLane:
case kX64F32x4ReplaceLane:
@@ -2595,6 +2595,8 @@ VISIT_ATOMIC_BINOP(Xor)
#define SIMD_BINOP_LIST(V) \
V(F64x2Eq) \
V(F64x2Ne) \
V(F64x2Lt) \
V(F64x2Le) \
V(F32x4Add) \
V(F32x4AddHoriz) \
V(F32x4Sub) \
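Adding the two entries to SIMD_BINOP_LIST is the entire instruction-selection change here, because the list is consumed by an X-macro that stamps out one visitor per entry (the real macro emits the matching kX64 opcode with the output constrained to the first input, consistent with the DCHECKs above). A self-contained sketch of the pattern, illustrative only:

```cpp
#include <cstdio>

// One list of names...
#define SIMD_BINOP_LIST(V) \
  V(F64x2Lt)               \
  V(F64x2Le)

// ...and one macro that generates a visitor per name.
#define DECLARE_VISITOR(Name) \
  void Visit##Name() { std::printf("emit kX64%s\n", #Name); }
SIMD_BINOP_LIST(DECLARE_VISITOR)
#undef DECLARE_VISITOR

int main() {
  VisitF64x2Lt();  // prints: emit kX64F64x2Lt
  VisitF64x2Le();  // prints: emit kX64F64x2Le
  return 0;
}
```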
@@ -248,6 +248,8 @@ MachineType AtomicOpType(Operator const* op) {
V(F64x2Splat, Operator::kNoProperties, 1, 0, 1) \
V(F64x2Eq, Operator::kCommutative, 2, 0, 1) \
V(F64x2Ne, Operator::kCommutative, 2, 0, 1) \
V(F64x2Lt, Operator::kNoProperties, 2, 0, 1) \
V(F64x2Le, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Splat, Operator::kNoProperties, 1, 0, 1) \
V(F32x4SConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
V(F32x4UConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
@@ -473,6 +473,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F64x2ReplaceLane(int32_t);
const Operator* F64x2Eq();
const Operator* F64x2Ne();
const Operator* F64x2Lt();
const Operator* F64x2Le();
const Operator* F32x4Splat();
const Operator* F32x4ExtractLane(int32_t);
@@ -745,6 +745,8 @@
V(F64x2ReplaceLane) \
V(F64x2Eq) \
V(F64x2Ne) \
V(F64x2Lt) \
V(F64x2Le) \
V(F32x4Splat) \
V(F32x4ExtractLane) \
V(F32x4ReplaceLane) \
@@ -4000,6 +4000,18 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprF64x2Ne:
return graph()->NewNode(mcgraph()->machine()->F64x2Ne(), inputs[0],
inputs[1]);
case wasm::kExprF64x2Lt:
return graph()->NewNode(mcgraph()->machine()->F64x2Lt(), inputs[0],
inputs[1]);
case wasm::kExprF64x2Le:
return graph()->NewNode(mcgraph()->machine()->F64x2Le(), inputs[0],
inputs[1]);
case wasm::kExprF64x2Gt:
return graph()->NewNode(mcgraph()->machine()->F64x2Lt(), inputs[1],
inputs[0]);
case wasm::kExprF64x2Ge:
return graph()->NewNode(mcgraph()->machine()->F64x2Le(), inputs[1],
inputs[0]);
case wasm::kExprF32x4Splat:
return graph()->NewNode(mcgraph()->machine()->F32x4Splat(), inputs[0]);
case wasm::kExprF32x4SConvertI32x4:
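Note that kExprF64x2Gt and kExprF64x2Ge build F64x2Lt and F64x2Le nodes with the inputs swapped, so no dedicated greater-than machine opcodes are needed on x64. The identity a > b iff b < a (and a >= b iff b <= a) holds lanewise for IEEE doubles, including NaN operands, where both sides are false. A small standalone check of that identity (not V8 code):

```cpp
#include <cassert>
#include <cmath>

bool Gt(double a, double b) { return b < a; }   // lowered form of a > b
bool Ge(double a, double b) { return b <= a; }  // lowered form of a >= b

int main() {
  const double vals[] = {-1.5, 0.0, 2.0, std::nan("")};
  for (double a : vals) {
    for (double b : vals) {
      assert(Gt(a, b) == (a > b));   // holds for every pair, NaN included
      assert(Ge(a, b) == (a >= b));  // NaN operands: both sides are false
    }
  }
  return 0;
}
```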
@@ -2316,6 +2316,10 @@ class ThreadImpl {
}
CMPOP_CASE(F64x2Eq, f64x2, float2, int2, 2, a == b)
CMPOP_CASE(F64x2Ne, f64x2, float2, int2, 2, a != b)
CMPOP_CASE(F64x2Gt, f64x2, float2, int2, 2, a > b)
CMPOP_CASE(F64x2Ge, f64x2, float2, int2, 2, a >= b)
CMPOP_CASE(F64x2Lt, f64x2, float2, int2, 2, a < b)
CMPOP_CASE(F64x2Le, f64x2, float2, int2, 2, a <= b)
CMPOP_CASE(F32x4Eq, f32x4, float4, int4, 4, a == b)
CMPOP_CASE(F32x4Ne, f32x4, float4, int4, 4, a != b)
CMPOP_CASE(F32x4Gt, f32x4, float4, int4, 4, a > b)
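Each CMPOP_CASE line gives the interpreter a lanewise compare: the expression is evaluated with a and b bound to corresponding double lanes, and each int64 result lane becomes -1 (all bits set) when the predicate holds, 0 otherwise. A sketch of that shape, assuming the macro works roughly like this (the real one pops and pushes interpreter stack values):

```cpp
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>

using float2 = std::array<double, 2>;
using int2 = std::array<int64_t, 2>;

template <typename Pred>
int2 F64x2Compare(const float2& s1, const float2& s2, Pred pred) {
  int2 res;
  for (std::size_t i = 0; i < 2; ++i) {
    res[i] = pred(s1[i], s2[i]) ? -1 : 0;  // all-ones / all-zeros lane mask
  }
  return res;
}

int main() {
  float2 a = {1.0, 4.0};
  float2 b = {2.0, 3.0};
  int2 lt = F64x2Compare(a, b, [](double x, double y) { return x < y; });
  assert(lt[0] == -1 && lt[1] == 0);  // 1.0 < 2.0 holds; 4.0 < 3.0 does not
  return 0;
}
```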
@@ -234,6 +234,10 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_SIMD_OP(Mul, "mul")
CASE_I64x2_OP(Mul, "mul")
CASE_F64x2_OP(Splat, "splat")
CASE_F64x2_OP(Lt, "lt")
CASE_F64x2_OP(Le, "le")
CASE_F64x2_OP(Gt, "gt")
CASE_F64x2_OP(Ge, "ge")
CASE_F32x4_OP(Abs, "abs")
CASE_F32x4_OP(AddHoriz, "add_horizontal")
CASE_F32x4_OP(RecipApprox, "recip_approx")
@@ -323,6 +323,10 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(F32x4Ge, 0xfd45, s_ss) \
V(F64x2Eq, 0xfd46, s_ss) \
V(F64x2Ne, 0xfd47, s_ss) \
V(F64x2Lt, 0xfd48, s_ss) \
V(F64x2Gt, 0xfd49, s_ss) \
V(F64x2Le, 0xfd4a, s_ss) \
V(F64x2Ge, 0xfd4b, s_ss) \
V(S128Not, 0xfd4c, s_s) \
V(S128And, 0xfd4d, s_ss) \
V(S128Or, 0xfd4e, s_ss) \
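In this table, 0xfd48 through 0xfd4b encode the four new compares: 0xfd is the SIMD prefix byte, the low byte is the sub-opcode, and s_ss says each takes two s128 operands and returns an s128 mask. A small check of that reading (my interpretation of V8's prefixed-opcode encoding, where the enum value is (prefix << 8) | sub-opcode):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uint16_t kExprF64x2Lt = 0xfd48;            // from the table above
  const uint8_t prefix = kExprF64x2Lt >> 8;        // 0xfd: SIMD prefix byte
  const uint8_t sub_opcode = kExprF64x2Lt & 0xff;  // 0x48: f64x2.lt
  assert(prefix == 0xfd && sub_opcode == 0x48);
  return 0;
}
```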
@@ -275,6 +275,14 @@ T Sqrt(T a) {
int64_t Equal(double a, double b) { return a == b ? -1 : 0; }
int64_t NotEqual(double a, double b) { return a != b ? -1 : 0; }
int64_t Greater(double a, double b) { return a > b ? -1 : 0; }
int64_t GreaterEqual(double a, double b) { return a >= b ? -1 : 0; }
int64_t Less(double a, double b) { return a < b ? -1 : 0; }
int64_t LessEqual(double a, double b) { return a <= b ? -1 : 0; }
#endif // V8_TARGET_ARCH_X64
} // namespace
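The scalar helpers return int64_t so an expected value can be compared directly against a 64-bit result lane: -1 is the all-ones mask for a true compare, 0 for false. A quick standalone check of that convention, including the NaN cases the SIMD tests rely on (not the cctest harness itself):

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>

int64_t Less(double a, double b) { return a < b ? -1 : 0; }

int main() {
  assert(Less(1.0, 2.0) == -1);          // true  -> all 64 bits set
  assert(Less(2.0, 1.0) == 0);           // false -> all bits clear
  assert(Less(std::nan(""), 1.0) == 0);  // NaN never compares less-than
  assert(Less(1.0, std::nan("")) == 0);
  return 0;
}
```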
@@ -816,6 +824,22 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2Ne) {
RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ne, NotEqual);
}
WASM_SIMD_TEST_NO_LOWERING(F64x2Gt) {
RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Gt, Greater);
}
WASM_SIMD_TEST_NO_LOWERING(F64x2Ge) {
RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ge, GreaterEqual);
}
WASM_SIMD_TEST_NO_LOWERING(F64x2Lt) {
RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Lt, Less);
}
WASM_SIMD_TEST_NO_LOWERING(F64x2Le) {
RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Le, LessEqual);
}
WASM_SIMD_TEST_NO_LOWERING(I64x2Splat) {
WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
// Set up a global to hold output vector.