Commit 32f85f7d authored by Ng Zhi An, committed by Commit Bot

[wasm-simd][scalar-lowering] Fix lowering for f64x2

Add lowering for F64x2 in S128Const, and add conversions to and from f64x2.

Bug: v8:10507
Change-Id: Ic2c4f1f41d3dd804e012a943391a46b534864b51
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2424679
Reviewed-by: Bill Budge <bbudge@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70097}
parent ebe43399
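For context, scalar lowering replaces each 128-bit SIMD value with per-lane scalar nodes; for f64x2 that means two Float64 nodes. The standalone C++ sketch below is an illustration only, not V8 code (ReadF64Lane and the sample values are hypothetical): it mirrors the byte-level semantics of the changes in this diff, where the new S128Const case reads each lane of the 16-byte immediate as a little-endian double, and the new Int64ToFloat64 helper reinterprets int64 lane bits as a double (BitcastInt64ToFloat64).

// Standalone sketch of the lowering's byte-level semantics (assumes a
// little-endian host; V8 itself uses base::ReadLittleEndianValue to stay
// endian-agnostic).
#include <cstdint>
#include <cstdio>
#include <cstring>

// Lane i of a 16-byte S128Const immediate, read as a little-endian double.
double ReadF64Lane(const uint8_t bytes[16], int lane) {
  double val;
  std::memcpy(&val, bytes + lane * sizeof(double), sizeof(double));
  return val;
}

// What Int64ToFloat64 does per lane: reinterpret the 64-bit pattern as a
// double without converting the numeric value.
double BitcastInt64ToFloat64(int64_t bits) {
  double val;
  std::memcpy(&val, &bits, sizeof(val));
  return val;
}

int main() {
  // 16-byte immediate encoding the f64x2 constant {1.0, 2.0}.
  const uint8_t imm[16] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f,
                           0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40};
  std::printf("lane 0 = %f, lane 1 = %f\n", ReadF64Lane(imm, 0),
              ReadF64Lane(imm, 1));
  // 0x7fefffffffffffff is the bit pattern of the largest finite double, the
  // same value the F64x2_Call_Return test below passes via bit_cast.
  std::printf("bitcast = %.17g\n", BitcastInt64ToFloat64(0x7fefffffffffffff));
  return 0;
}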
@@ -1253,8 +1253,14 @@ void SimdScalarLowering::LowerNode(Node* node) {
         }
         break;
       }
-      default: {
-        UNIMPLEMENTED();
+      case SimdType::kFloat64x2: {
+        double val[kNumLanes64];
+        memcpy(val, params.data(), kSimd128Size);
+        for (int i = 0; i < num_lanes; ++i) {
+          rep_node[i] = mcgraph_->Float64Constant(
+              base::ReadLittleEndianValue<double>(&val[i]));
+        }
+        break;
       }
     }
     ReplaceNode(node, rep_node, num_lanes);
@@ -1368,6 +1374,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
       case SimdType::kInt8x16:
       case SimdType::kInt16x8:
       case SimdType::kInt64x2:
+      case SimdType::kFloat64x2:
       case SimdType::kFloat32x4: {
         Node** reps = GetReplacementsWithType(input, rep_type);
         ReplaceNode(input, reps, NumLanes(rep_type));
@@ -1377,11 +1384,6 @@ void SimdScalarLowering::LowerNode(Node* node) {
         // No action needed.
         break;
       }
-      default: {
-        // i64x2 and f64x2 aren't handled anywhere yet, ignore it here.
-        break;
-        UNIMPLEMENTED();
-      }
     }
   }
@@ -2154,6 +2156,17 @@ void SimdScalarLowering::Int32ToFloat32(Node** replacements, Node** result) {
   }
 }
 
+void SimdScalarLowering::Int64ToFloat64(Node** replacements, Node** result) {
+  for (int i = 0; i < kNumLanes64; ++i) {
+    if (replacements[i] != nullptr) {
+      result[i] =
+          graph()->NewNode(machine()->BitcastInt64ToFloat64(), replacements[i]);
+    } else {
+      result[i] = nullptr;
+    }
+  }
+}
+
 void SimdScalarLowering::Float64ToInt64(Node** replacements, Node** result) {
   for (int i = 0; i < kNumLanes64; ++i) {
     if (replacements[i] != nullptr) {
@@ -2273,10 +2286,18 @@ Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
   if (type == SimdType::kInt64x2) {
     if (ReplacementType(node) == SimdType::kInt32x4) {
       Int32ToInt64(replacements, result);
+    } else if (ReplacementType(node) == SimdType::kFloat64x2) {
+      Float64ToInt64(replacements, result);
+    } else {
+      UNIMPLEMENTED();
     }
   } else if (type == SimdType::kInt32x4) {
     if (ReplacementType(node) == SimdType::kInt64x2) {
       Int64ToInt32(replacements, result);
+    } else if (ReplacementType(node) == SimdType::kFloat64x2) {
+      Node** float64_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+      Float64ToInt64(replacements, float64_to_int64);
+      Int64ToInt32(float64_to_int64, result);
     } else if (ReplacementType(node) == SimdType::kFloat32x4) {
       Float32ToInt32(replacements, result);
     } else if (ReplacementType(node) == SimdType::kInt16x8) {
@@ -2286,6 +2307,16 @@ Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
     } else {
       UNREACHABLE();
     }
+  } else if (type == SimdType::kFloat64x2) {
+    if (ReplacementType(node) == SimdType::kInt64x2) {
+      Int64ToFloat64(replacements, result);
+    } else if (ReplacementType(node) == SimdType::kInt32x4) {
+      Node** int32_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+      Int32ToInt64(replacements, int32_to_int64);
+      Int64ToFloat64(int32_to_int64, result);
+    } else {
+      UNIMPLEMENTED();
+    }
   } else if (type == SimdType::kFloat32x4) {
     if (ReplacementType(node) == SimdType::kFloat64x2) {
       Node** float64_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
...
@@ -77,6 +77,7 @@ class SimdScalarLowering {
   void Float64ToInt64(Node** replacements, Node** result);
   void Float32ToInt32(Node** replacements, Node** result);
   void Int32ToFloat32(Node** replacements, Node** result);
+  void Int64ToFloat64(Node** replacements, Node** result);
   void Int64ToInt32(Node** replacements, Node** result);
   template <typename T>
   void Int32ToSmallerInt(Node** replacements, Node** result);
...
@@ -43,6 +43,29 @@ WASM_SIMD_TEST(I8x16ToF32x4) {
   CHECK_EQ(expected, actual);
 }
 
+WASM_SIMD_TEST(F64x2_Call_Return) {
+  // Check that calling a function with f64x2 arguments, and returning f64x2,
+  // is correctly lowered. The signature of such functions is always lowered
+  // to 4 Word32, so each f64x2 needs to be correctly converted.
+  TestSignatures sigs;
+  WasmRunner<double, double, double> r(execution_tier, lower_simd);
+  WasmFunctionCompiler& fn = r.NewFunction(sigs.s_ss());
+  BUILD(fn,
+        WASM_SIMD_BINOP(kExprF64x2Min, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+
+  byte c1[16] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+  byte c2[16] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0x7f,
+                 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0x7f};
+  BUILD(r,
+        WASM_SIMD_F64x2_EXTRACT_LANE(
+            0, WASM_CALL_FUNCTION(fn.function_index(), WASM_SIMD_CONSTANT(c1),
+                                  WASM_SIMD_CONSTANT(c2))));
+
+  CHECK_EQ(0, r.Call(double{0}, bit_cast<double>(0x7fefffffffffffff)));
+}
+
 WASM_SIMD_TEST(F32x4_Call_Return) {
   // Check that functions that return F32x4 are correctly lowered into 4 int32
   // nodes. The signature of such functions are always lowered to 4 Word32, and
...
@@ -1150,9 +1150,7 @@ WASM_SIMD_TEST(F64x2ReplaceLane) {
   CHECK_EQ(1., ReadLittleEndianValue<double>(&g1[1]));
 }
 
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X || \
-    V8_TARGET_ARCH_MIPS64
-WASM_SIMD_TEST_NO_LOWERING(F64x2ExtractLaneWithI64x2) {
+WASM_SIMD_TEST(F64x2ExtractLaneWithI64x2) {
   WasmRunner<int64_t> r(execution_tier, lower_simd);
   BUILD(r, WASM_IF_ELSE_L(
                WASM_F64_EQ(WASM_SIMD_F64x2_EXTRACT_LANE(
@@ -1162,7 +1160,7 @@ WASM_SIMD_TEST(F64x2ExtractLaneWithI64x2) {
   CHECK_EQ(1, r.Call());
 }
 
-WASM_SIMD_TEST_NO_LOWERING(I64x2ExtractWithF64x2) {
+WASM_SIMD_TEST(I64x2ExtractWithF64x2) {
   WasmRunner<int64_t> r(execution_tier, lower_simd);
   BUILD(r, WASM_IF_ELSE_L(
                WASM_I64_EQ(WASM_SIMD_I64x2_EXTRACT_LANE(
@@ -1171,8 +1169,6 @@ WASM_SIMD_TEST(I64x2ExtractWithF64x2) {
                WASM_I64V(1), WASM_I64V(0)));
   CHECK_EQ(1, r.Call());
 }
-#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X ||
-        // V8_TARGET_ARCH_MIPS64
 
 bool IsExtreme(double x) {
   double abs_x = std::fabs(x);
...
@@ -42,10 +42,6 @@
   # we finish the implementation, see v8:10507.
   'proposals/simd/simd_bit_shift' : [PASS, FAIL],
   'proposals/simd/simd_conversions' : [PASS, FAIL],
-  'proposals/simd/simd_f64x2': [PASS, FAIL],
-  'proposals/simd/simd_f64x2_arith' : [PASS, FAIL],
-  'proposals/simd/simd_f64x2_cmp' : [PASS, FAIL],
-  'proposals/simd/simd_i64x2_arith' : [PASS, FAIL],
   'proposals/simd/simd_lane' : [PASS, FAIL],
   'proposals/simd/simd_load_extend' : [PASS, FAIL],
   'proposals/simd/simd_load_splat' : [PASS, FAIL],
...