Commit 1d157cf0 authored by Zhi An Ng, committed by Commit Bot

Reland "[wasm-simd] Scalar lowering for extended multiply"

This is a reland of 94f2212b

Nothing changed; the failures appear to have been flaky.

Original change's description:
> [wasm-simd] Scalar lowering for extended multiply
>
> R=bbudge@chromium.org
>
> Bug: v8:11262
> Change-Id: Idd6a7514a16c561832af603dbf63779a0e402f45
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2603771
> Reviewed-by: Bill Budge <bbudge@chromium.org>
> Commit-Queue: Zhi An Ng <zhin@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#71920}

Bug: v8:11262
Change-Id: I6c504b2e0d1ad39e202483a72419dadb3b66eea8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2612330
Reviewed-by: Bill Budge <bbudge@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71965}
parent ffc832be
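As context for the diff below: the lowering turns each ExtMul SIMD opcode into per-lane scalar multiplies of widened inputs, and the cctest entries switch from WASM_SIMD_TEST_NO_LOWERING to WASM_SIMD_TEST so they also exercise the scalar-lowered path. The following is a minimal illustrative sketch (not V8 code; the function name is made up) of the per-lane semantics for i16x8.extmul_low_i8x16_s: sign-extend lanes 0..7 of both i8x16 inputs to 16 bits and multiply. The high variants read lanes 8..15 instead, and the unsigned variants zero-extend (mask) rather than sign-extend, mirroring ExtendNode and LowerExtMul below.

#include <array>
#include <cstdint>

// Illustrative sketch only (not V8 code): scalar semantics of
// i16x8.extmul_low_i8x16_s. Each of lanes 0..7 of both inputs is
// sign-extended to 16 bits and the widened values are multiplied;
// the product of two 8-bit values always fits in 16 bits.
std::array<int16_t, 8> ExtMulLowI8x16S(const std::array<int8_t, 16>& a,
                                       const std::array<int8_t, 16>& b) {
  std::array<int16_t, 8> result;
  for (int i = 0; i < 8; i++) {
    // The "high" variants would use lane index i + 8 here, matching
    // start_index in LowerExtMul.
    result[i] = static_cast<int16_t>(static_cast<int16_t>(a[i]) *
                                     static_cast<int16_t>(b[i]));
  }
  return result;
}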
@@ -120,7 +120,11 @@ void SimdScalarLowering::LowerGraph() {
V(I64x2ShrU) \
V(I64x2Add) \
V(I64x2Sub) \
-V(I64x2Mul)
+V(I64x2Mul) \
+V(I64x2ExtMulLowI32x4S) \
+V(I64x2ExtMulLowI32x4U) \
+V(I64x2ExtMulHighI32x4S) \
+V(I64x2ExtMulHighI32x4U)
#define FOREACH_INT32X4_OPCODE(V) \
V(I32x4Splat) \
@@ -168,7 +172,11 @@ void SimdScalarLowering::LowerGraph() {
V(V16x8AllTrue) \
V(V8x16AnyTrue) \
V(V8x16AllTrue) \
-V(I32x4BitMask)
+V(I32x4BitMask) \
+V(I32x4ExtMulLowI16x8S) \
+V(I32x4ExtMulLowI16x8U) \
+V(I32x4ExtMulHighI16x8S) \
+V(I32x4ExtMulHighI16x8U)
#define FOREACH_FLOAT64X2_OPCODE(V) \
V(F64x2Splat) \
@@ -268,7 +276,11 @@ void SimdScalarLowering::LowerGraph() {
V(I16x8GeU) \
V(I16x8RoundingAverageU) \
V(I16x8Abs) \
-V(I16x8BitMask)
+V(I16x8BitMask) \
+V(I16x8ExtMulLowI8x16S) \
+V(I16x8ExtMulLowI8x16U) \
+V(I16x8ExtMulHighI8x16S) \
+V(I16x8ExtMulHighI8x16U)
#define FOREACH_INT8X16_OPCODE(V) \
V(I8x16Splat) \
@@ -2268,12 +2280,63 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, num_lanes);
break;
}
#define LOWER_EXT_MUL(OP, MULTIPLY, INPUT_TYPE, LOW, SIGNED) \
case IrOpcode::OP: { \
LowerExtMul(node, machine()->MULTIPLY(), rep_type, SimdType::INPUT_TYPE, \
LOW, SIGNED); \
break; \
}
LOWER_EXT_MUL(kI16x8ExtMulLowI8x16S, Int32Mul, kInt8x16, true, true)
LOWER_EXT_MUL(kI16x8ExtMulLowI8x16U, Int32Mul, kInt8x16, true, false)
LOWER_EXT_MUL(kI16x8ExtMulHighI8x16S, Int32Mul, kInt8x16, false, true)
LOWER_EXT_MUL(kI16x8ExtMulHighI8x16U, Int32Mul, kInt8x16, false, false)
LOWER_EXT_MUL(kI32x4ExtMulLowI16x8S, Int32Mul, kInt16x8, true, true)
LOWER_EXT_MUL(kI32x4ExtMulLowI16x8U, Int32Mul, kInt16x8, true, false)
LOWER_EXT_MUL(kI32x4ExtMulHighI16x8S, Int32Mul, kInt16x8, false, true)
LOWER_EXT_MUL(kI32x4ExtMulHighI16x8U, Int32Mul, kInt16x8, false, false)
LOWER_EXT_MUL(kI64x2ExtMulLowI32x4S, Int64Mul, kInt32x4, true, true)
LOWER_EXT_MUL(kI64x2ExtMulLowI32x4U, Int64Mul, kInt32x4, true, false)
LOWER_EXT_MUL(kI64x2ExtMulHighI32x4S, Int64Mul, kInt32x4, false, true)
LOWER_EXT_MUL(kI64x2ExtMulHighI32x4U, Int64Mul, kInt32x4, false, false)
default: {
DefaultLowering(node);
}
}
}
Node* SimdScalarLowering::ExtendNode(Node* node, SimdType rep_type,
bool is_signed) {
if (rep_type == SimdType::kInt8x16 && !is_signed) {
node = Mask(node, kMask8);
} else if (rep_type == SimdType::kInt16x8 && !is_signed) {
node = Mask(node, kMask16);
} else if (rep_type == SimdType::kInt32x4) {
if (is_signed) {
node = graph()->NewNode(machine()->SignExtendWord32ToInt64(), node);
} else {
node = graph()->NewNode(machine()->ChangeUint32ToUint64(), node);
}
}
return node;
}
void SimdScalarLowering::LowerExtMul(Node* node, const Operator* multiply,
SimdType output_type, SimdType input_type,
bool low, bool is_signed) {
DCHECK_EQ(2, node->InputCount());
int num_lanes = NumLanes(output_type);
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_type);
int start_index = low ? 0 : num_lanes;
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; i++) {
Node* left = ExtendNode(rep_left[start_index + i], input_type, is_signed);
Node* right = ExtendNode(rep_right[start_index + i], input_type, is_signed);
rep_node[i] = graph()->NewNode(multiply, left, right);
}
ReplaceNode(node, rep_node, num_lanes);
}
bool SimdScalarLowering::DefaultLowering(Node* node) {
bool something_changed = false;
for (int i = NodeProperties::PastValueIndex(node) - 1; i >= 0; i--) {
......
@@ -121,6 +121,14 @@ class SimdScalarLowering {
void LowerAllTrueOp(Node* node, SimdType rep_type);
void LowerFloatPseudoMinMax(Node* node, const Operator* op, bool is_max,
SimdType type);
void LowerExtMul(Node* node, const Operator* op, SimdType output_type,
SimdType input_type, bool low, bool is_signed);
// Extends node, which is a lowered node (i.e. not a SIMD node) of type
// rep_type (e.g. int8, int16, int32), to a 32-bit or 64-bit node. The
// assumption here is that small ints are stored sign extended.
Node* ExtendNode(Node* node, SimdType rep_type, bool is_signed);
MachineGraph* const mcgraph_;
NodeMarker<State> state_;
......
@@ -2375,73 +2375,73 @@ void RunExtMulTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
}
} // namespace
-WASM_SIMD_TEST_NO_LOWERING(I16x8ExtMulLowI8x16S) {
+WASM_SIMD_TEST(I16x8ExtMulLowI8x16S) {
RunExtMulTest<int8_t, int16_t>(execution_tier, lower_simd,
kExprI16x8ExtMulLowI8x16S, MultiplyLong,
kExprI8x16Splat, MulHalf::kLow);
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8ExtMulHighI8x16S) {
+WASM_SIMD_TEST(I16x8ExtMulHighI8x16S) {
RunExtMulTest<int8_t, int16_t>(execution_tier, lower_simd,
kExprI16x8ExtMulHighI8x16S, MultiplyLong,
kExprI8x16Splat, MulHalf::kHigh);
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8ExtMulLowI8x16U) {
+WASM_SIMD_TEST(I16x8ExtMulLowI8x16U) {
RunExtMulTest<uint8_t, uint16_t>(execution_tier, lower_simd,
kExprI16x8ExtMulLowI8x16U, MultiplyLong,
kExprI8x16Splat, MulHalf::kLow);
}
-WASM_SIMD_TEST_NO_LOWERING(I16x8ExtMulHighI8x16U) {
+WASM_SIMD_TEST(I16x8ExtMulHighI8x16U) {
RunExtMulTest<uint8_t, uint16_t>(execution_tier, lower_simd,
kExprI16x8ExtMulHighI8x16U, MultiplyLong,
kExprI8x16Splat, MulHalf::kHigh);
}
-WASM_SIMD_TEST_NO_LOWERING(I32x4ExtMulLowI16x8S) {
+WASM_SIMD_TEST(I32x4ExtMulLowI16x8S) {
RunExtMulTest<int16_t, int32_t>(execution_tier, lower_simd,
kExprI32x4ExtMulLowI16x8S, MultiplyLong,
kExprI16x8Splat, MulHalf::kLow);
}
-WASM_SIMD_TEST_NO_LOWERING(I32x4ExtMulHighI16x8S) {
+WASM_SIMD_TEST(I32x4ExtMulHighI16x8S) {
RunExtMulTest<int16_t, int32_t>(execution_tier, lower_simd,
kExprI32x4ExtMulHighI16x8S, MultiplyLong,
kExprI16x8Splat, MulHalf::kHigh);
}
-WASM_SIMD_TEST_NO_LOWERING(I32x4ExtMulLowI16x8U) {
+WASM_SIMD_TEST(I32x4ExtMulLowI16x8U) {
RunExtMulTest<uint16_t, uint32_t>(execution_tier, lower_simd,
kExprI32x4ExtMulLowI16x8U, MultiplyLong,
kExprI16x8Splat, MulHalf::kLow);
}
-WASM_SIMD_TEST_NO_LOWERING(I32x4ExtMulHighI16x8U) {
+WASM_SIMD_TEST(I32x4ExtMulHighI16x8U) {
RunExtMulTest<uint16_t, uint32_t>(execution_tier, lower_simd,
kExprI32x4ExtMulHighI16x8U, MultiplyLong,
kExprI16x8Splat, MulHalf::kHigh);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2ExtMulLowI32x4S) {
+WASM_SIMD_TEST(I64x2ExtMulLowI32x4S) {
RunExtMulTest<int32_t, int64_t>(execution_tier, lower_simd,
kExprI64x2ExtMulLowI32x4S, MultiplyLong,
kExprI32x4Splat, MulHalf::kLow);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2ExtMulHighI32x4S) {
+WASM_SIMD_TEST(I64x2ExtMulHighI32x4S) {
RunExtMulTest<int32_t, int64_t>(execution_tier, lower_simd,
kExprI64x2ExtMulHighI32x4S, MultiplyLong,
kExprI32x4Splat, MulHalf::kHigh);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2ExtMulLowI32x4U) {
+WASM_SIMD_TEST(I64x2ExtMulLowI32x4U) {
RunExtMulTest<uint32_t, uint64_t>(execution_tier, lower_simd,
kExprI64x2ExtMulLowI32x4U, MultiplyLong,
kExprI32x4Splat, MulHalf::kLow);
}
-WASM_SIMD_TEST_NO_LOWERING(I64x2ExtMulHighI32x4U) {
+WASM_SIMD_TEST(I64x2ExtMulHighI32x4U) {
RunExtMulTest<uint32_t, uint64_t>(execution_tier, lower_simd,
kExprI64x2ExtMulHighI32x4U, MultiplyLong,
kExprI32x4Splat, MulHalf::kHigh);
......