Commit 2975ead6 authored by Ng Zhi An, committed by Commit Bot

[wasm-simd] Optimize codegen when shift is constant

Define macros to identify constant shift values so that we can emit
better codegen in these cases. Otherwise we need to mask the shift
value before shifting. We also need separate cases for left and right
shifts, since right shifts require the shift value to be negated.

Bug: v8:10115
Change-Id: I9a032901d03d59cfaa871eefbc58f7f144fd521f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2041709
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66325}
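
For context, a minimal scalar sketch (not part of this CL; the helper names are made up for illustration) of the wasm shift semantics that make the optimization valid: the shift count is taken modulo the lane width, so a constant count can be reduced at instruction-selection time, and a count that is a multiple of the lane width leaves every lane unchanged.

#include <cassert>
#include <cstdint>

// Scalar stand-in for one lane of i32x4.shl; the "& 31" is the run-time
// masking that a non-constant shift count would otherwise require.
uint32_t wasm_i32_shl(uint32_t lane, uint32_t count) {
  return lane << (count & 31);
}

int main() {
  // A constant count of 35 is equivalent to a count of 3, so an immediate
  // shift by 3 can be emitted directly.
  assert(wasm_i32_shl(1, 35) == wasm_i32_shl(1, 3));
  // A constant count that is a multiple of the lane width is a no-op, which
  // is why the selector can simply emit an identity of the input.
  assert(wasm_i32_shl(123, 32) == 123);
  return 0;
}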
parent 35afef86
@@ -152,12 +152,22 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
 }
 
 void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
-                       Node* node) {
+                       Node* node, int width) {
   Arm64OperandGenerator g(selector);
-  InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()};
-  selector->Emit(opcode, g.DefineAsRegister(node),
-                 g.UseRegister(node->InputAt(0)),
-                 g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
+  if (g.IsIntegerConstant(node->InputAt(1))) {
+    if (g.GetIntegerConstantValue(node->InputAt(1)) % width == 0) {
+      selector->EmitIdentity(node);
+    } else {
+      selector->Emit(opcode, g.DefineAsRegister(node),
+                     g.UseRegister(node->InputAt(0)),
+                     g.UseImmediate(node->InputAt(1)));
+    }
+  } else {
+    InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()};
+    selector->Emit(opcode, g.DefineAsRegister(node),
+                   g.UseRegister(node->InputAt(0)),
+                   g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
+  }
 }
 
 void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
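
A rough scalar analogue (an illustration under assumptions, not V8 code) of why the non-constant path above is more expensive and why left and right shifts need separate handling: with a dynamic count the mask has to be applied at run time, and a variable NEON-style vector shift takes a signed per-lane amount, so a right shift ends up expressed as a left shift by the negated count.

#include <cstdint>

// Variable shift with NEON-like semantics: a negative amount shifts right.
int32_t shift_by_signed_amount(int32_t lane, int32_t amount) {
  return amount >= 0 ? lane << amount : lane >> (-amount);
}

// One lane of i32x4.shr_s with a dynamic count: mask first, then negate the
// count so the "shift left by a signed amount" form performs a right shift.
int32_t wasm_i32_shr_s(int32_t lane, uint32_t count) {
  int32_t masked = static_cast<int32_t>(count & 31);
  return shift_by_signed_amount(lane, -masked);
}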
@@ -3174,18 +3184,18 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
   V(S1x16AllTrue, kArm64S1x16AllTrue)
 
 #define SIMD_SHIFT_OP_LIST(V) \
-  V(I64x2Shl) \
-  V(I64x2ShrS) \
-  V(I64x2ShrU) \
-  V(I32x4Shl) \
-  V(I32x4ShrS) \
-  V(I32x4ShrU) \
-  V(I16x8Shl) \
-  V(I16x8ShrS) \
-  V(I16x8ShrU) \
-  V(I8x16Shl) \
-  V(I8x16ShrS) \
-  V(I8x16ShrU)
+  V(I64x2Shl, 64) \
+  V(I64x2ShrS, 64) \
+  V(I64x2ShrU, 64) \
+  V(I32x4Shl, 32) \
+  V(I32x4ShrS, 32) \
+  V(I32x4ShrU, 32) \
+  V(I16x8Shl, 16) \
+  V(I16x8ShrS, 16) \
+  V(I16x8ShrU, 16) \
+  V(I8x16Shl, 8) \
+  V(I8x16ShrS, 8) \
+  V(I8x16ShrU, 8)
 
 #define SIMD_BINOP_LIST(V) \
   V(F64x2Add, kArm64F64x2Add) \
@@ -3319,9 +3329,9 @@ SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
 #undef SIMD_VISIT_UNOP
 #undef SIMD_UNOP_LIST
 
-#define SIMD_VISIT_SHIFT_OP(Name) \
-  void InstructionSelector::Visit##Name(Node* node) { \
-    VisitSimdShiftRRR(this, kArm64##Name, node); \
+#define SIMD_VISIT_SHIFT_OP(Name, width) \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitSimdShiftRRR(this, kArm64##Name, node, width); \
   }
 SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
 #undef SIMD_VISIT_SHIFT_OP
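
To make the macro plumbing concrete, this is what a single entry of the list expands to after the change (hand-expanded here for illustration; the real definitions are generated by the preprocessor from SIMD_SHIFT_OP_LIST):

// SIMD_VISIT_SHIFT_OP(I32x4Shl, 32) expands to a visitor that forwards the
// 32-bit lane width to VisitSimdShiftRRR.
void InstructionSelector::VisitI32x4Shl(Node* node) {
  VisitSimdShiftRRR(this, kArm64I32x4Shl, node, 32);
}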
@@ -351,6 +351,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
       size_t input_count, InstructionOperand* inputs, size_t temp_count,
       InstructionOperand* temps, FlagsContinuation* cont);
 
+  void EmitIdentity(Node* node);
+
   // ===========================================================================
   // ===== Architecture-independent deoptimization exit emission methods. ======
   // ===========================================================================
@@ -655,7 +657,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
   void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
                           const CallDescriptor* call_descriptor, Node* node);
 
-  void EmitIdentity(Node* node);
   bool CanProduceSignalingNaN(Node* node);
 
   // ===========================================================================