Commit a4c9b582 authored by Milad Farazmand, committed by Commit Bot

PPC: [wasm-simd] Implement simd shift operations

Change-Id: Iae7b56504366c7867439b7d7956f1202b2a8dc5a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2239369
Reviewed-by: Junliang Yan <jyan@ca.ibm.com>
Commit-Queue: Milad Farazmand <miladfar@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#68305}
parent 667fafce
@@ -2282,7 +2282,31 @@ using Instr = uint32_t;
/* Vector Minimum Signed Word */ \
V(vminsw, VMINSW, 0x10000382) \
/* Vector Minimum Unsigned Word */ \
V(vminuw, VMINUW, 0x10000282)
V(vminuw, VMINUW, 0x10000282) \
/* Vector Shift Left Byte */ \
V(vslb, VSLB, 0x10000104) \
/* Vector Shift Left Word */ \
V(vslw, VSLW, 0x10000184) \
/* Vector Shift Left Halfword */ \
V(vslh, VSLH, 0x10000144) \
/* Vector Shift Left Doubleword */ \
V(vsld, VSLD, 0x100005C4) \
/* Vector Shift Right Byte */ \
V(vsrb, VSRB, 0x10000204) \
/* Vector Shift Right Word */ \
V(vsrw, VSRW, 0x10000284) \
/* Vector Shift Right Halfword */ \
V(vsrh, VSRH, 0x10000244) \
/* Vector Shift Right Doubleword */ \
V(vsrd, VSRD, 0x100006C4) \
/* Vector Shift Right Algebraic Byte */ \
V(vsrab, VSRAB, 0x10000304) \
/* Vector Shift Right Algebraic Word */ \
V(vsraw, VSRAW, 0x10000384) \
/* Vector Shift Right Algebraic Halfword */ \
V(vsrah, VSRAH, 0x10000344) \
/* Vector Shift Right Algebraic Doubleword */ \
V(vsrad, VSRAD, 0x100003C4)
#define PPC_VX_OPCODE_UNUSED_LIST(V) \
/* Decimal Add Modulo */ \
@@ -2453,14 +2477,6 @@ using Instr = uint32_t;
V(vrsqrtefp, VRSQRTEFP, 0x1000014A) \
/* Vector Shift Left */ \
V(vsl, VSL, 0x100001C4) \
/* Vector Shift Left Byte */ \
V(vslb, VSLB, 0x10000104) \
/* Vector Shift Left Doubleword */ \
V(vsld, VSLD, 0x100005C4) \
/* Vector Shift Left Halfword */ \
V(vslh, VSLH, 0x10000144) \
/* Vector Shift Left Word */ \
V(vslw, VSLW, 0x10000184) \
/* Vector Splat Immediate Signed Byte */ \
V(vspltisb, VSPLTISB, 0x1000030C) \
/* Vector Splat Immediate Signed Halfword */ \
@@ -2469,22 +2485,6 @@ using Instr = uint32_t;
V(vspltisw, VSPLTISW, 0x1000038C) \
/* Vector Shift Right */ \
V(vsr, VSR, 0x100002C4) \
/* Vector Shift Right Algebraic Byte */ \
V(vsrab, VSRAB, 0x10000304) \
/* Vector Shift Right Algebraic Doubleword */ \
V(vsrad, VSRAD, 0x100003C4) \
/* Vector Shift Right Algebraic Halfword */ \
V(vsrah, VSRAH, 0x10000344) \
/* Vector Shift Right Algebraic Word */ \
V(vsraw, VSRAW, 0x10000384) \
/* Vector Shift Right Byte */ \
V(vsrb, VSRB, 0x10000204) \
/* Vector Shift Right Doubleword */ \
V(vsrd, VSRD, 0x100006C4) \
/* Vector Shift Right Halfword */ \
V(vsrh, VSRH, 0x10000244) \
/* Vector Shift Right Word */ \
V(vsrw, VSRW, 0x10000284) \
/* Vector Subtract & write Carry Unsigned Quadword */ \
V(vsubcuq, VSUBCUQ, 0x10000540) \
/* Vector Subtract and Write Carry-Out Unsigned Word */ \
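For orientation, the third column of each V(name, NAME, opcode) entry above is the instruction word with all register fields zeroed: primary opcode 4 in the top six bits (0x10000000) plus the 11-bit VX-form extended opcode in the low bits (for example 0x104 = 260 for vslb and 0x5C4 = 1476 for vsld). A rough sketch of how such a constant combines with the three 5-bit register fields follows; the helper below is illustrative only, not V8's emitter:

#include <cstdint>

// Illustrative VX-form encoder (not V8 code): VRT sits in bits 21-25,
// VRA in bits 16-20 and VRB in bits 11-15 (counting from the least
// significant bit); the extended opcode occupies the low 11 bits of the
// constants listed above.
uint32_t EncodeVX(uint32_t opcode, int vrt, int vra, int vrb) {
  return opcode | (vrt << 21) | (vra << 16) | (vrb << 11);
}

// Example: EncodeVX(0x10000184, 2, 3, 4) encodes vslw v2, v3, v4.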
@@ -2810,6 +2810,62 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchDoubleReg);
break;
}
#define VECTOR_SHIFT(op) \
{ \
__ mtvsrd(kScratchDoubleReg, i.InputRegister(1)); \
__ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7)); \
__ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
kScratchDoubleReg); \
}
case kPPC_I64x2Shl: {
VECTOR_SHIFT(vsld)
break;
}
case kPPC_I64x2ShrS: {
VECTOR_SHIFT(vsrad)
break;
}
case kPPC_I64x2ShrU: {
VECTOR_SHIFT(vsrd)
break;
}
case kPPC_I32x4Shl: {
VECTOR_SHIFT(vslw)
break;
}
case kPPC_I32x4ShrS: {
VECTOR_SHIFT(vsraw)
break;
}
case kPPC_I32x4ShrU: {
VECTOR_SHIFT(vsrw)
break;
}
case kPPC_I16x8Shl: {
VECTOR_SHIFT(vslh)
break;
}
case kPPC_I16x8ShrS: {
VECTOR_SHIFT(vsrah)
break;
}
case kPPC_I16x8ShrU: {
VECTOR_SHIFT(vsrh)
break;
}
case kPPC_I8x16Shl: {
VECTOR_SHIFT(vslb)
break;
}
case kPPC_I8x16ShrS: {
VECTOR_SHIFT(vsrab)
break;
}
case kPPC_I8x16ShrU: {
VECTOR_SHIFT(vsrb)
break;
}
#undef VECTOR_SHIFT
case kPPC_StoreCompressTagged: {
ASSEMBLE_STORE_INTEGER(StoreTaggedField, StoreTaggedFieldX);
break;
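The VECTOR_SHIFT macro above implements every shift case the same way: the scalar shift count is moved from a general-purpose register into the scratch vector register (mtvsrd), its low byte is broadcast to every byte lane (vspltb with index 7), and the corresponding per-lane vector shift is applied. Because the PPC vector shifts only read the low-order bits of each lane's shift amount, this matches wasm's shift-modulo-lane-width semantics without explicit masking. A minimal host-side sketch of those semantics for the 32-bit case, assuming a standalone reference function rather than V8 code:

#include <cstdint>

// Reference semantics for kPPC_I32x4Shl (illustrative, not V8 code):
// every lane is shifted by the same count, and only the low 5 bits of
// the count matter, mirroring vslw's per-lane behaviour.
void I32x4ShlReference(uint32_t lanes[4], uint32_t shift) {
  uint32_t amount = shift & 31;  // shift count taken modulo the lane width
  for (int i = 0; i < 4; ++i) lanes[i] <<= amount;
}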
@@ -227,6 +227,9 @@ namespace compiler {
V(PPC_I64x2GtU) \
V(PPC_I64x2GeU) \
V(PPC_I64x2GeS) \
V(PPC_I64x2Shl) \
V(PPC_I64x2ShrS) \
V(PPC_I64x2ShrU) \
V(PPC_I32x4Splat) \
V(PPC_I32x4ExtractLane) \
V(PPC_I32x4ReplaceLane) \
@@ -244,6 +247,9 @@ namespace compiler {
V(PPC_I32x4GeS) \
V(PPC_I32x4GtU) \
V(PPC_I32x4GeU) \
V(PPC_I32x4Shl) \
V(PPC_I32x4ShrS) \
V(PPC_I32x4ShrU) \
V(PPC_I16x8Splat) \
V(PPC_I16x8ExtractLaneU) \
V(PPC_I16x8ExtractLaneS) \
@@ -262,6 +268,9 @@ namespace compiler {
V(PPC_I16x8GeS) \
V(PPC_I16x8GtU) \
V(PPC_I16x8GeU) \
V(PPC_I16x8Shl) \
V(PPC_I16x8ShrS) \
V(PPC_I16x8ShrU) \
V(PPC_I8x16Splat) \
V(PPC_I8x16ExtractLaneU) \
V(PPC_I8x16ExtractLaneS) \
@@ -279,6 +288,9 @@ namespace compiler {
V(PPC_I8x16GeS) \
V(PPC_I8x16GtU) \
V(PPC_I8x16GeU) \
V(PPC_I8x16Shl) \
V(PPC_I8x16ShrS) \
V(PPC_I8x16ShrU) \
V(PPC_StoreCompressTagged) \
V(PPC_LoadDecompressTaggedSigned) \
V(PPC_LoadDecompressTaggedPointer) \
@@ -150,6 +150,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I64x2GtU:
case kPPC_I64x2GeU:
case kPPC_I64x2GeS:
case kPPC_I64x2Shl:
case kPPC_I64x2ShrS:
case kPPC_I64x2ShrU:
case kPPC_I32x4Splat:
case kPPC_I32x4ExtractLane:
case kPPC_I32x4ReplaceLane:
@@ -167,6 +170,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I32x4GeS:
case kPPC_I32x4GtU:
case kPPC_I32x4GeU:
case kPPC_I32x4Shl:
case kPPC_I32x4ShrS:
case kPPC_I32x4ShrU:
case kPPC_I16x8Splat:
case kPPC_I16x8ExtractLaneU:
case kPPC_I16x8ExtractLaneS:
@@ -185,6 +191,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I16x8GeS:
case kPPC_I16x8GtU:
case kPPC_I16x8GeU:
case kPPC_I16x8Shl:
case kPPC_I16x8ShrS:
case kPPC_I16x8ShrU:
case kPPC_I8x16Splat:
case kPPC_I8x16ExtractLaneU:
case kPPC_I8x16ExtractLaneS:
@@ -202,6 +211,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I8x16GeS:
case kPPC_I8x16GtU:
case kPPC_I8x16GeU:
case kPPC_I8x16Shl:
case kPPC_I8x16ShrS:
case kPPC_I8x16ShrU:
return kNoOpcodeFlags;
case kPPC_LoadWordS8:
@@ -2188,6 +2188,20 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16GtU) \
V(I8x16GeU)
#define SIMD_SHIFT_LIST(V) \
V(I64x2Shl) \
V(I64x2ShrS) \
V(I64x2ShrU) \
V(I32x4Shl) \
V(I32x4ShrS) \
V(I32x4ShrU) \
V(I16x8Shl) \
V(I16x8ShrS) \
V(I16x8ShrU) \
V(I8x16Shl) \
V(I8x16ShrS) \
V(I8x16ShrU)
#define SIMD_VISIT_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
PPCOperandGenerator g(this); \
@@ -2236,22 +2250,21 @@ SIMD_TYPES(SIMD_VISIT_REPLACE_LANE)
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
#undef SIMD_BINOP_LIST
#undef SIMD_TYPES
void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
#define SIMD_VISIT_SHIFT(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
PPCOperandGenerator g(this); \
Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
g.UseUniqueRegister(node->InputAt(0)), \
g.UseUniqueRegister(node->InputAt(1))); \
}
SIMD_SHIFT_LIST(SIMD_VISIT_SHIFT)
#undef SIMD_VISIT_SHIFT
#undef SIMD_SHIFT_LIST
#undef SIMD_TYPES
void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
UNIMPLEMENTED();
}
@@ -2426,12 +2439,6 @@ void InstructionSelector::VisitV8x16AnyTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitV8x16AllTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS8x16Swizzle(Node* node) { UNIMPLEMENTED(); }
@@ -2446,12 +2453,6 @@ void InstructionSelector::VisitF64x2Div(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2Neg(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ShrS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); }
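For readers less familiar with the macro style, SIMD_VISIT_SHIFT above expands to one visitor per opcode in SIMD_SHIFT_LIST; written out for a single case it is roughly the following (reconstructed from the diff above, not additional source):

void InstructionSelector::VisitI32x4Shl(Node* node) {
  // Both the vector operand and the scalar shift count are kept in unique
  // registers so the code generator is free to clobber the scratch register.
  PPCOperandGenerator g(this);
  Emit(kPPC_I32x4Shl, g.DefineAsRegister(node),
       g.UseUniqueRegister(node->InputAt(0)),
       g.UseUniqueRegister(node->InputAt(1)));
}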