Commit 08cbf4e4 authored by Milad Fa, committed by V8 LUCI CQ

PPC [liftoff]: Implement simd shift operations

Change-Id: Id27959b1e65b86e6d00bd67f637d14a4606a9765
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3899300
Commit-Queue: Milad Farazmand <mfarazma@redhat.com>
Reviewed-by: Junliang Yan <junyan@redhat.com>
Cr-Commit-Position: refs/heads/main@{#83273}
parent aabcb150
......@@ -3734,6 +3734,37 @@ SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
#undef EMIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
// Pairs each SIMD shift opcode with the PPC vector shift instruction that
// implements it. Instruction suffix selects the lane width (d=64, w=32,
// h=16, b=8); vsl* = shift left, vsra* = arithmetic shift right,
// vsr* = logical shift right.
#define SIMD_SHIFT_LIST(V) \
V(I64x2Shl, vsld) \
V(I64x2ShrS, vsrad) \
V(I64x2ShrU, vsrd) \
V(I32x4Shl, vslw) \
V(I32x4ShrS, vsraw) \
V(I32x4ShrU, vsrw) \
V(I16x8Shl, vslh) \
V(I16x8ShrS, vsrah) \
V(I16x8ShrU, vsrh) \
V(I8x16Shl, vslb) \
V(I8x16ShrS, vsrab) \
V(I8x16ShrU, vsrb)
// Emits two overloads per opcode:
//  - GPR shift amount: mtvsrd moves the amount into the SIMD scratch, and
//    vspltb splats byte element 7 (the least-significant byte of the moved
//    doubleword) to all 16 lanes, since the PPC vector shifts read a
//    per-lane shift count; then the shift instruction is issued.
//  - Immediate (Operand) shift amount: materializes the immediate into the
//    GPR scratch and delegates to the register overload above.
#define EMIT_SIMD_SHIFT(name, op) \
void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
Register src2, Simd128Register scratch) { \
mtvsrd(scratch, src2); \
vspltb(scratch, scratch, Operand(7)); \
op(dst, src1, scratch); \
} \
void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
const Operand& src2, Register scratch1, \
Simd128Register scratch2) { \
mov(scratch1, src2); \
name(dst, src1, scratch1, scratch2); \
}
SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
#undef EMIT_SIMD_SHIFT
#undef SIMD_SHIFT_LIST
void TurboAssembler::LoadSimd128(Simd128Register dst, const MemOperand& mem,
Register scratch) {
GenerateMemoryOperationRR(dst, mem, lxvx);
......
......@@ -1137,6 +1137,29 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef PROTOTYPE_SIMD_BINOP
#undef SIMD_BINOP_LIST
// SIMD shift operations; definitions are macro-generated in the
// corresponding .cc file (SIMD_SHIFT_LIST / EMIT_SIMD_SHIFT).
#define SIMD_SHIFT_LIST(V) \
V(I64x2Shl) \
V(I64x2ShrS) \
V(I64x2ShrU) \
V(I32x4Shl) \
V(I32x4ShrS) \
V(I32x4ShrU) \
V(I16x8Shl) \
V(I16x8ShrS) \
V(I16x8ShrU) \
V(I8x16Shl) \
V(I8x16ShrS) \
V(I8x16ShrU)
// Two overloads per opcode: shift amount in a GPR (needs one SIMD
// scratch), or as an immediate Operand (needs a GPR scratch to hold the
// materialized immediate plus a SIMD scratch).
#define PROTOTYPE_SIMD_SHIFT(name) \
void name(Simd128Register dst, Simd128Register src1, Register src2, \
Simd128Register scratch); \
void name(Simd128Register dst, Simd128Register src1, const Operand& src2, \
Register scratch1, Simd128Register scratch2);
SIMD_SHIFT_LIST(PROTOTYPE_SIMD_SHIFT)
#undef PROTOTYPE_SIMD_SHIFT
#undef SIMD_SHIFT_LIST
void LoadSimd128(Simd128Register dst, const MemOperand& mem,
Register scratch);
void StoreSimd128(Simd128Register src, const MemOperand& mem,
......
......@@ -2255,6 +2255,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef EMIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
// Switch cases for the SIMD shift instructions: each kPPC_<name> case
// forwards to the TurboAssembler helper of the same name, passing the
// shift amount in InputRegister(1) and the shared SIMD scratch register.
#define SIMD_SHIFT_LIST(V) \
V(I64x2Shl) \
V(I64x2ShrS) \
V(I64x2ShrU) \
V(I32x4Shl) \
V(I32x4ShrS) \
V(I32x4ShrU) \
V(I16x8Shl) \
V(I16x8ShrS) \
V(I16x8ShrU) \
V(I8x16Shl) \
V(I8x16ShrS) \
V(I8x16ShrU)
#define EMIT_SIMD_SHIFT(name) \
case kPPC_##name: { \
__ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
i.InputRegister(1), kScratchSimd128Reg); \
break; \
}
SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
#undef EMIT_SIMD_SHIFT
#undef SIMD_SHIFT_LIST
case kPPC_F64x2Splat: {
__ F64x2Splat(i.OutputSimd128Register(), i.InputDoubleRegister(0),
kScratchReg);
......@@ -2446,62 +2470,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1), kScratchSimd128Reg);
break;
}
// Broadcasts the shift amount from InputRegister(1) into every byte lane
// of kScratchSimd128Reg (mtvsrd, then vspltb of byte element 7 — the
// least-significant byte of the moved doubleword), and applies the given
// per-lane vector shift instruction.
#define VECTOR_SHIFT(op) \
{ \
__ mtvsrd(kScratchSimd128Reg, i.InputRegister(1)); \
__ vspltb(kScratchSimd128Reg, kScratchSimd128Reg, Operand(7)); \
__ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
kScratchSimd128Reg); \
}
// One case per SIMD shift opcode; instruction suffix selects lane width
// (d=64, w=32, h=16, b=8).
case kPPC_I64x2Shl: {
VECTOR_SHIFT(vsld)
break;
}
case kPPC_I64x2ShrS: {
VECTOR_SHIFT(vsrad)
break;
}
case kPPC_I64x2ShrU: {
VECTOR_SHIFT(vsrd)
break;
}
case kPPC_I32x4Shl: {
VECTOR_SHIFT(vslw)
break;
}
case kPPC_I32x4ShrS: {
VECTOR_SHIFT(vsraw)
break;
}
case kPPC_I32x4ShrU: {
VECTOR_SHIFT(vsrw)
break;
}
case kPPC_I16x8Shl: {
VECTOR_SHIFT(vslh)
break;
}
case kPPC_I16x8ShrS: {
VECTOR_SHIFT(vsrah)
break;
}
case kPPC_I16x8ShrU: {
VECTOR_SHIFT(vsrh)
break;
}
case kPPC_I8x16Shl: {
VECTOR_SHIFT(vslb)
break;
}
case kPPC_I8x16ShrS: {
VECTOR_SHIFT(vsrab)
break;
}
case kPPC_I8x16ShrU: {
VECTOR_SHIFT(vsrb)
break;
}
#undef VECTOR_SHIFT
case kPPC_S128And: {
Simd128Register dst = i.OutputSimd128Register();
Simd128Register src = i.InputSimd128Register(1);
......
......@@ -1829,6 +1829,53 @@ SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
#undef EMIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
// Register-amount SIMD shifts: maps each Liftoff emit_<name> to the
// TurboAssembler helper that implements it.
#define SIMD_SHIFT_RR_LIST(V) \
V(i64x2_shl, I64x2Shl) \
V(i64x2_shr_s, I64x2ShrS) \
V(i64x2_shr_u, I64x2ShrU) \
V(i32x4_shl, I32x4Shl) \
V(i32x4_shr_s, I32x4ShrS) \
V(i32x4_shr_u, I32x4ShrU) \
V(i16x8_shl, I16x8Shl) \
V(i16x8_shr_s, I16x8ShrS) \
V(i16x8_shr_u, I16x8ShrU) \
V(i8x16_shl, I8x16Shl) \
V(i8x16_shr_s, I8x16ShrS) \
V(i8x16_shr_u, I8x16ShrU)
// The shift amount arrives in a GPR (rhs.gp()); the helper needs the SIMD
// scratch register to splat it across lanes.
#define EMIT_SIMD_SHIFT_RR(name, op) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
op(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.gp(), kScratchSimd128Reg); \
}
SIMD_SHIFT_RR_LIST(EMIT_SIMD_SHIFT_RR)
#undef EMIT_SIMD_SHIFT_RR
#undef SIMD_SHIFT_RR_LIST
// Immediate-amount SIMD shifts: maps each Liftoff emit_<name>i to the
// same TurboAssembler helpers via their Operand overload.
#define SIMD_SHIFT_RI_LIST(V) \
V(i64x2_shli, I64x2Shl) \
V(i64x2_shri_s, I64x2ShrS) \
V(i64x2_shri_u, I64x2ShrU) \
V(i32x4_shli, I32x4Shl) \
V(i32x4_shri_s, I32x4ShrS) \
V(i32x4_shri_u, I32x4ShrU) \
V(i16x8_shli, I16x8Shl) \
V(i16x8_shri_s, I16x8ShrS) \
V(i16x8_shri_u, I16x8ShrU) \
V(i8x16_shli, I8x16Shl) \
V(i8x16_shri_s, I8x16ShrS) \
V(i8x16_shri_u, I8x16ShrU)
// The immediate is wrapped in an Operand; r0 serves as the GPR scratch
// into which the helper materializes it before splatting.
#define EMIT_SIMD_SHIFT_RI(name, op) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
int32_t rhs) { \
op(dst.fp().toSimd(), lhs.fp().toSimd(), Operand(rhs), r0, \
kScratchSimd128Reg); \
}
SIMD_SHIFT_RI_LIST(EMIT_SIMD_SHIFT_RI)
#undef EMIT_SIMD_SHIFT_RI
#undef SIMD_SHIFT_RI_LIST
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
F64x2Splat(dst.fp().toSimd(), src.fp(), r0);
......@@ -2279,38 +2326,6 @@ void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
bailout(kSimd, "i64x2_alltrue");
}
// Unimplemented i64x2 shift stubs: each aborts Liftoff compilation of the
// current function via bailout(kSimd, ...).
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i64x2_shl");
}
void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
bailout(kSimd, "i64x2_shli");
}
void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i64x2_shr_s");
}
void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
bailout(kSimd, "i64x2_shri_s");
}
void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i64x2_shr_u");
}
void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
bailout(kSimd, "i64x2_shri_u");
}
void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
......@@ -2375,38 +2390,6 @@ void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
bailout(kSimd, "i32x4_bitmask");
}
// Unimplemented i32x4 shift stubs: each aborts Liftoff compilation of the
// current function via bailout(kSimd, ...).
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i32x4_shl");
}
void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
bailout(kSimd, "i32x4_shli");
}
void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i32x4_shr_s");
}
void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
bailout(kSimd, "i32x4_shri_s");
}
void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i32x4_shr_u");
}
void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
bailout(kSimd, "i32x4_shri_u");
}
void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
......@@ -2462,38 +2445,6 @@ void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
bailout(kSimd, "i16x8_bitmask");
}
// Unimplemented i16x8 shift stubs: each aborts Liftoff compilation of the
// current function via bailout(kSimd, ...).
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i16x8_shl");
}
void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
bailout(kSimd, "i16x8_shli");
}
void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i16x8_shr_s");
}
void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
bailout(kSimd, "i16x8_shri_s");
}
void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i16x8_shr_u");
}
void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
bailout(kSimd, "i16x8_shri_u");
}
void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
......@@ -2610,38 +2561,6 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
bailout(kSimd, "i8x16_bitmask");
}
// Unimplemented i8x16 shift stubs: each aborts Liftoff compilation of the
// current function via bailout(kSimd, ...).
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i8x16_shl");
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
bailout(kSimd, "i8x16_shli");
}
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i8x16_shr_s");
}
void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
bailout(kSimd, "i8x16_shri_s");
}
void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i8x16_shr_u");
}
void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
bailout(kSimd, "i8x16_shri_u");
}
void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment