Commit 47505486 authored by Milad Fa, committed by V8 LUCI CQ

S390 [liftoff]: implement simd shift operations

This CL implements both the Register-Register and the
Register-Immediate variants of the SIMD shift operations needed by Liftoff.
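
For reference, wasm SIMD shifts take the scalar shift count modulo the lane
width. A minimal stand-alone C++ sketch of that per-lane behavior for
i32x4.shl (illustrative only; I32x4ShlRef is a hypothetical name, not V8 code):

#include <array>
#include <cstdint>

// Reference model of i32x4.shl: every 32-bit lane is shifted left by the
// scalar count taken modulo the lane width (wasm shift semantics).
std::array<uint32_t, 4> I32x4ShlRef(std::array<uint32_t, 4> v, uint32_t count) {
  for (uint32_t& lane : v) lane <<= (count & 31);
  return v;
}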

Change-Id: I148df8418097004710a17e0b216c2f18db808b8c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3105085
Reviewed-by: Junliang Yan <junyan@redhat.com>
Commit-Queue: Milad Fa <mfarazma@redhat.com>
Cr-Commit-Position: refs/heads/main@{#76420}
parent 67b6a9bf
@@ -5272,7 +5272,37 @@ SIMD_BINOP_LIST_VRR_C(EMIT_SIMD_BINOP_VRR_C)
#undef EMIT_SIMD_BINOP_VRR_C
#undef SIMD_BINOP_LIST_VRR_C
// Opcodes without a 1-1 match.
#define SIMD_SHIFT_LIST(V) \
V(I64x2Shl, veslv, 3) \
V(I64x2ShrS, vesrav, 3) \
V(I64x2ShrU, vesrlv, 3) \
V(I32x4Shl, veslv, 2) \
V(I32x4ShrS, vesrav, 2) \
V(I32x4ShrU, vesrlv, 2) \
V(I16x8Shl, veslv, 1) \
V(I16x8ShrS, vesrav, 1) \
V(I16x8ShrU, vesrlv, 1) \
V(I8x16Shl, veslv, 0) \
V(I8x16ShrS, vesrav, 0) \
V(I8x16ShrU, vesrlv, 0)
#define EMIT_SIMD_SHIFT(name, op, c1) \
void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
Register src2) { \
vlvg(kScratchDoubleReg, src2, MemOperand(r0, 0), Condition(c1)); \
vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(c1)); \
op(dst, src1, kScratchDoubleReg, Condition(0), Condition(0), \
Condition(c1)); \
} \
void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
const Operand& src2) { \
mov(ip, src2); \
name(dst, src1, ip); \
}
SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
#undef EMIT_SIMD_SHIFT
#undef SIMD_SHIFT_LIST
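For illustration, the (I32x4Shl, veslv, 2) entry above expands to the
following Register-Register helper; the comments describe the intended role
of each z/Architecture vector instruction and are explanatory notes, not part
of the original source:

void TurboAssembler::I32x4Shl(Simd128Register dst, Simd128Register src1,
                              Register src2) {
  // Load the GP shift amount into element 0 of the scratch vector;
  // Condition(2) selects 32-bit element size.
  vlvg(kScratchDoubleReg, src2, MemOperand(r0, 0), Condition(2));
  // Replicate element 0 across all four 32-bit lanes.
  vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(2));
  // Element-wise variable left shift of src1 by the per-lane counts.
  veslv(dst, src1, kScratchDoubleReg, Condition(0), Condition(0), Condition(2));
}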
void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
Register scratch_1 = r0;
......
@@ -1070,75 +1070,99 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
Register src2, uint8_t imm_lane_idx);
#define SIMD_BINOP_LIST(V) \
V(F64x2Add) \
V(F64x2Sub) \
V(F64x2Mul) \
V(F64x2Div) \
V(F64x2Min) \
V(F64x2Max) \
V(F64x2Eq) \
V(F64x2Ne) \
V(F64x2Lt) \
V(F64x2Le) \
V(F32x4Add) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Div) \
V(F32x4Min) \
V(F32x4Max) \
V(F32x4Eq) \
V(F32x4Ne) \
V(F32x4Lt) \
V(F32x4Le) \
V(I64x2Add) \
V(I64x2Sub) \
V(I64x2Mul) \
V(I64x2Eq) \
V(I64x2Ne) \
V(I64x2GtS) \
V(I64x2GeS) \
V(I32x4Add) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4Eq) \
V(I32x4Ne) \
V(I32x4GtS) \
V(I32x4GeS) \
V(I32x4GtU) \
V(I32x4GeU) \
V(I32x4MinS) \
V(I32x4MinU) \
V(I32x4MaxS) \
V(I32x4MaxU) \
V(I16x8Add) \
V(I16x8Sub) \
V(I16x8Mul) \
V(I16x8Eq) \
V(I16x8Ne) \
V(I16x8GtS) \
V(I16x8GeS) \
V(I16x8GtU) \
V(I16x8GeU) \
V(I16x8MinS) \
V(I16x8MinU) \
V(I16x8MaxS) \
V(I16x8MaxU) \
V(I8x16Add) \
V(I8x16Sub) \
V(I8x16Eq) \
V(I8x16Ne) \
V(I8x16GtS) \
V(I8x16GeS) \
V(I8x16GtU) \
V(I8x16GeU) \
V(I8x16MinS) \
V(I8x16MinU) \
V(I8x16MaxS) \
V(I8x16MaxU)
#define PROTOTYPE_SIMD_BINOP(name) \
void name(Simd128Register dst, Simd128Register src1, Simd128Register src2);
#define SIMD_BINOP_LIST(V) \
V(F64x2Add, Simd128Register) \
V(F64x2Sub, Simd128Register) \
V(F64x2Mul, Simd128Register) \
V(F64x2Div, Simd128Register) \
V(F64x2Min, Simd128Register) \
V(F64x2Max, Simd128Register) \
V(F64x2Eq, Simd128Register) \
V(F64x2Ne, Simd128Register) \
V(F64x2Lt, Simd128Register) \
V(F64x2Le, Simd128Register) \
V(F32x4Add, Simd128Register) \
V(F32x4Sub, Simd128Register) \
V(F32x4Mul, Simd128Register) \
V(F32x4Div, Simd128Register) \
V(F32x4Min, Simd128Register) \
V(F32x4Max, Simd128Register) \
V(F32x4Eq, Simd128Register) \
V(F32x4Ne, Simd128Register) \
V(F32x4Lt, Simd128Register) \
V(F32x4Le, Simd128Register) \
V(I64x2Add, Simd128Register) \
V(I64x2Sub, Simd128Register) \
V(I64x2Mul, Simd128Register) \
V(I64x2Eq, Simd128Register) \
V(I64x2Ne, Simd128Register) \
V(I64x2GtS, Simd128Register) \
V(I64x2GeS, Simd128Register) \
V(I64x2Shl, Register) \
V(I64x2ShrS, Register) \
V(I64x2ShrU, Register) \
V(I64x2Shl, const Operand&) \
V(I64x2ShrS, const Operand&) \
V(I64x2ShrU, const Operand&) \
V(I32x4Add, Simd128Register) \
V(I32x4Sub, Simd128Register) \
V(I32x4Mul, Simd128Register) \
V(I32x4Eq, Simd128Register) \
V(I32x4Ne, Simd128Register) \
V(I32x4GtS, Simd128Register) \
V(I32x4GeS, Simd128Register) \
V(I32x4GtU, Simd128Register) \
V(I32x4GeU, Simd128Register) \
V(I32x4MinS, Simd128Register) \
V(I32x4MinU, Simd128Register) \
V(I32x4MaxS, Simd128Register) \
V(I32x4MaxU, Simd128Register) \
V(I32x4Shl, Register) \
V(I32x4ShrS, Register) \
V(I32x4ShrU, Register) \
V(I32x4Shl, const Operand&) \
V(I32x4ShrS, const Operand&) \
V(I32x4ShrU, const Operand&) \
V(I16x8Add, Simd128Register) \
V(I16x8Sub, Simd128Register) \
V(I16x8Mul, Simd128Register) \
V(I16x8Eq, Simd128Register) \
V(I16x8Ne, Simd128Register) \
V(I16x8GtS, Simd128Register) \
V(I16x8GeS, Simd128Register) \
V(I16x8GtU, Simd128Register) \
V(I16x8GeU, Simd128Register) \
V(I16x8MinS, Simd128Register) \
V(I16x8MinU, Simd128Register) \
V(I16x8MaxS, Simd128Register) \
V(I16x8MaxU, Simd128Register) \
V(I16x8Shl, Register) \
V(I16x8ShrS, Register) \
V(I16x8ShrU, Register) \
V(I16x8Shl, const Operand&) \
V(I16x8ShrS, const Operand&) \
V(I16x8ShrU, const Operand&) \
V(I8x16Add, Simd128Register) \
V(I8x16Sub, Simd128Register) \
V(I8x16Eq, Simd128Register) \
V(I8x16Ne, Simd128Register) \
V(I8x16GtS, Simd128Register) \
V(I8x16GeS, Simd128Register) \
V(I8x16GtU, Simd128Register) \
V(I8x16GeU, Simd128Register) \
V(I8x16MinS, Simd128Register) \
V(I8x16MinU, Simd128Register) \
V(I8x16MaxS, Simd128Register) \
V(I8x16MaxU, Simd128Register) \
V(I8x16Shl, Register) \
V(I8x16ShrS, Register) \
V(I8x16ShrU, Register) \
V(I8x16Shl, const Operand&) \
V(I8x16ShrS, const Operand&) \
V(I8x16ShrU, const Operand&)
#define PROTOTYPE_SIMD_BINOP(name, stype) \
void name(Simd128Register dst, Simd128Register src1, stype src2);
SIMD_BINOP_LIST(PROTOTYPE_SIMD_BINOP)
#undef PROTOTYPE_SIMD_BINOP
#undef SIMD_BINOP_LIST
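As an illustration, the two I64x2Shl shift entries in the new list expand
through PROTOTYPE_SIMD_BINOP into a pair of overloaded declarations, one per
src2 type:

void I64x2Shl(Simd128Register dst, Simd128Register src1, Register src2);
void I64x2Shl(Simd128Register dst, Simd128Register src1, const Operand& src2);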
......
@@ -2426,77 +2426,89 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64();
break;
// Simd Support.
#define SIMD_BINOP_LIST(V) \
V(F64x2Add) \
V(F64x2Sub) \
V(F64x2Mul) \
V(F64x2Div) \
V(F64x2Min) \
V(F64x2Max) \
V(F64x2Eq) \
V(F64x2Ne) \
V(F64x2Lt) \
V(F64x2Le) \
V(F32x4Add) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Div) \
V(F32x4Min) \
V(F32x4Max) \
V(F32x4Eq) \
V(F32x4Ne) \
V(F32x4Lt) \
V(F32x4Le) \
V(I64x2Add) \
V(I64x2Sub) \
V(I64x2Mul) \
V(I64x2Eq) \
V(I64x2Ne) \
V(I64x2GtS) \
V(I64x2GeS) \
V(I32x4Add) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4Eq) \
V(I32x4Ne) \
V(I32x4GtS) \
V(I32x4GeS) \
V(I32x4GtU) \
V(I32x4GeU) \
V(I32x4MinS) \
V(I32x4MinU) \
V(I32x4MaxS) \
V(I32x4MaxU) \
V(I16x8Add) \
V(I16x8Sub) \
V(I16x8Mul) \
V(I16x8Eq) \
V(I16x8Ne) \
V(I16x8GtS) \
V(I16x8GeS) \
V(I16x8GtU) \
V(I16x8GeU) \
V(I16x8MinS) \
V(I16x8MinU) \
V(I16x8MaxS) \
V(I16x8MaxU) \
V(I8x16Add) \
V(I8x16Sub) \
V(I8x16Eq) \
V(I8x16Ne) \
V(I8x16GtS) \
V(I8x16GeS) \
V(I8x16GtU) \
V(I8x16GeU) \
V(I8x16MinS) \
V(I8x16MinU) \
V(I8x16MaxS) \
V(I8x16MaxU)
#define EMIT_SIMD_BINOP(name) \
case kS390_##name: { \
__ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
i.InputSimd128Register(1)); \
break; \
}
#define SIMD_BINOP_LIST(V) \
V(F64x2Add, Simd128Register) \
V(F64x2Sub, Simd128Register) \
V(F64x2Mul, Simd128Register) \
V(F64x2Div, Simd128Register) \
V(F64x2Min, Simd128Register) \
V(F64x2Max, Simd128Register) \
V(F64x2Eq, Simd128Register) \
V(F64x2Ne, Simd128Register) \
V(F64x2Lt, Simd128Register) \
V(F64x2Le, Simd128Register) \
V(F32x4Add, Simd128Register) \
V(F32x4Sub, Simd128Register) \
V(F32x4Mul, Simd128Register) \
V(F32x4Div, Simd128Register) \
V(F32x4Min, Simd128Register) \
V(F32x4Max, Simd128Register) \
V(F32x4Eq, Simd128Register) \
V(F32x4Ne, Simd128Register) \
V(F32x4Lt, Simd128Register) \
V(F32x4Le, Simd128Register) \
V(I64x2Add, Simd128Register) \
V(I64x2Sub, Simd128Register) \
V(I64x2Mul, Simd128Register) \
V(I64x2Eq, Simd128Register) \
V(I64x2Ne, Simd128Register) \
V(I64x2GtS, Simd128Register) \
V(I64x2GeS, Simd128Register) \
V(I64x2Shl, Register) \
V(I64x2ShrS, Register) \
V(I64x2ShrU, Register) \
V(I32x4Add, Simd128Register) \
V(I32x4Sub, Simd128Register) \
V(I32x4Mul, Simd128Register) \
V(I32x4Eq, Simd128Register) \
V(I32x4Ne, Simd128Register) \
V(I32x4GtS, Simd128Register) \
V(I32x4GeS, Simd128Register) \
V(I32x4GtU, Simd128Register) \
V(I32x4GeU, Simd128Register) \
V(I32x4MinS, Simd128Register) \
V(I32x4MinU, Simd128Register) \
V(I32x4MaxS, Simd128Register) \
V(I32x4MaxU, Simd128Register) \
V(I32x4Shl, Register) \
V(I32x4ShrS, Register) \
V(I32x4ShrU, Register) \
V(I16x8Add, Simd128Register) \
V(I16x8Sub, Simd128Register) \
V(I16x8Mul, Simd128Register) \
V(I16x8Eq, Simd128Register) \
V(I16x8Ne, Simd128Register) \
V(I16x8GtS, Simd128Register) \
V(I16x8GeS, Simd128Register) \
V(I16x8GtU, Simd128Register) \
V(I16x8GeU, Simd128Register) \
V(I16x8MinS, Simd128Register) \
V(I16x8MinU, Simd128Register) \
V(I16x8MaxS, Simd128Register) \
V(I16x8MaxU, Simd128Register) \
V(I16x8Shl, Register) \
V(I16x8ShrS, Register) \
V(I16x8ShrU, Register) \
V(I8x16Add, Simd128Register) \
V(I8x16Sub, Simd128Register) \
V(I8x16Eq, Simd128Register) \
V(I8x16Ne, Simd128Register) \
V(I8x16GtS, Simd128Register) \
V(I8x16GeS, Simd128Register) \
V(I8x16GtU, Simd128Register) \
V(I8x16GeU, Simd128Register) \
V(I8x16MinS, Simd128Register) \
V(I8x16MinU, Simd128Register) \
V(I8x16MaxS, Simd128Register) \
V(I8x16MaxU, Simd128Register) \
V(I8x16Shl, Register) \
V(I8x16ShrS, Register) \
V(I8x16ShrU, Register)
#define EMIT_SIMD_BINOP(name, stype) \
case kS390_##name: { \
__ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
i.Input##stype(1)); \
break; \
}
SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
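As an illustration, the (I32x4Shl, Register) entry expands to a case whose
Input##stype(1) becomes InputRegister(1), so the shift amount is read from a
general-purpose register rather than a SIMD register:

case kS390_I32x4Shl: {
  __ I32x4Shl(i.OutputSimd128Register(), i.InputSimd128Register(0),
              i.InputRegister(1));
  break;
}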
@@ -2601,64 +2613,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0));
break;
}
// vector shifts
#define VECTOR_SHIFT(op, mode) \
{ \
__ vlvg(kScratchDoubleReg, i.InputRegister(1), MemOperand(r0, 0), \
Condition(mode)); \
__ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), \
Condition(mode)); \
__ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
kScratchDoubleReg, Condition(0), Condition(0), Condition(mode)); \
}
case kS390_I64x2Shl: {
VECTOR_SHIFT(veslv, 3);
break;
}
case kS390_I64x2ShrS: {
VECTOR_SHIFT(vesrav, 3);
break;
}
case kS390_I64x2ShrU: {
VECTOR_SHIFT(vesrlv, 3);
break;
}
case kS390_I32x4Shl: {
VECTOR_SHIFT(veslv, 2);
break;
}
case kS390_I32x4ShrS: {
VECTOR_SHIFT(vesrav, 2);
break;
}
case kS390_I32x4ShrU: {
VECTOR_SHIFT(vesrlv, 2);
break;
}
case kS390_I16x8Shl: {
VECTOR_SHIFT(veslv, 1);
break;
}
case kS390_I16x8ShrS: {
VECTOR_SHIFT(vesrav, 1);
break;
}
case kS390_I16x8ShrU: {
VECTOR_SHIFT(vesrlv, 1);
break;
}
case kS390_I8x16Shl: {
VECTOR_SHIFT(veslv, 0);
break;
}
case kS390_I8x16ShrS: {
VECTOR_SHIFT(vesrav, 0);
break;
}
case kS390_I8x16ShrU: {
VECTOR_SHIFT(vesrlv, 0);
break;
}
// vector unary ops
case kS390_F64x2Abs: {
__ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
......
@@ -2143,81 +2143,116 @@ void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
b(condition, target); // branch if SMI
}
#define SIMD_BINOP_LIST(V) \
V(f64x2_add, F64x2Add) \
V(f64x2_sub, F64x2Sub) \
V(f64x2_mul, F64x2Mul) \
V(f64x2_div, F64x2Div) \
V(f64x2_min, F64x2Min) \
V(f64x2_max, F64x2Max) \
V(f64x2_eq, F64x2Eq) \
V(f64x2_ne, F64x2Ne) \
V(f64x2_lt, F64x2Lt) \
V(f64x2_le, F64x2Le) \
V(f32x4_add, F32x4Add) \
V(f32x4_sub, F32x4Sub) \
V(f32x4_mul, F32x4Mul) \
V(f32x4_div, F32x4Div) \
V(f32x4_min, F32x4Min) \
V(f32x4_max, F32x4Max) \
V(f32x4_eq, F32x4Eq) \
V(f32x4_ne, F32x4Ne) \
V(f32x4_lt, F32x4Lt) \
V(f32x4_le, F32x4Le) \
V(i64x2_add, I64x2Add) \
V(i64x2_sub, I64x2Sub) \
V(i64x2_mul, I64x2Mul) \
V(i64x2_eq, I64x2Eq) \
V(i64x2_ne, I64x2Ne) \
V(i64x2_gt_s, I64x2GtS) \
V(i64x2_ge_s, I64x2GeS) \
V(i32x4_add, I32x4Add) \
V(i32x4_sub, I32x4Sub) \
V(i32x4_mul, I32x4Mul) \
V(i32x4_eq, I32x4Eq) \
V(i32x4_ne, I32x4Ne) \
V(i32x4_gt_s, I32x4GtS) \
V(i32x4_ge_s, I32x4GeS) \
V(i32x4_gt_u, I32x4GtU) \
V(i32x4_ge_u, I32x4GeU) \
V(i32x4_min_s, I32x4MinS) \
V(i32x4_min_u, I32x4MinU) \
V(i32x4_max_s, I32x4MaxS) \
V(i32x4_max_u, I32x4MaxU) \
V(i16x8_add, I16x8Add) \
V(i16x8_sub, I16x8Sub) \
V(i16x8_mul, I16x8Mul) \
V(i16x8_eq, I16x8Eq) \
V(i16x8_ne, I16x8Ne) \
V(i16x8_gt_s, I16x8GtS) \
V(i16x8_ge_s, I16x8GeS) \
V(i16x8_gt_u, I16x8GtU) \
V(i16x8_ge_u, I16x8GeU) \
V(i16x8_min_s, I16x8MinS) \
V(i16x8_min_u, I16x8MinU) \
V(i16x8_max_s, I16x8MaxS) \
V(i16x8_max_u, I16x8MaxU) \
V(i8x16_add, I8x16Add) \
V(i8x16_sub, I8x16Sub) \
V(i8x16_eq, I8x16Eq) \
V(i8x16_ne, I8x16Ne) \
V(i8x16_gt_s, I8x16GtS) \
V(i8x16_ge_s, I8x16GeS) \
V(i8x16_gt_u, I8x16GtU) \
V(i8x16_ge_u, I8x16GeU) \
V(i8x16_min_s, I8x16MinS) \
V(i8x16_min_u, I8x16MinU) \
V(i8x16_max_s, I8x16MaxS) \
V(i8x16_max_u, I8x16MaxU)
#define EMIT_SIMD_BINOP(name, op) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
op(dst.fp(), lhs.fp(), rhs.fp()); \
}
SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
#undef EMIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
#define SIMD_BINOP_RR_LIST(V) \
V(f64x2_add, F64x2Add, fp) \
V(f64x2_sub, F64x2Sub, fp) \
V(f64x2_mul, F64x2Mul, fp) \
V(f64x2_div, F64x2Div, fp) \
V(f64x2_min, F64x2Min, fp) \
V(f64x2_max, F64x2Max, fp) \
V(f64x2_eq, F64x2Eq, fp) \
V(f64x2_ne, F64x2Ne, fp) \
V(f64x2_lt, F64x2Lt, fp) \
V(f64x2_le, F64x2Le, fp) \
V(f32x4_add, F32x4Add, fp) \
V(f32x4_sub, F32x4Sub, fp) \
V(f32x4_mul, F32x4Mul, fp) \
V(f32x4_div, F32x4Div, fp) \
V(f32x4_min, F32x4Min, fp) \
V(f32x4_max, F32x4Max, fp) \
V(f32x4_eq, F32x4Eq, fp) \
V(f32x4_ne, F32x4Ne, fp) \
V(f32x4_lt, F32x4Lt, fp) \
V(f32x4_le, F32x4Le, fp) \
V(i64x2_add, I64x2Add, fp) \
V(i64x2_sub, I64x2Sub, fp) \
V(i64x2_mul, I64x2Mul, fp) \
V(i64x2_eq, I64x2Eq, fp) \
V(i64x2_ne, I64x2Ne, fp) \
V(i64x2_gt_s, I64x2GtS, fp) \
V(i64x2_ge_s, I64x2GeS, fp) \
V(i64x2_shl, I64x2Shl, gp) \
V(i64x2_shr_s, I64x2ShrS, gp) \
V(i64x2_shr_u, I64x2ShrU, gp) \
V(i32x4_add, I32x4Add, fp) \
V(i32x4_sub, I32x4Sub, fp) \
V(i32x4_mul, I32x4Mul, fp) \
V(i32x4_eq, I32x4Eq, fp) \
V(i32x4_ne, I32x4Ne, fp) \
V(i32x4_gt_s, I32x4GtS, fp) \
V(i32x4_ge_s, I32x4GeS, fp) \
V(i32x4_gt_u, I32x4GtU, fp) \
V(i32x4_ge_u, I32x4GeU, fp) \
V(i32x4_min_s, I32x4MinS, fp) \
V(i32x4_min_u, I32x4MinU, fp) \
V(i32x4_max_s, I32x4MaxS, fp) \
V(i32x4_max_u, I32x4MaxU, fp) \
V(i32x4_shl, I32x4Shl, gp) \
V(i32x4_shr_s, I32x4ShrS, gp) \
V(i32x4_shr_u, I32x4ShrU, gp) \
V(i16x8_add, I16x8Add, fp) \
V(i16x8_sub, I16x8Sub, fp) \
V(i16x8_mul, I16x8Mul, fp) \
V(i16x8_eq, I16x8Eq, fp) \
V(i16x8_ne, I16x8Ne, fp) \
V(i16x8_gt_s, I16x8GtS, fp) \
V(i16x8_ge_s, I16x8GeS, fp) \
V(i16x8_gt_u, I16x8GtU, fp) \
V(i16x8_ge_u, I16x8GeU, fp) \
V(i16x8_min_s, I16x8MinS, fp) \
V(i16x8_min_u, I16x8MinU, fp) \
V(i16x8_max_s, I16x8MaxS, fp) \
V(i16x8_max_u, I16x8MaxU, fp) \
V(i16x8_shl, I16x8Shl, gp) \
V(i16x8_shr_s, I16x8ShrS, gp) \
V(i16x8_shr_u, I16x8ShrU, gp) \
V(i8x16_add, I8x16Add, fp) \
V(i8x16_sub, I8x16Sub, fp) \
V(i8x16_eq, I8x16Eq, fp) \
V(i8x16_ne, I8x16Ne, fp) \
V(i8x16_gt_s, I8x16GtS, fp) \
V(i8x16_ge_s, I8x16GeS, fp) \
V(i8x16_gt_u, I8x16GtU, fp) \
V(i8x16_ge_u, I8x16GeU, fp) \
V(i8x16_min_s, I8x16MinS, fp) \
V(i8x16_min_u, I8x16MinU, fp) \
V(i8x16_max_s, I8x16MaxS, fp) \
V(i8x16_max_u, I8x16MaxU, fp) \
V(i8x16_shl, I8x16Shl, gp) \
V(i8x16_shr_s, I8x16ShrS, gp) \
V(i8x16_shr_u, I8x16ShrU, gp)
#define EMIT_SIMD_BINOP_RR(name, op, stype) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
op(dst.fp(), lhs.fp(), rhs.stype()); \
}
SIMD_BINOP_RR_LIST(EMIT_SIMD_BINOP_RR)
#undef EMIT_SIMD_BINOP_RR
#undef SIMD_BINOP_RR_LIST
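For example, the (i64x2_shl, I64x2Shl, gp) entry expands to an emitter that
passes the shift count from the general-purpose part of the LiftoffRegister:

void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
                                      LiftoffRegister rhs) {
  I64x2Shl(dst.fp(), lhs.fp(), rhs.gp());
}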
#define SIMD_BINOP_RI_LIST(V) \
V(i64x2_shli, I64x2Shl) \
V(i64x2_shri_s, I64x2ShrS) \
V(i64x2_shri_u, I64x2ShrU) \
V(i32x4_shli, I32x4Shl) \
V(i32x4_shri_s, I32x4ShrS) \
V(i32x4_shri_u, I32x4ShrU) \
V(i16x8_shli, I16x8Shl) \
V(i16x8_shri_s, I16x8ShrS) \
V(i16x8_shri_u, I16x8ShrU) \
V(i8x16_shli, I8x16Shl) \
V(i8x16_shri_s, I8x16ShrS) \
V(i8x16_shri_u, I8x16ShrU)
#define EMIT_SIMD_BINOP_RI(name, op) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
int32_t rhs) { \
op(dst.fp(), lhs.fp(), Operand(rhs)); \
}
SIMD_BINOP_RI_LIST(EMIT_SIMD_BINOP_RI)
#undef EMIT_SIMD_BINOP_RI
#undef SIMD_BINOP_RI_LIST
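Likewise, the (i64x2_shli, I64x2Shl) entry expands to the immediate variant;
it wraps the int32_t count in an Operand, and the TurboAssembler overload
above moves that operand into ip and dispatches to the register form:

void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
                                       int32_t rhs) {
  I64x2Shl(dst.fp(), lhs.fp(), Operand(rhs));
}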
#define SIMD_UNOP_LIST(V) \
V(f64x2_splat, F64x2Splat, fp, fp) \
@@ -2424,38 +2459,6 @@ void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
bailout(kSimd, "i64x2_alltrue");
}
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i64x2_shl");
}
void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
bailout(kSimd, "i64x2_shli");
}
void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i64x2_shr_s");
}
void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
bailout(kSimd, "i64x2_shri_s");
}
void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i64x2_shr_u");
}
void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
bailout(kSimd, "i64x2_shri_u");
}
void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -2520,38 +2523,6 @@ void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
bailout(kSimd, "i32x4_bitmask");
}
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i32x4_shl");
}
void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
bailout(kSimd, "i32x4_shli");
}
void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i32x4_shr_s");
}
void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
bailout(kSimd, "i32x4_shri_s");
}
void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i32x4_shr_u");
}
void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
bailout(kSimd, "i32x4_shri_u");
}
void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2607,38 +2578,6 @@ void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
bailout(kSimd, "i16x8_bitmask");
}
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i16x8_shl");
}
void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
bailout(kSimd, "i16x8_shli");
}
void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i16x8_shr_s");
}
void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
bailout(kSimd, "i16x8_shri_s");
}
void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i16x8_shr_u");
}
void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
bailout(kSimd, "i16x8_shri_u");
}
void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2736,38 +2675,6 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
bailout(kSimd, "i8x16_bitmask");
}
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i8x16_shl");
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
bailout(kSimd, "i8x16_shli");
}
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i8x16_shr_s");
}
void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
bailout(kSimd, "i8x16_shri_s");
}
void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i8x16_shr_u");
}
void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
bailout(kSimd, "i8x16_shri_u");
}
void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
......