Commit d1d7e153 authored by Milad Fa, committed by V8 LUCI CQ

S390 [liftoff]: Implement simd rounding average

Change-Id: I709253796b8493cf365f21247c367974ffbb3106
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3440435
Reviewed-by: Junliang Yan <junyan@redhat.com>
Commit-Queue: Milad Farazmand <mfarazma@redhat.com>
Cr-Commit-Position: refs/heads/main@{#78954}
parent b05a8458
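
The operations wired up in this change, i16x8.avgr_u and i8x16.avgr_u, compute a lanewise unsigned rounding average, (a + b + 1) >> 1, which the S390 port maps onto the vector-average-logical instruction vavgl. For reference, a minimal scalar sketch of the per-lane semantics (plain C++, not V8 code):

#include <cstdint>

// Unsigned rounding average as defined by Wasm avgr_u: add in a wider type
// so the +1 cannot overflow, then shift right by one.
uint16_t RoundingAverageU16(uint16_t a, uint16_t b) {
  return static_cast<uint16_t>((uint32_t{a} + uint32_t{b} + 1) >> 1);
}
uint8_t RoundingAverageU8(uint8_t a, uint8_t b) {
  return static_cast<uint8_t>((uint32_t{a} + uint32_t{b} + 1) >> 1);
}
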
......@@ -5337,50 +5337,52 @@ SIMD_BINOP_LIST_VRR_B(EMIT_SIMD_BINOP_VRR_B)
#undef EMIT_SIMD_BINOP_VRR_B
#undef SIMD_BINOP_LIST_VRR_B
#define SIMD_BINOP_LIST_VRR_C(V) \
V(F64x2Add, vfa, 0, 0, 3) \
V(F64x2Sub, vfs, 0, 0, 3) \
V(F64x2Mul, vfm, 0, 0, 3) \
V(F64x2Div, vfd, 0, 0, 3) \
V(F64x2Min, vfmin, 1, 0, 3) \
V(F64x2Max, vfmax, 1, 0, 3) \
V(F64x2Eq, vfce, 0, 0, 3) \
V(F64x2Pmin, vfmin, 3, 0, 3) \
V(F64x2Pmax, vfmax, 3, 0, 3) \
V(F32x4Add, vfa, 0, 0, 2) \
V(F32x4Sub, vfs, 0, 0, 2) \
V(F32x4Mul, vfm, 0, 0, 2) \
V(F32x4Div, vfd, 0, 0, 2) \
V(F32x4Min, vfmin, 1, 0, 2) \
V(F32x4Max, vfmax, 1, 0, 2) \
V(F32x4Eq, vfce, 0, 0, 2) \
V(F32x4Pmin, vfmin, 3, 0, 2) \
V(F32x4Pmax, vfmax, 3, 0, 2) \
V(I64x2Add, va, 0, 0, 3) \
V(I64x2Sub, vs, 0, 0, 3) \
V(I32x4Add, va, 0, 0, 2) \
V(I32x4Sub, vs, 0, 0, 2) \
V(I32x4Mul, vml, 0, 0, 2) \
V(I32x4MinS, vmn, 0, 0, 2) \
V(I32x4MinU, vmnl, 0, 0, 2) \
V(I32x4MaxS, vmx, 0, 0, 2) \
V(I32x4MaxU, vmxl, 0, 0, 2) \
V(I16x8Add, va, 0, 0, 1) \
V(I16x8Sub, vs, 0, 0, 1) \
V(I16x8Mul, vml, 0, 0, 1) \
V(I16x8MinS, vmn, 0, 0, 1) \
V(I16x8MinU, vmnl, 0, 0, 1) \
V(I16x8MaxS, vmx, 0, 0, 1) \
V(I16x8MaxU, vmxl, 0, 0, 1) \
V(I8x16Add, va, 0, 0, 0) \
V(I8x16Sub, vs, 0, 0, 0) \
V(I8x16MinS, vmn, 0, 0, 0) \
V(I8x16MinU, vmnl, 0, 0, 0) \
V(I8x16MaxS, vmx, 0, 0, 0) \
V(I8x16MaxU, vmxl, 0, 0, 0) \
V(S128And, vn, 0, 0, 0) \
V(S128Or, vo, 0, 0, 0) \
V(S128Xor, vx, 0, 0, 0) \
#define SIMD_BINOP_LIST_VRR_C(V) \
V(F64x2Add, vfa, 0, 0, 3) \
V(F64x2Sub, vfs, 0, 0, 3) \
V(F64x2Mul, vfm, 0, 0, 3) \
V(F64x2Div, vfd, 0, 0, 3) \
V(F64x2Min, vfmin, 1, 0, 3) \
V(F64x2Max, vfmax, 1, 0, 3) \
V(F64x2Eq, vfce, 0, 0, 3) \
V(F64x2Pmin, vfmin, 3, 0, 3) \
V(F64x2Pmax, vfmax, 3, 0, 3) \
V(F32x4Add, vfa, 0, 0, 2) \
V(F32x4Sub, vfs, 0, 0, 2) \
V(F32x4Mul, vfm, 0, 0, 2) \
V(F32x4Div, vfd, 0, 0, 2) \
V(F32x4Min, vfmin, 1, 0, 2) \
V(F32x4Max, vfmax, 1, 0, 2) \
V(F32x4Eq, vfce, 0, 0, 2) \
V(F32x4Pmin, vfmin, 3, 0, 2) \
V(F32x4Pmax, vfmax, 3, 0, 2) \
V(I64x2Add, va, 0, 0, 3) \
V(I64x2Sub, vs, 0, 0, 3) \
V(I32x4Add, va, 0, 0, 2) \
V(I32x4Sub, vs, 0, 0, 2) \
V(I32x4Mul, vml, 0, 0, 2) \
V(I32x4MinS, vmn, 0, 0, 2) \
V(I32x4MinU, vmnl, 0, 0, 2) \
V(I32x4MaxS, vmx, 0, 0, 2) \
V(I32x4MaxU, vmxl, 0, 0, 2) \
V(I16x8Add, va, 0, 0, 1) \
V(I16x8Sub, vs, 0, 0, 1) \
V(I16x8Mul, vml, 0, 0, 1) \
V(I16x8MinS, vmn, 0, 0, 1) \
V(I16x8MinU, vmnl, 0, 0, 1) \
V(I16x8MaxS, vmx, 0, 0, 1) \
V(I16x8MaxU, vmxl, 0, 0, 1) \
V(I16x8RoundingAverageU, vavgl, 0, 0, 1) \
V(I8x16Add, va, 0, 0, 0) \
V(I8x16Sub, vs, 0, 0, 0) \
V(I8x16MinS, vmn, 0, 0, 0) \
V(I8x16MinU, vmnl, 0, 0, 0) \
V(I8x16MaxS, vmx, 0, 0, 0) \
V(I8x16MaxU, vmxl, 0, 0, 0) \
V(I8x16RoundingAverageU, vavgl, 0, 0, 0) \
V(S128And, vn, 0, 0, 0) \
V(S128Or, vo, 0, 0, 0) \
V(S128Xor, vx, 0, 0, 0) \
V(S128AndNot, vnc, 0, 0, 0)
#define EMIT_SIMD_BINOP_VRR_C(name, op, c1, c2, c3) \
......
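
In the VRR_C list, the three trailing constants of each entry become the Condition operands of the emitted vector instruction, the last of which tracks the lane width (0 for byte lanes through 3 for doubleword lanes, as the list's pattern shows). The new vavgl entries therefore use (0, 0, 1) for 16-bit lanes and (0, 0, 0) for 8-bit lanes, matching the hand-written code-generator cases removed further down. The macro body is collapsed in this view; a plausible sketch of what EMIT_SIMD_BINOP_VRR_C expands to (an assumption, not copied from the source):

// Sketch only: assumed expansion of EMIT_SIMD_BINOP_VRR_C. Each list entry
// is taken to define a TurboAssembler binop that forwards c1..c3 as the
// Condition operands of the underlying vector instruction.
#define EMIT_SIMD_BINOP_VRR_C(name, op, c1, c2, c3)                    \
  void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
                            Simd128Register src2) {                    \
    op(dst, src1, src2, Condition(c1), Condition(c2), Condition(c3));  \
  }
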
......@@ -1180,103 +1180,105 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef PROTOTYPE_SIMD_UNOP
#undef SIMD_UNOP_LIST
#define SIMD_BINOP_LIST(V) \
V(F64x2Add, Simd128Register) \
V(F64x2Sub, Simd128Register) \
V(F64x2Mul, Simd128Register) \
V(F64x2Div, Simd128Register) \
V(F64x2Min, Simd128Register) \
V(F64x2Max, Simd128Register) \
V(F64x2Eq, Simd128Register) \
V(F64x2Ne, Simd128Register) \
V(F64x2Lt, Simd128Register) \
V(F64x2Le, Simd128Register) \
V(F64x2Pmin, Simd128Register) \
V(F64x2Pmax, Simd128Register) \
V(F32x4Add, Simd128Register) \
V(F32x4Sub, Simd128Register) \
V(F32x4Mul, Simd128Register) \
V(F32x4Div, Simd128Register) \
V(F32x4Min, Simd128Register) \
V(F32x4Max, Simd128Register) \
V(F32x4Eq, Simd128Register) \
V(F32x4Ne, Simd128Register) \
V(F32x4Lt, Simd128Register) \
V(F32x4Le, Simd128Register) \
V(F32x4Pmin, Simd128Register) \
V(F32x4Pmax, Simd128Register) \
V(I64x2Add, Simd128Register) \
V(I64x2Sub, Simd128Register) \
V(I64x2Mul, Simd128Register) \
V(I64x2Eq, Simd128Register) \
V(I64x2Ne, Simd128Register) \
V(I64x2GtS, Simd128Register) \
V(I64x2GeS, Simd128Register) \
V(I64x2Shl, Register) \
V(I64x2ShrS, Register) \
V(I64x2ShrU, Register) \
V(I64x2Shl, const Operand&) \
V(I64x2ShrS, const Operand&) \
V(I64x2ShrU, const Operand&) \
V(I32x4Add, Simd128Register) \
V(I32x4Sub, Simd128Register) \
V(I32x4Mul, Simd128Register) \
V(I32x4Eq, Simd128Register) \
V(I32x4Ne, Simd128Register) \
V(I32x4GtS, Simd128Register) \
V(I32x4GeS, Simd128Register) \
V(I32x4GtU, Simd128Register) \
V(I32x4GeU, Simd128Register) \
V(I32x4MinS, Simd128Register) \
V(I32x4MinU, Simd128Register) \
V(I32x4MaxS, Simd128Register) \
V(I32x4MaxU, Simd128Register) \
V(I32x4Shl, Register) \
V(I32x4ShrS, Register) \
V(I32x4ShrU, Register) \
V(I32x4Shl, const Operand&) \
V(I32x4ShrS, const Operand&) \
V(I32x4ShrU, const Operand&) \
V(I16x8Add, Simd128Register) \
V(I16x8Sub, Simd128Register) \
V(I16x8Mul, Simd128Register) \
V(I16x8Eq, Simd128Register) \
V(I16x8Ne, Simd128Register) \
V(I16x8GtS, Simd128Register) \
V(I16x8GeS, Simd128Register) \
V(I16x8GtU, Simd128Register) \
V(I16x8GeU, Simd128Register) \
V(I16x8MinS, Simd128Register) \
V(I16x8MinU, Simd128Register) \
V(I16x8MaxS, Simd128Register) \
V(I16x8MaxU, Simd128Register) \
V(I16x8Shl, Register) \
V(I16x8ShrS, Register) \
V(I16x8ShrU, Register) \
V(I16x8Shl, const Operand&) \
V(I16x8ShrS, const Operand&) \
V(I16x8ShrU, const Operand&) \
V(I8x16Add, Simd128Register) \
V(I8x16Sub, Simd128Register) \
V(I8x16Eq, Simd128Register) \
V(I8x16Ne, Simd128Register) \
V(I8x16GtS, Simd128Register) \
V(I8x16GeS, Simd128Register) \
V(I8x16GtU, Simd128Register) \
V(I8x16GeU, Simd128Register) \
V(I8x16MinS, Simd128Register) \
V(I8x16MinU, Simd128Register) \
V(I8x16MaxS, Simd128Register) \
V(I8x16MaxU, Simd128Register) \
V(I8x16Shl, Register) \
V(I8x16ShrS, Register) \
V(I8x16ShrU, Register) \
V(I8x16Shl, const Operand&) \
V(I8x16ShrS, const Operand&) \
V(I8x16ShrU, const Operand&) \
V(S128And, Simd128Register) \
V(S128Or, Simd128Register) \
V(S128Xor, Simd128Register) \
#define SIMD_BINOP_LIST(V) \
V(F64x2Add, Simd128Register) \
V(F64x2Sub, Simd128Register) \
V(F64x2Mul, Simd128Register) \
V(F64x2Div, Simd128Register) \
V(F64x2Min, Simd128Register) \
V(F64x2Max, Simd128Register) \
V(F64x2Eq, Simd128Register) \
V(F64x2Ne, Simd128Register) \
V(F64x2Lt, Simd128Register) \
V(F64x2Le, Simd128Register) \
V(F64x2Pmin, Simd128Register) \
V(F64x2Pmax, Simd128Register) \
V(F32x4Add, Simd128Register) \
V(F32x4Sub, Simd128Register) \
V(F32x4Mul, Simd128Register) \
V(F32x4Div, Simd128Register) \
V(F32x4Min, Simd128Register) \
V(F32x4Max, Simd128Register) \
V(F32x4Eq, Simd128Register) \
V(F32x4Ne, Simd128Register) \
V(F32x4Lt, Simd128Register) \
V(F32x4Le, Simd128Register) \
V(F32x4Pmin, Simd128Register) \
V(F32x4Pmax, Simd128Register) \
V(I64x2Add, Simd128Register) \
V(I64x2Sub, Simd128Register) \
V(I64x2Mul, Simd128Register) \
V(I64x2Eq, Simd128Register) \
V(I64x2Ne, Simd128Register) \
V(I64x2GtS, Simd128Register) \
V(I64x2GeS, Simd128Register) \
V(I64x2Shl, Register) \
V(I64x2ShrS, Register) \
V(I64x2ShrU, Register) \
V(I64x2Shl, const Operand&) \
V(I64x2ShrS, const Operand&) \
V(I64x2ShrU, const Operand&) \
V(I32x4Add, Simd128Register) \
V(I32x4Sub, Simd128Register) \
V(I32x4Mul, Simd128Register) \
V(I32x4Eq, Simd128Register) \
V(I32x4Ne, Simd128Register) \
V(I32x4GtS, Simd128Register) \
V(I32x4GeS, Simd128Register) \
V(I32x4GtU, Simd128Register) \
V(I32x4GeU, Simd128Register) \
V(I32x4MinS, Simd128Register) \
V(I32x4MinU, Simd128Register) \
V(I32x4MaxS, Simd128Register) \
V(I32x4MaxU, Simd128Register) \
V(I32x4Shl, Register) \
V(I32x4ShrS, Register) \
V(I32x4ShrU, Register) \
V(I32x4Shl, const Operand&) \
V(I32x4ShrS, const Operand&) \
V(I32x4ShrU, const Operand&) \
V(I16x8Add, Simd128Register) \
V(I16x8Sub, Simd128Register) \
V(I16x8Mul, Simd128Register) \
V(I16x8Eq, Simd128Register) \
V(I16x8Ne, Simd128Register) \
V(I16x8GtS, Simd128Register) \
V(I16x8GeS, Simd128Register) \
V(I16x8GtU, Simd128Register) \
V(I16x8GeU, Simd128Register) \
V(I16x8MinS, Simd128Register) \
V(I16x8MinU, Simd128Register) \
V(I16x8MaxS, Simd128Register) \
V(I16x8MaxU, Simd128Register) \
V(I16x8Shl, Register) \
V(I16x8ShrS, Register) \
V(I16x8ShrU, Register) \
V(I16x8Shl, const Operand&) \
V(I16x8ShrS, const Operand&) \
V(I16x8ShrU, const Operand&) \
V(I16x8RoundingAverageU, Simd128Register) \
V(I8x16Add, Simd128Register) \
V(I8x16Sub, Simd128Register) \
V(I8x16Eq, Simd128Register) \
V(I8x16Ne, Simd128Register) \
V(I8x16GtS, Simd128Register) \
V(I8x16GeS, Simd128Register) \
V(I8x16GtU, Simd128Register) \
V(I8x16GeU, Simd128Register) \
V(I8x16MinS, Simd128Register) \
V(I8x16MinU, Simd128Register) \
V(I8x16MaxS, Simd128Register) \
V(I8x16MaxU, Simd128Register) \
V(I8x16Shl, Register) \
V(I8x16ShrS, Register) \
V(I8x16ShrU, Register) \
V(I8x16Shl, const Operand&) \
V(I8x16ShrS, const Operand&) \
V(I8x16ShrU, const Operand&) \
V(I8x16RoundingAverageU, Simd128Register) \
V(S128And, Simd128Register) \
V(S128Or, Simd128Register) \
V(S128Xor, Simd128Register) \
V(S128AndNot, Simd128Register)
#define PROTOTYPE_SIMD_BINOP(name, stype) \
......
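
In the TurboAssembler header list, the second column is the C++ type of the binop's right-hand operand, which is why each shift opcode appears twice (once with a Register shift amount and once with a const Operand& immediate). The prototype macro body is collapsed here; a plausible sketch of what PROTOTYPE_SIMD_BINOP declares (an assumption, not copied from the source):

// Sketch only: assumed expansion of PROTOTYPE_SIMD_BINOP, declaring one
// overload per (name, stype) entry on the assembler.
#define PROTOTYPE_SIMD_BINOP(name, stype) \
  void name(Simd128Register dst, Simd128Register src1, stype src2);
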
......@@ -2554,91 +2554,93 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64();
break;
// Simd Support.
#define SIMD_BINOP_LIST(V) \
V(F64x2Add, Simd128Register) \
V(F64x2Sub, Simd128Register) \
V(F64x2Mul, Simd128Register) \
V(F64x2Div, Simd128Register) \
V(F64x2Min, Simd128Register) \
V(F64x2Max, Simd128Register) \
V(F64x2Eq, Simd128Register) \
V(F64x2Ne, Simd128Register) \
V(F64x2Lt, Simd128Register) \
V(F64x2Le, Simd128Register) \
V(F64x2Pmin, Simd128Register) \
V(F64x2Pmax, Simd128Register) \
V(F32x4Add, Simd128Register) \
V(F32x4Sub, Simd128Register) \
V(F32x4Mul, Simd128Register) \
V(F32x4Div, Simd128Register) \
V(F32x4Min, Simd128Register) \
V(F32x4Max, Simd128Register) \
V(F32x4Eq, Simd128Register) \
V(F32x4Ne, Simd128Register) \
V(F32x4Lt, Simd128Register) \
V(F32x4Le, Simd128Register) \
V(F32x4Pmin, Simd128Register) \
V(F32x4Pmax, Simd128Register) \
V(I64x2Add, Simd128Register) \
V(I64x2Sub, Simd128Register) \
V(I64x2Mul, Simd128Register) \
V(I64x2Eq, Simd128Register) \
V(I64x2Ne, Simd128Register) \
V(I64x2GtS, Simd128Register) \
V(I64x2GeS, Simd128Register) \
V(I64x2Shl, Register) \
V(I64x2ShrS, Register) \
V(I64x2ShrU, Register) \
V(I32x4Add, Simd128Register) \
V(I32x4Sub, Simd128Register) \
V(I32x4Mul, Simd128Register) \
V(I32x4Eq, Simd128Register) \
V(I32x4Ne, Simd128Register) \
V(I32x4GtS, Simd128Register) \
V(I32x4GeS, Simd128Register) \
V(I32x4GtU, Simd128Register) \
V(I32x4GeU, Simd128Register) \
V(I32x4MinS, Simd128Register) \
V(I32x4MinU, Simd128Register) \
V(I32x4MaxS, Simd128Register) \
V(I32x4MaxU, Simd128Register) \
V(I32x4Shl, Register) \
V(I32x4ShrS, Register) \
V(I32x4ShrU, Register) \
V(I16x8Add, Simd128Register) \
V(I16x8Sub, Simd128Register) \
V(I16x8Mul, Simd128Register) \
V(I16x8Eq, Simd128Register) \
V(I16x8Ne, Simd128Register) \
V(I16x8GtS, Simd128Register) \
V(I16x8GeS, Simd128Register) \
V(I16x8GtU, Simd128Register) \
V(I16x8GeU, Simd128Register) \
V(I16x8MinS, Simd128Register) \
V(I16x8MinU, Simd128Register) \
V(I16x8MaxS, Simd128Register) \
V(I16x8MaxU, Simd128Register) \
V(I16x8Shl, Register) \
V(I16x8ShrS, Register) \
V(I16x8ShrU, Register) \
V(I8x16Add, Simd128Register) \
V(I8x16Sub, Simd128Register) \
V(I8x16Eq, Simd128Register) \
V(I8x16Ne, Simd128Register) \
V(I8x16GtS, Simd128Register) \
V(I8x16GeS, Simd128Register) \
V(I8x16GtU, Simd128Register) \
V(I8x16GeU, Simd128Register) \
V(I8x16MinS, Simd128Register) \
V(I8x16MinU, Simd128Register) \
V(I8x16MaxS, Simd128Register) \
V(I8x16MaxU, Simd128Register) \
V(I8x16Shl, Register) \
V(I8x16ShrS, Register) \
V(I8x16ShrU, Register) \
V(S128And, Simd128Register) \
V(S128Or, Simd128Register) \
V(S128Xor, Simd128Register) \
#define SIMD_BINOP_LIST(V) \
V(F64x2Add, Simd128Register) \
V(F64x2Sub, Simd128Register) \
V(F64x2Mul, Simd128Register) \
V(F64x2Div, Simd128Register) \
V(F64x2Min, Simd128Register) \
V(F64x2Max, Simd128Register) \
V(F64x2Eq, Simd128Register) \
V(F64x2Ne, Simd128Register) \
V(F64x2Lt, Simd128Register) \
V(F64x2Le, Simd128Register) \
V(F64x2Pmin, Simd128Register) \
V(F64x2Pmax, Simd128Register) \
V(F32x4Add, Simd128Register) \
V(F32x4Sub, Simd128Register) \
V(F32x4Mul, Simd128Register) \
V(F32x4Div, Simd128Register) \
V(F32x4Min, Simd128Register) \
V(F32x4Max, Simd128Register) \
V(F32x4Eq, Simd128Register) \
V(F32x4Ne, Simd128Register) \
V(F32x4Lt, Simd128Register) \
V(F32x4Le, Simd128Register) \
V(F32x4Pmin, Simd128Register) \
V(F32x4Pmax, Simd128Register) \
V(I64x2Add, Simd128Register) \
V(I64x2Sub, Simd128Register) \
V(I64x2Mul, Simd128Register) \
V(I64x2Eq, Simd128Register) \
V(I64x2Ne, Simd128Register) \
V(I64x2GtS, Simd128Register) \
V(I64x2GeS, Simd128Register) \
V(I64x2Shl, Register) \
V(I64x2ShrS, Register) \
V(I64x2ShrU, Register) \
V(I32x4Add, Simd128Register) \
V(I32x4Sub, Simd128Register) \
V(I32x4Mul, Simd128Register) \
V(I32x4Eq, Simd128Register) \
V(I32x4Ne, Simd128Register) \
V(I32x4GtS, Simd128Register) \
V(I32x4GeS, Simd128Register) \
V(I32x4GtU, Simd128Register) \
V(I32x4GeU, Simd128Register) \
V(I32x4MinS, Simd128Register) \
V(I32x4MinU, Simd128Register) \
V(I32x4MaxS, Simd128Register) \
V(I32x4MaxU, Simd128Register) \
V(I32x4Shl, Register) \
V(I32x4ShrS, Register) \
V(I32x4ShrU, Register) \
V(I16x8Add, Simd128Register) \
V(I16x8Sub, Simd128Register) \
V(I16x8Mul, Simd128Register) \
V(I16x8Eq, Simd128Register) \
V(I16x8Ne, Simd128Register) \
V(I16x8GtS, Simd128Register) \
V(I16x8GeS, Simd128Register) \
V(I16x8GtU, Simd128Register) \
V(I16x8GeU, Simd128Register) \
V(I16x8MinS, Simd128Register) \
V(I16x8MinU, Simd128Register) \
V(I16x8MaxS, Simd128Register) \
V(I16x8MaxU, Simd128Register) \
V(I16x8Shl, Register) \
V(I16x8ShrS, Register) \
V(I16x8ShrU, Register) \
V(I16x8RoundingAverageU, Simd128Register) \
V(I8x16Add, Simd128Register) \
V(I8x16Sub, Simd128Register) \
V(I8x16Eq, Simd128Register) \
V(I8x16Ne, Simd128Register) \
V(I8x16GtS, Simd128Register) \
V(I8x16GeS, Simd128Register) \
V(I8x16GtU, Simd128Register) \
V(I8x16GeU, Simd128Register) \
V(I8x16MinS, Simd128Register) \
V(I8x16MinU, Simd128Register) \
V(I8x16MaxS, Simd128Register) \
V(I8x16MaxU, Simd128Register) \
V(I8x16Shl, Register) \
V(I8x16ShrS, Register) \
V(I8x16ShrU, Register) \
V(I8x16RoundingAverageU, Simd128Register) \
V(S128And, Simd128Register) \
V(S128Or, Simd128Register) \
V(S128Xor, Simd128Register) \
V(S128AndNot, Simd128Register)
#define EMIT_SIMD_BINOP(name, stype) \
......@@ -2796,19 +2798,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef EMIT_SIMD_QFM
#undef SIMD_QFM_LIST
// vector binops
case kS390_I16x8RoundingAverageU: {
__ vavgl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(1));
break;
}
case kS390_I8x16RoundingAverageU: {
__ vavgl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(0));
break;
}
// vector unary ops
case kS390_F32x4RecipApprox: {
__ mov(kScratchReg, Operand(1));
......
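
With I16x8RoundingAverageU and I8x16RoundingAverageU added to the code generator's SIMD_BINOP_LIST, the two dedicated kS390_* cases removed just above become redundant: the generic binop macro now routes them through the shared TurboAssembler helpers, which emit the same vavgl sequence. The macro body is collapsed in this view; a plausible sketch of the per-entry case it generates (an assumption, not copied from the source):

// Sketch only: assumed expansion of EMIT_SIMD_BINOP. Each entry is taken to
// produce one switch case that forwards to the TurboAssembler helper of the
// same name, with stype selecting how the second input is read.
#define EMIT_SIMD_BINOP(name, stype)                              \
  case kS390_##name: {                                            \
    __ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
            i.Input##stype(1));                                   \
    break;                                                        \
  }
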
......@@ -2256,91 +2256,93 @@ void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
b(condition, target); // branch if SMI
}
#define SIMD_BINOP_RR_LIST(V) \
V(f64x2_add, F64x2Add, fp) \
V(f64x2_sub, F64x2Sub, fp) \
V(f64x2_mul, F64x2Mul, fp) \
V(f64x2_div, F64x2Div, fp) \
V(f64x2_min, F64x2Min, fp) \
V(f64x2_max, F64x2Max, fp) \
V(f64x2_eq, F64x2Eq, fp) \
V(f64x2_ne, F64x2Ne, fp) \
V(f64x2_lt, F64x2Lt, fp) \
V(f64x2_le, F64x2Le, fp) \
V(f64x2_pmin, F64x2Pmin, fp) \
V(f64x2_pmax, F64x2Pmax, fp) \
V(f32x4_add, F32x4Add, fp) \
V(f32x4_sub, F32x4Sub, fp) \
V(f32x4_mul, F32x4Mul, fp) \
V(f32x4_div, F32x4Div, fp) \
V(f32x4_min, F32x4Min, fp) \
V(f32x4_max, F32x4Max, fp) \
V(f32x4_eq, F32x4Eq, fp) \
V(f32x4_ne, F32x4Ne, fp) \
V(f32x4_lt, F32x4Lt, fp) \
V(f32x4_le, F32x4Le, fp) \
V(f32x4_pmin, F32x4Pmin, fp) \
V(f32x4_pmax, F32x4Pmax, fp) \
V(i64x2_add, I64x2Add, fp) \
V(i64x2_sub, I64x2Sub, fp) \
V(i64x2_mul, I64x2Mul, fp) \
V(i64x2_eq, I64x2Eq, fp) \
V(i64x2_ne, I64x2Ne, fp) \
V(i64x2_gt_s, I64x2GtS, fp) \
V(i64x2_ge_s, I64x2GeS, fp) \
V(i64x2_shl, I64x2Shl, gp) \
V(i64x2_shr_s, I64x2ShrS, gp) \
V(i64x2_shr_u, I64x2ShrU, gp) \
V(i32x4_add, I32x4Add, fp) \
V(i32x4_sub, I32x4Sub, fp) \
V(i32x4_mul, I32x4Mul, fp) \
V(i32x4_eq, I32x4Eq, fp) \
V(i32x4_ne, I32x4Ne, fp) \
V(i32x4_gt_s, I32x4GtS, fp) \
V(i32x4_ge_s, I32x4GeS, fp) \
V(i32x4_gt_u, I32x4GtU, fp) \
V(i32x4_ge_u, I32x4GeU, fp) \
V(i32x4_min_s, I32x4MinS, fp) \
V(i32x4_min_u, I32x4MinU, fp) \
V(i32x4_max_s, I32x4MaxS, fp) \
V(i32x4_max_u, I32x4MaxU, fp) \
V(i32x4_shl, I32x4Shl, gp) \
V(i32x4_shr_s, I32x4ShrS, gp) \
V(i32x4_shr_u, I32x4ShrU, gp) \
V(i16x8_add, I16x8Add, fp) \
V(i16x8_sub, I16x8Sub, fp) \
V(i16x8_mul, I16x8Mul, fp) \
V(i16x8_eq, I16x8Eq, fp) \
V(i16x8_ne, I16x8Ne, fp) \
V(i16x8_gt_s, I16x8GtS, fp) \
V(i16x8_ge_s, I16x8GeS, fp) \
V(i16x8_gt_u, I16x8GtU, fp) \
V(i16x8_ge_u, I16x8GeU, fp) \
V(i16x8_min_s, I16x8MinS, fp) \
V(i16x8_min_u, I16x8MinU, fp) \
V(i16x8_max_s, I16x8MaxS, fp) \
V(i16x8_max_u, I16x8MaxU, fp) \
V(i16x8_shl, I16x8Shl, gp) \
V(i16x8_shr_s, I16x8ShrS, gp) \
V(i16x8_shr_u, I16x8ShrU, gp) \
V(i8x16_add, I8x16Add, fp) \
V(i8x16_sub, I8x16Sub, fp) \
V(i8x16_eq, I8x16Eq, fp) \
V(i8x16_ne, I8x16Ne, fp) \
V(i8x16_gt_s, I8x16GtS, fp) \
V(i8x16_ge_s, I8x16GeS, fp) \
V(i8x16_gt_u, I8x16GtU, fp) \
V(i8x16_ge_u, I8x16GeU, fp) \
V(i8x16_min_s, I8x16MinS, fp) \
V(i8x16_min_u, I8x16MinU, fp) \
V(i8x16_max_s, I8x16MaxS, fp) \
V(i8x16_max_u, I8x16MaxU, fp) \
V(i8x16_shl, I8x16Shl, gp) \
V(i8x16_shr_s, I8x16ShrS, gp) \
V(i8x16_shr_u, I8x16ShrU, gp) \
V(s128_and, S128And, fp) \
V(s128_or, S128Or, fp) \
V(s128_xor, S128Xor, fp) \
#define SIMD_BINOP_RR_LIST(V) \
V(f64x2_add, F64x2Add, fp) \
V(f64x2_sub, F64x2Sub, fp) \
V(f64x2_mul, F64x2Mul, fp) \
V(f64x2_div, F64x2Div, fp) \
V(f64x2_min, F64x2Min, fp) \
V(f64x2_max, F64x2Max, fp) \
V(f64x2_eq, F64x2Eq, fp) \
V(f64x2_ne, F64x2Ne, fp) \
V(f64x2_lt, F64x2Lt, fp) \
V(f64x2_le, F64x2Le, fp) \
V(f64x2_pmin, F64x2Pmin, fp) \
V(f64x2_pmax, F64x2Pmax, fp) \
V(f32x4_add, F32x4Add, fp) \
V(f32x4_sub, F32x4Sub, fp) \
V(f32x4_mul, F32x4Mul, fp) \
V(f32x4_div, F32x4Div, fp) \
V(f32x4_min, F32x4Min, fp) \
V(f32x4_max, F32x4Max, fp) \
V(f32x4_eq, F32x4Eq, fp) \
V(f32x4_ne, F32x4Ne, fp) \
V(f32x4_lt, F32x4Lt, fp) \
V(f32x4_le, F32x4Le, fp) \
V(f32x4_pmin, F32x4Pmin, fp) \
V(f32x4_pmax, F32x4Pmax, fp) \
V(i64x2_add, I64x2Add, fp) \
V(i64x2_sub, I64x2Sub, fp) \
V(i64x2_mul, I64x2Mul, fp) \
V(i64x2_eq, I64x2Eq, fp) \
V(i64x2_ne, I64x2Ne, fp) \
V(i64x2_gt_s, I64x2GtS, fp) \
V(i64x2_ge_s, I64x2GeS, fp) \
V(i64x2_shl, I64x2Shl, gp) \
V(i64x2_shr_s, I64x2ShrS, gp) \
V(i64x2_shr_u, I64x2ShrU, gp) \
V(i32x4_add, I32x4Add, fp) \
V(i32x4_sub, I32x4Sub, fp) \
V(i32x4_mul, I32x4Mul, fp) \
V(i32x4_eq, I32x4Eq, fp) \
V(i32x4_ne, I32x4Ne, fp) \
V(i32x4_gt_s, I32x4GtS, fp) \
V(i32x4_ge_s, I32x4GeS, fp) \
V(i32x4_gt_u, I32x4GtU, fp) \
V(i32x4_ge_u, I32x4GeU, fp) \
V(i32x4_min_s, I32x4MinS, fp) \
V(i32x4_min_u, I32x4MinU, fp) \
V(i32x4_max_s, I32x4MaxS, fp) \
V(i32x4_max_u, I32x4MaxU, fp) \
V(i32x4_shl, I32x4Shl, gp) \
V(i32x4_shr_s, I32x4ShrS, gp) \
V(i32x4_shr_u, I32x4ShrU, gp) \
V(i16x8_add, I16x8Add, fp) \
V(i16x8_sub, I16x8Sub, fp) \
V(i16x8_mul, I16x8Mul, fp) \
V(i16x8_eq, I16x8Eq, fp) \
V(i16x8_ne, I16x8Ne, fp) \
V(i16x8_gt_s, I16x8GtS, fp) \
V(i16x8_ge_s, I16x8GeS, fp) \
V(i16x8_gt_u, I16x8GtU, fp) \
V(i16x8_ge_u, I16x8GeU, fp) \
V(i16x8_min_s, I16x8MinS, fp) \
V(i16x8_min_u, I16x8MinU, fp) \
V(i16x8_max_s, I16x8MaxS, fp) \
V(i16x8_max_u, I16x8MaxU, fp) \
V(i16x8_shl, I16x8Shl, gp) \
V(i16x8_shr_s, I16x8ShrS, gp) \
V(i16x8_shr_u, I16x8ShrU, gp) \
V(i16x8_rounding_average_u, I16x8RoundingAverageU, fp) \
V(i8x16_add, I8x16Add, fp) \
V(i8x16_sub, I8x16Sub, fp) \
V(i8x16_eq, I8x16Eq, fp) \
V(i8x16_ne, I8x16Ne, fp) \
V(i8x16_gt_s, I8x16GtS, fp) \
V(i8x16_ge_s, I8x16GeS, fp) \
V(i8x16_gt_u, I8x16GtU, fp) \
V(i8x16_ge_u, I8x16GeU, fp) \
V(i8x16_min_s, I8x16MinS, fp) \
V(i8x16_min_u, I8x16MinU, fp) \
V(i8x16_max_s, I8x16MaxS, fp) \
V(i8x16_max_u, I8x16MaxU, fp) \
V(i8x16_shl, I8x16Shl, gp) \
V(i8x16_shr_s, I8x16ShrS, gp) \
V(i8x16_shr_u, I8x16ShrU, gp) \
V(i8x16_rounding_average_u, I8x16RoundingAverageU, fp) \
V(s128_and, S128And, fp) \
V(s128_or, S128Or, fp) \
V(s128_xor, S128Xor, fp) \
V(s128_and_not, S128AndNot, fp)
#define EMIT_SIMD_BINOP_RR(name, op, stype) \
......@@ -2735,18 +2737,6 @@ void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
bailout(kSimd, "i32x4.trunc_sat_f64x2_u_zero");
}
void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16_rounding_average_u");
}
void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8_rounding_average_u");
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
LoadU64(limit_address, MemOperand(limit_address));
CmpU64(sp, limit_address);
......
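
On the Liftoff side, the new i16x8_rounding_average_u and i8x16_rounding_average_u entries in SIMD_BINOP_RR_LIST take over from the two bailouts removed above, so these instructions are now generated directly instead of falling back. The emit macro is collapsed in this view; a plausible sketch of what one expansion ends up doing, with the register-class handling simplified (an assumption, not copied from the source):

// Sketch only: assumed shape of the generated Liftoff emitter. Treating the
// fp() views of the LiftoffRegisters as directly usable Simd128Registers is
// a simplification made by this sketch.
void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
                                                     LiftoffRegister lhs,
                                                     LiftoffRegister rhs) {
  I16x8RoundingAverageU(dst.fp(), lhs.fp(), rhs.fp());
}
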