Commit fe492561 authored by Milad Fa, committed by V8 LUCI CQ

S390 [liftoff]: Implement simd comparisons

Change-Id: I48effbb727b523ac1911584d3072c13671633046
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3077623
Reviewed-by: Junliang Yan <junyan@redhat.com>
Reviewed-by: Milad Fa <mfarazma@redhat.com>
Commit-Queue: Milad Fa <mfarazma@redhat.com>
Cr-Commit-Position: refs/heads/master@{#76145}
parent 63be6dde
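Editor's note (not part of the commit): the TurboAssembler helpers added below derive the comparison forms that have no single S390 vector instruction from the native compares: Ne as the complement of Eq, GeS as the complement of GtS with swapped operands, GeU as GtU OR Eq, and the float Lt/Le by reversing the operands of vfch/vfche. A minimal scalar C++ sketch of those identities, with illustrative names only:

// Editor's sketch, not part of this commit: per-lane model of the identities
// used by the new SIMD comparison helpers (all-ones for true, all-zeros for false).
#include <cassert>
#include <cstdint>

namespace scalar_model {

constexpr uint32_t kTrue = 0xFFFFFFFFu;
constexpr uint32_t kFalse = 0u;

inline uint32_t Eq(uint32_t a, uint32_t b) { return a == b ? kTrue : kFalse; }
inline uint32_t GtU(uint32_t a, uint32_t b) { return a > b ? kTrue : kFalse; }
inline uint32_t GtS(int32_t a, int32_t b) { return a > b ? kTrue : kFalse; }

// Ne is the bitwise complement of Eq (vceq followed by vno below).
inline uint32_t Ne(uint32_t a, uint32_t b) { return ~Eq(a, b); }

// GeS(a, b) is NOT(GtS(b, a)) (vch with swapped operands, then vno).
inline uint32_t GeS(int32_t a, int32_t b) { return ~GtS(b, a); }

// GeU(a, b) is GtU(a, b) OR Eq(a, b) (vchl and vceq combined with vo).
inline uint32_t GeU(uint32_t a, uint32_t b) { return GtU(a, b) | Eq(a, b); }

}  // namespace scalar_model

int main() {
  using namespace scalar_model;
  assert(Ne(1, 2) == kTrue && Ne(3, 3) == kFalse);
  assert(GeS(-1, -1) == kTrue && GeS(-2, -1) == kFalse);
  assert(GeU(0xFFFFFFFFu, 1) == kTrue && GeU(0, 1) == kFalse);
  return 0;
}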
@@ -5083,6 +5083,8 @@ void TurboAssembler::AtomicExchangeU16(Register addr, Register value,
}
// Simd Support.
#define kScratchDoubleReg d13
void TurboAssembler::F64x2Splat(Simd128Register dst, Simd128Register src) {
vrep(dst, src, Operand(0), Condition(3));
}
@@ -5205,39 +5207,64 @@ void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
vlvg(dst, src2, MemOperand(r0, 15 - imm_lane_idx), Condition(0));
}
#define SIMD_BINOP_LIST(V) \
V(F64x2Add, vfa, 0, 0, 3) \
V(F64x2Sub, vfs, 0, 0, 3) \
V(F64x2Mul, vfm, 0, 0, 3) \
V(F64x2Div, vfd, 0, 0, 3) \
V(F64x2Min, vfmin, 1, 0, 3) \
V(F64x2Max, vfmax, 1, 0, 3) \
V(F32x4Add, vfa, 0, 0, 2) \
V(F32x4Sub, vfs, 0, 0, 2) \
V(F32x4Mul, vfm, 0, 0, 2) \
V(F32x4Div, vfd, 0, 0, 2) \
V(F32x4Min, vfmin, 1, 0, 2) \
V(F32x4Max, vfmax, 1, 0, 2) \
V(I64x2Add, va, 0, 0, 3) \
V(I64x2Sub, vs, 0, 0, 3) \
V(I32x4Add, va, 0, 0, 2) \
V(I32x4Sub, vs, 0, 0, 2) \
V(I32x4Mul, vml, 0, 0, 2) \
V(I16x8Add, va, 0, 0, 1) \
V(I16x8Sub, vs, 0, 0, 1) \
V(I16x8Mul, vml, 0, 0, 1) \
V(I8x16Add, va, 0, 0, 0) \
#define SIMD_BINOP_LIST_VRR_B(V) \
V(I64x2Eq, vceq, 0, 3) \
V(I64x2GtS, vch, 0, 3) \
V(I32x4Eq, vceq, 0, 2) \
V(I32x4GtS, vch, 0, 2) \
V(I32x4GtU, vchl, 0, 2) \
V(I16x8Eq, vceq, 0, 1) \
V(I16x8GtS, vch, 0, 1) \
V(I16x8GtU, vchl, 0, 1) \
V(I8x16Eq, vceq, 0, 0) \
V(I8x16GtS, vch, 0, 0) \
V(I8x16GtU, vchl, 0, 0)
#define EMIT_SIMD_BINOP_VRR_B(name, op, c1, c2) \
void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
Simd128Register src2) { \
op(dst, src1, src2, Condition(c1), Condition(c2)); \
}
SIMD_BINOP_LIST_VRR_B(EMIT_SIMD_BINOP_VRR_B)
#undef EMIT_SIMD_BINOP_VRR_B
#undef SIMD_BINOP_LIST_VRR_B
#define SIMD_BINOP_LIST_VRR_C(V) \
V(F64x2Add, vfa, 0, 0, 3) \
V(F64x2Sub, vfs, 0, 0, 3) \
V(F64x2Mul, vfm, 0, 0, 3) \
V(F64x2Div, vfd, 0, 0, 3) \
V(F64x2Min, vfmin, 1, 0, 3) \
V(F64x2Max, vfmax, 1, 0, 3) \
V(F64x2Eq, vfce, 0, 0, 3) \
V(F32x4Add, vfa, 0, 0, 2) \
V(F32x4Sub, vfs, 0, 0, 2) \
V(F32x4Mul, vfm, 0, 0, 2) \
V(F32x4Div, vfd, 0, 0, 2) \
V(F32x4Min, vfmin, 1, 0, 2) \
V(F32x4Max, vfmax, 1, 0, 2) \
V(F32x4Eq, vfce, 0, 0, 2) \
V(I64x2Add, va, 0, 0, 3) \
V(I64x2Sub, vs, 0, 0, 3) \
V(I32x4Add, va, 0, 0, 2) \
V(I32x4Sub, vs, 0, 0, 2) \
V(I32x4Mul, vml, 0, 0, 2) \
V(I16x8Add, va, 0, 0, 1) \
V(I16x8Sub, vs, 0, 0, 1) \
V(I16x8Mul, vml, 0, 0, 1) \
V(I8x16Add, va, 0, 0, 0) \
V(I8x16Sub, vs, 0, 0, 0)
#define EMIT_SIMD_BINOP(name, op, c1, c2, c3) \
#define EMIT_SIMD_BINOP_VRR_C(name, op, c1, c2, c3) \
void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
Simd128Register src2) { \
op(dst, src1, src2, Condition(c1), Condition(c2), Condition(c3)); \
}
SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
#undef EMIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
SIMD_BINOP_LIST_VRR_C(EMIT_SIMD_BINOP_VRR_C)
#undef EMIT_SIMD_BINOP_VRR_C
#undef SIMD_BINOP_LIST_VRR_C
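For readability, here is what the two generator macros above expand to for a single list entry each (hand-expanded by the editor, not additional diff content):

// Hand-expanded sketch: EMIT_SIMD_BINOP_VRR_B applied to V(I64x2Eq, vceq, 0, 3)
// produces a two-condition VRR-b style call:
void TurboAssembler::I64x2Eq(Simd128Register dst, Simd128Register src1,
                             Simd128Register src2) {
  vceq(dst, src1, src2, Condition(0), Condition(3));
}

// Hand-expanded sketch: EMIT_SIMD_BINOP_VRR_C applied to V(F64x2Eq, vfce, 0, 0, 3)
// produces a three-condition VRR-c style call:
void TurboAssembler::F64x2Eq(Simd128Register dst, Simd128Register src1,
                             Simd128Register src2) {
  vfce(dst, src1, src2, Condition(0), Condition(0), Condition(3));
}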
// Opcodes without a 1-1 match.
void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
Register scratch_1 = r0;
@@ -5252,6 +5279,113 @@ void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
vlvgp(dst, r0, r1);
}
void TurboAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vfce(dst, src1, src2, Condition(0), Condition(0), Condition(3));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(3));
}
void TurboAssembler::F64x2Lt(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vfch(dst, src2, src1, Condition(0), Condition(0), Condition(3));
}
void TurboAssembler::F64x2Le(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vfche(dst, src2, src1, Condition(0), Condition(0), Condition(3));
}
void TurboAssembler::F32x4Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vfce(dst, src1, src2, Condition(0), Condition(0), Condition(2));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(2));
}
void TurboAssembler::F32x4Lt(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vfch(dst, src2, src1, Condition(0), Condition(0), Condition(2));
}
void TurboAssembler::F32x4Le(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vfche(dst, src2, src1, Condition(0), Condition(0), Condition(2));
}
void TurboAssembler::I64x2Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vceq(dst, src1, src2, Condition(0), Condition(3));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(3));
}
void TurboAssembler::I64x2GeS(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
// Compute !(B > A) which is equal to A >= B.
vch(dst, src2, src1, Condition(0), Condition(3));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(3));
}
void TurboAssembler::I32x4Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vceq(dst, src1, src2, Condition(0), Condition(2));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(2));
}
void TurboAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
// Compute !(B > A) which is equal to A >= B.
vch(dst, src2, src1, Condition(0), Condition(2));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(2));
}
void TurboAssembler::I32x4GeU(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vceq(kScratchDoubleReg, src1, src2, Condition(0), Condition(2));
vchl(dst, src1, src2, Condition(0), Condition(2));
vo(dst, dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(2));
}
void TurboAssembler::I16x8Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vceq(dst, src1, src2, Condition(0), Condition(1));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(1));
}
void TurboAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
// Compute !(B > A) which is equal to A >= B.
vch(dst, src2, src1, Condition(0), Condition(1));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(1));
}
void TurboAssembler::I16x8GeU(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vceq(kScratchDoubleReg, src1, src2, Condition(0), Condition(1));
vchl(dst, src1, src2, Condition(0), Condition(1));
vo(dst, dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(1));
}
void TurboAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vceq(dst, src1, src2, Condition(0), Condition(0));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(0));
}
void TurboAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
// Compute !(B > A) which is equal to A >= B.
vch(dst, src2, src1, Condition(0), Condition(0));
vno(dst, dst, dst, Condition(0), Condition(0), Condition(0));
}
void TurboAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
vceq(kScratchDoubleReg, src1, src2, Condition(0), Condition(0));
vchl(dst, src1, src2, Condition(0), Condition(0));
vo(dst, dst, kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
}
#undef kScratchDoubleReg
} // namespace internal
} // namespace v8
@@ -1078,23 +1078,53 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
V(F64x2Div) \
V(F64x2Min) \
V(F64x2Max) \
V(F64x2Eq) \
V(F64x2Ne) \
V(F64x2Lt) \
V(F64x2Le) \
V(F32x4Add) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Div) \
V(F32x4Min) \
V(F32x4Max) \
V(F32x4Eq) \
V(F32x4Ne) \
V(F32x4Lt) \
V(F32x4Le) \
V(I64x2Add) \
V(I64x2Sub) \
V(I64x2Mul) \
V(I64x2Eq) \
V(I64x2Ne) \
V(I64x2GtS) \
V(I64x2GeS) \
V(I32x4Add) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4Eq) \
V(I32x4Ne) \
V(I32x4GtS) \
V(I32x4GeS) \
V(I32x4GtU) \
V(I32x4GeU) \
V(I16x8Add) \
V(I16x8Sub) \
V(I16x8Mul) \
V(I16x8Eq) \
V(I16x8Ne) \
V(I16x8GtS) \
V(I16x8GeS) \
V(I16x8GtU) \
V(I16x8GeU) \
V(I8x16Add) \
V(I8x16Sub)
V(I8x16Sub) \
V(I8x16Eq) \
V(I8x16Ne) \
V(I8x16GtS) \
V(I8x16GeS) \
V(I8x16GtU) \
V(I8x16GeU)
#define PROTOTYPE_SIMD_BINOP(name) \
void name(Simd128Register dst, Simd128Register src1, Simd128Register src2);
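For one entry, the prototype macro above produces a plain declaration matching the definitions in the .cc file; applying it to V(F64x2Ne) from the list (whose enclosing #define name lies outside this hunk) yields, roughly:

// Hand-expanded sketch, not part of the diff:
void F64x2Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2);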
@@ -2150,23 +2150,53 @@ void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
V(f64x2_div, F64x2Div) \
V(f64x2_min, F64x2Min) \
V(f64x2_max, F64x2Max) \
V(f64x2_eq, F64x2Eq) \
V(f64x2_ne, F64x2Ne) \
V(f64x2_lt, F64x2Lt) \
V(f64x2_le, F64x2Le) \
V(f32x4_add, F32x4Add) \
V(f32x4_sub, F32x4Sub) \
V(f32x4_mul, F32x4Mul) \
V(f32x4_div, F32x4Div) \
V(f32x4_min, F32x4Min) \
V(f32x4_max, F32x4Max) \
V(f32x4_eq, F32x4Eq) \
V(f32x4_ne, F32x4Ne) \
V(f32x4_lt, F32x4Lt) \
V(f32x4_le, F32x4Le) \
V(i64x2_add, I64x2Add) \
V(i64x2_sub, I64x2Sub) \
V(i64x2_mul, I64x2Mul) \
V(i64x2_eq, I64x2Eq) \
V(i64x2_ne, I64x2Ne) \
V(i64x2_gt_s, I64x2GtS) \
V(i64x2_ge_s, I64x2GeS) \
V(i32x4_add, I32x4Add) \
V(i32x4_sub, I32x4Sub) \
V(i32x4_mul, I32x4Mul) \
V(i32x4_eq, I32x4Eq) \
V(i32x4_ne, I32x4Ne) \
V(i32x4_gt_s, I32x4GtS) \
V(i32x4_ge_s, I32x4GeS) \
V(i32x4_gt_u, I32x4GtU) \
V(i32x4_ge_u, I32x4GeU) \
V(i16x8_add, I16x8Add) \
V(i16x8_sub, I16x8Sub) \
V(i16x8_mul, I16x8Mul) \
V(i16x8_eq, I16x8Eq) \
V(i16x8_ne, I16x8Ne) \
V(i16x8_gt_s, I16x8GtS) \
V(i16x8_ge_s, I16x8GeS) \
V(i16x8_gt_u, I16x8GtU) \
V(i16x8_ge_u, I16x8GeU) \
V(i8x16_add, I8x16Add) \
V(i8x16_sub, I8x16Sub)
V(i8x16_sub, I8x16Sub) \
V(i8x16_eq, I8x16Eq) \
V(i8x16_ne, I8x16Ne) \
V(i8x16_gt_s, I8x16GtS) \
V(i8x16_ge_s, I8x16GeS) \
V(i8x16_gt_u, I8x16GtU) \
V(i8x16_ge_u, I8x16GeU)
#define EMIT_SIMD_BINOP(name, op) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
@@ -2822,156 +2852,6 @@ void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i8x16_max_u");
}
void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16_eq");
}
void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16_ne");
}
void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16gt_s");
}
void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16gt_u");
}
void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16ge_s");
}
void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16ge_u");
}
void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8_eq");
}
void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8_ne");
}
void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8gt_s");
}
void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8gt_u");
}
void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8ge_s");
}
void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8ge_u");
}
void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i32x4_eq");
}
void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i32x4_ne");
}
void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i32x4gt_s");
}
void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i32x4gt_u");
}
void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i32x4ge_s");
}
void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i32x4ge_u");
}
void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i64x2.eq");
}
void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i64x2_ne");
}
void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i64x2.gt_s");
}
void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i64x2.ge_s");
}
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_f32x4_eq");
}
void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_f32x4_ne");
}
void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_f32x4_lt");
}
void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_f32x4_le");
}
void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_f64x2_eq");
}
void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_f64x2_ne");
}
void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_f64x2_lt");
}
void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_f64x2_le");
}
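These removed comparison bailouts are superseded by the EMIT_SIMD_BINOP expansion introduced earlier in this file. That macro's body is cut off by the hunk boundary, so the following is only a hedged sketch of the forwarding shape it presumably generates; the .fp().toSimd() register conversions are the editor's assumption, not taken from the diff:

// Hedged sketch only (macro body not visible in this diff): each generated
// emit_* is assumed to forward to the matching TurboAssembler helper after
// converting the Liftoff registers to Simd128Registers, e.g.:
void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs) {
  F64x2Ne(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.fp().toSimd());
}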
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
bailout(kUnsupportedArchitecture, "emit_s128_const");