Commit 8df14bbf authored by Milad Fa, committed by V8 LUCI CQ

S390 [liftoff]: Implement simd min/max opcodes

Change-Id: Icd3e991d1b00c6846e7fa7330e39f62d16ef2028
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3083081
Reviewed-by: Junliang Yan <junyan@redhat.com>
Commit-Queue: Milad Fa <mfarazma@redhat.com>
Cr-Commit-Position: refs/heads/master@{#76199}
parent eb41cd58
@@ -5249,11 +5249,23 @@ SIMD_BINOP_LIST_VRR_B(EMIT_SIMD_BINOP_VRR_B)
V(I32x4Add, va, 0, 0, 2) \
V(I32x4Sub, vs, 0, 0, 2) \
V(I32x4Mul, vml, 0, 0, 2) \
V(I32x4MinS, vmn, 0, 0, 2) \
V(I32x4MinU, vmnl, 0, 0, 2) \
V(I32x4MaxS, vmx, 0, 0, 2) \
V(I32x4MaxU, vmxl, 0, 0, 2) \
V(I16x8Add, va, 0, 0, 1) \
V(I16x8Sub, vs, 0, 0, 1) \
V(I16x8Mul, vml, 0, 0, 1) \
V(I16x8MinS, vmn, 0, 0, 1) \
V(I16x8MinU, vmnl, 0, 0, 1) \
V(I16x8MaxS, vmx, 0, 0, 1) \
V(I16x8MaxU, vmxl, 0, 0, 1) \
V(I8x16Add, va, 0, 0, 0) \
V(I8x16Sub, vs, 0, 0, 0) \
V(I8x16MinS, vmn, 0, 0, 0) \
V(I8x16MinU, vmnl, 0, 0, 0) \
V(I8x16MaxS, vmx, 0, 0, 0) \
V(I8x16MaxU, vmxl, 0, 0, 0)
#define EMIT_SIMD_BINOP_VRR_C(name, op, c1, c2, c3) \
void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
......
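For context, each entry in this list is expanded by EMIT_SIMD_BINOP_VRR_C into a TurboAssembler wrapper around the corresponding z/Arch vector instruction, with the three trailing columns passed as Condition operands. A minimal sketch of what the new I32x4MinS entry expands to (the full macro body is collapsed in this diff, so the exact expansion is an assumption; the operand values match the hand-written case removed from the code generator below):

```cpp
// Sketch only: approximate expansion of V(I32x4MinS, vmn, 0, 0, 2).
// The element-size code 2 selects 32-bit lanes for the vector-min instruction.
void TurboAssembler::I32x4MinS(Simd128Register dst, Simd128Register src1,
                               Simd128Register src2) {
  vmn(dst, src1, src2, Condition(0), Condition(0), Condition(2));
}
```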
@@ -1108,6 +1108,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
V(I32x4GeS) \
V(I32x4GtU) \
V(I32x4GeU) \
V(I32x4MinS) \
V(I32x4MinU) \
V(I32x4MaxS) \
V(I32x4MaxU) \
V(I16x8Add) \
V(I16x8Sub) \
V(I16x8Mul) \
@@ -1117,6 +1121,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
V(I16x8GeS) \
V(I16x8GtU) \
V(I16x8GeU) \
V(I16x8MinS) \
V(I16x8MinU) \
V(I16x8MaxS) \
V(I16x8MaxU) \
V(I8x16Add) \
V(I8x16Sub) \
V(I8x16Eq) \
@@ -1124,7 +1132,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
V(I8x16GtS) \
V(I8x16GeS) \
V(I8x16GtU) \
V(I8x16GeU) \
V(I8x16MinS) \
V(I8x16MinU) \
V(I8x16MaxS) \
V(I8x16MaxU)
#define PROTOTYPE_SIMD_BINOP(name) \
void name(Simd128Register dst, Simd128Register src1, Simd128Register src2);
......
@@ -2519,6 +2519,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
V(I32x4GeS) \
V(I32x4GtU) \
V(I32x4GeU) \
V(I32x4MinS) \
V(I32x4MinU) \
V(I32x4MaxS) \
V(I32x4MaxU) \
V(I16x8Add) \
V(I16x8Sub) \
V(I16x8Mul) \
@@ -2528,6 +2532,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
V(I16x8GeS) \
V(I16x8GtU) \
V(I16x8GeU) \
V(I16x8MinS) \
V(I16x8MinU) \
V(I16x8MaxS) \
V(I16x8MaxU) \
V(I8x16Add) \
V(I8x16Sub) \
V(I8x16Eq) \
@@ -2535,7 +2543,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
V(I8x16GtS) \
V(I8x16GeS) \
V(I8x16GtU) \
V(I8x16GeU) \
V(I8x16MinS) \
V(I8x16MinU) \
V(I8x16MaxS) \
V(I8x16MaxU)
#define EMIT_SIMD_BINOP(name) \
case kS390_##name: { \
@@ -2645,78 +2657,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0));
break;
}
case kS390_I32x4MinS: {
__ vmn(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(2));
break;
}
case kS390_I32x4MinU: {
__ vmnl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(2));
break;
}
case kS390_I16x8MinS: {
__ vmn(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(1));
break;
}
case kS390_I16x8MinU: {
__ vmnl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(1));
break;
}
case kS390_I8x16MinS: {
__ vmn(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(0));
break;
}
case kS390_I8x16MinU: {
__ vmnl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(0));
break;
}
case kS390_I32x4MaxS: {
__ vmx(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(2));
break;
}
case kS390_I32x4MaxU: {
__ vmxl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(2));
break;
}
case kS390_I16x8MaxS: {
__ vmx(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(1));
break;
}
case kS390_I16x8MaxU: {
__ vmxl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(1));
break;
}
case kS390_I8x16MaxS: {
__ vmx(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(0));
break;
}
case kS390_I8x16MaxU: {
__ vmxl(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), Condition(0), Condition(0),
Condition(0));
break;
}
// vector shifts
#define VECTOR_SHIFT(op, mode) \
{ \
......
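The twelve hand-written min/max cases above are deleted because the new list entries route these opcodes through the generic EMIT_SIMD_BINOP macro instead. A rough sketch of the replacement expansion for one opcode, assuming the macro body (collapsed in this diff) simply forwards the instruction's SIMD operands to the TurboAssembler helper of the same name:

```cpp
// Sketch only: approximate EMIT_SIMD_BINOP(I32x4MinS) expansion in the S390
// code generator; the exact macro body is not shown in this diff.
case kS390_I32x4MinS: {
  __ I32x4MinS(i.OutputSimd128Register(), i.InputSimd128Register(0),
               i.InputSimd128Register(1));
  break;
}
```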
@@ -2143,60 +2143,72 @@ void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
b(condition, target);  // branch if SMI
}
#define SIMD_BINOP_LIST(V) \
V(f64x2_add, F64x2Add) \
V(f64x2_sub, F64x2Sub) \
V(f64x2_mul, F64x2Mul) \
V(f64x2_div, F64x2Div) \
V(f64x2_min, F64x2Min) \
V(f64x2_max, F64x2Max) \
V(f64x2_eq, F64x2Eq) \
V(f64x2_ne, F64x2Ne) \
V(f64x2_lt, F64x2Lt) \
V(f64x2_le, F64x2Le) \
V(f32x4_add, F32x4Add) \
V(f32x4_sub, F32x4Sub) \
V(f32x4_mul, F32x4Mul) \
V(f32x4_div, F32x4Div) \
V(f32x4_min, F32x4Min) \
V(f32x4_max, F32x4Max) \
V(f32x4_eq, F32x4Eq) \
V(f32x4_ne, F32x4Ne) \
V(f32x4_lt, F32x4Lt) \
V(f32x4_le, F32x4Le) \
V(i64x2_add, I64x2Add) \
V(i64x2_sub, I64x2Sub) \
V(i64x2_mul, I64x2Mul) \
V(i64x2_eq, I64x2Eq) \
V(i64x2_ne, I64x2Ne) \
V(i64x2_gt_s, I64x2GtS) \
V(i64x2_ge_s, I64x2GeS) \
V(i32x4_add, I32x4Add) \
V(i32x4_sub, I32x4Sub) \
V(i32x4_mul, I32x4Mul) \
V(i32x4_eq, I32x4Eq) \
V(i32x4_ne, I32x4Ne) \
V(i32x4_gt_s, I32x4GtS) \
V(i32x4_ge_s, I32x4GeS) \
V(i32x4_gt_u, I32x4GtU) \
V(i32x4_ge_u, I32x4GeU) \
V(i32x4_min_s, I32x4MinS) \
V(i32x4_min_u, I32x4MinU) \
V(i32x4_max_s, I32x4MaxS) \
V(i32x4_max_u, I32x4MaxU) \
V(i16x8_add, I16x8Add) \
V(i16x8_sub, I16x8Sub) \
V(i16x8_mul, I16x8Mul) \
V(i16x8_eq, I16x8Eq) \
V(i16x8_ne, I16x8Ne) \
V(i16x8_gt_s, I16x8GtS) \
V(i16x8_ge_s, I16x8GeS) \
V(i16x8_gt_u, I16x8GtU) \
V(i16x8_ge_u, I16x8GeU) \
V(i16x8_min_s, I16x8MinS) \
V(i16x8_min_u, I16x8MinU) \
V(i16x8_max_s, I16x8MaxS) \
V(i16x8_max_u, I16x8MaxU) \
V(i8x16_add, I8x16Add) \
V(i8x16_sub, I8x16Sub) \
V(i8x16_eq, I8x16Eq) \
V(i8x16_ne, I8x16Ne) \
V(i8x16_gt_s, I8x16GtS) \
V(i8x16_ge_s, I8x16GeS) \
V(i8x16_gt_u, I8x16GtU) \
V(i8x16_ge_u, I8x16GeU) \
V(i8x16_min_s, I8x16MinS) \
V(i8x16_min_u, I8x16MinU) \
V(i8x16_max_s, I8x16MaxS) \
V(i8x16_max_u, I8x16MaxU)
#define EMIT_SIMD_BINOP(name, op) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
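Each (name, op) pair added to SIMD_BINOP_LIST above now expands through EMIT_SIMD_BINOP into a LiftoffAssembler::emit_* method, which is what lets the bailouts removed below go away. A sketch of one expansion, assuming the macro forwards to the TurboAssembler helper of the same name (the .fp().toSimd() register conversion is an assumption; the macro body is truncated in this diff):

```cpp
// Sketch only: approximate EMIT_SIMD_BINOP(i32x4_min_s, I32x4MinS) expansion.
// How a LiftoffRegister maps to a Simd128Register here is assumed, not shown.
void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst, LiftoffRegister lhs,
                                        LiftoffRegister rhs) {
  I32x4MinS(dst.fp().toSimd(), lhs.fp().toSimd(), rhs.fp().toSimd());
}
```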
@@ -2540,30 +2552,6 @@ void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
bailout(kSimd, "i32x4_shri_u");
}
void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i32x4_min_s");
}
void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i32x4_min_u");
}
void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i32x4_max_s");
}
void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i32x4_max_u");
}
void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2675,30 +2663,6 @@ void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8addsaturate_u");
}
void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8_min_s");
}
void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8_min_u");
}
void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8_max_s");
}
void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8_max_u");
}
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "i16x8.extadd_pairwise_i8x16_s");
@@ -2828,30 +2792,6 @@ void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i8x16addsaturate_u");
}
void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16_min_s");
}
void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16_min_u");
}
void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16_max_s");
}
void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16_max_u");
}
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
bailout(kUnsupportedArchitecture, "emit_s128_const");
......