Commit 28d4ccb6 authored by Milad Fa, committed by V8 LUCI CQ

S390 [liftoff]: Implement simd integer unops

Implementations are added to the macro-assembler so they can be
shared between Liftoff and the code generator.

Change-Id: Ic38677b3266399e5e170a4b2d6a8f90d0b830d47
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3389090
Reviewed-by: Junliang Yan <junyan@redhat.com>
Commit-Queue: Milad Farazmand <mfarazma@redhat.com>
Cr-Commit-Position: refs/heads/main@{#78650}
parent 1ccf7663
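
Note on the pattern used throughout the diff: each back end keeps an X-macro list of SIMD unary ops, and a single emitter macro expands that list into one method, switch case, or Liftoff hook per entry, so adding an op becomes a one-line change per list. On S390, integer negation and absolute value map to the z/Architecture vector instructions vlc (vector load complement) and vlp (vector load positive), with the last Condition operand selecting the element size (0 = byte, 1 = halfword, 2 = word, 3 = doubleword). The sketch below is a self-contained toy reduction of that X-macro technique, not V8 code; the types and printf bodies stand in for TurboAssembler, Simd128Register, and the real instruction emitters.

#include <cstdio>

struct Simd128Register { int code; };

struct ToyAssembler {
  // Toy stand-ins for the s390 vector instructions named in the real list:
  // vlc negates each lane, vlp takes the absolute value of each lane.
  void vlc(Simd128Register dst, Simd128Register src, int /*m4*/, int /*m5*/,
           int lane_size) {
    std::printf("vlc v%d, v%d, element size %d (negate)\n", dst.code,
                src.code, lane_size);
  }
  void vlp(Simd128Register dst, Simd128Register src, int /*m4*/, int /*m5*/,
           int lane_size) {
    std::printf("vlp v%d, v%d, element size %d (abs)\n", dst.code, src.code,
                lane_size);
  }

// The list: V(name, instruction, c1, c2, element size). Mirrors the shape of
// the SIMD_UNOP_LIST entries added in the diff, truncated to two entries.
#define SIMD_UNOP_LIST(V)   \
  V(I64x2Neg, vlc, 0, 0, 3) \
  V(I8x16Abs, vlp, 0, 0, 0)

// One expansion per entry: a named method forwarding to the instruction.
#define EMIT_SIMD_UNOP(name, op, c1, c2, c3)            \
  void name(Simd128Register dst, Simd128Register src) { \
    op(dst, src, c1, c2, c3);                           \
  }
  SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
#undef EMIT_SIMD_UNOP
#undef SIMD_UNOP_LIST
};

int main() {
  ToyAssembler masm;
  masm.I64x2Neg({0}, {1});  // expands to vlc(..., 3): negate 2x64-bit lanes
  masm.I8x16Abs({2}, {3});  // expands to vlp(..., 0): abs of 16x8-bit lanes
}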
@@ -5242,7 +5242,15 @@ void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
   V(F32x4Ceil, vfi, 6, 0, 2)       \
   V(F32x4Floor, vfi, 7, 0, 2)      \
   V(F32x4Trunc, vfi, 5, 0, 2)      \
-  V(F32x4NearestInt, vfi, 4, 0, 2)
+  V(F32x4NearestInt, vfi, 4, 0, 2) \
+  V(I64x2Abs, vlp, 0, 0, 3)        \
+  V(I32x4Abs, vlp, 0, 0, 2)        \
+  V(I16x8Abs, vlp, 0, 0, 1)        \
+  V(I8x16Abs, vlp, 0, 0, 0)        \
+  V(I64x2Neg, vlc, 0, 0, 3)        \
+  V(I32x4Neg, vlc, 0, 0, 2)        \
+  V(I16x8Neg, vlc, 0, 0, 1)        \
+  V(I8x16Neg, vlc, 0, 0, 0)
 
 #define EMIT_SIMD_UNOP_VRR_A(name, op, c1, c2, c3)                      \
   void TurboAssembler::name(Simd128Register dst, Simd128Register src) { \
...
@@ -1120,7 +1120,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   V(F32x4Ceil)       \
   V(F32x4Floor)      \
   V(F32x4Trunc)      \
-  V(F32x4NearestInt)
+  V(F32x4NearestInt) \
+  V(I64x2Abs)        \
+  V(I32x4Abs)        \
+  V(I16x8Abs)        \
+  V(I8x16Abs)        \
+  V(I64x2Neg)        \
+  V(I32x4Neg)        \
+  V(I16x8Neg)        \
+  V(I8x16Neg)
 
 #define PROTOTYPE_SIMD_UNOP(name) \
   void name(Simd128Register dst, Simd128Register src);
...
@@ -2663,7 +2663,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
   V(F32x4Ceil, Simd128Register, Simd128Register)        \
   V(F32x4Floor, Simd128Register, Simd128Register)       \
   V(F32x4Trunc, Simd128Register, Simd128Register)       \
-  V(F32x4NearestInt, Simd128Register, Simd128Register)
+  V(F32x4NearestInt, Simd128Register, Simd128Register)  \
+  V(I64x2Abs, Simd128Register, Simd128Register)         \
+  V(I32x4Abs, Simd128Register, Simd128Register)         \
+  V(I16x8Abs, Simd128Register, Simd128Register)         \
+  V(I8x16Abs, Simd128Register, Simd128Register)         \
+  V(I64x2Neg, Simd128Register, Simd128Register)         \
+  V(I32x4Neg, Simd128Register, Simd128Register)         \
+  V(I16x8Neg, Simd128Register, Simd128Register)         \
+  V(I8x16Neg, Simd128Register, Simd128Register)
 
 #define EMIT_SIMD_UNOP(name, dtype, stype) \
   case kS390_##name: {                     \
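
The list above is consumed by the code generator's EMIT_SIMD_UNOP macro, of which the hunk only shows the first two lines; a plausible completion (an assumption, not quoted from the commit) is that each generated case simply forwards to the matching TurboAssembler helper from the first hunk, which is what lets the twenty handwritten cases in the next two hunks be deleted. A self-contained toy reduction of that case-generating pattern:

#include <cstdio>

enum Opcode { kS390_I64x2Neg, kS390_I8x16Abs };

// Stand-ins for the shared macro-assembler helpers.
void I64x2Neg(int dst, int src) { std::printf("I64x2Neg v%d, v%d\n", dst, src); }
void I8x16Abs(int dst, int src) { std::printf("I8x16Abs v%d, v%d\n", dst, src); }

#define SIMD_UNOP_LIST(V) \
  V(I64x2Neg)             \
  V(I8x16Abs)

void AssembleOp(Opcode op, int dst, int src) {
  switch (op) {
// One `case` per list entry, each forwarding to the shared helper.
#define EMIT_SIMD_UNOP(name) \
  case kS390_##name:         \
    name(dst, src);          \
    break;
    SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
#undef EMIT_SIMD_UNOP
  }
}
#undef SIMD_UNOP_LIST

int main() { AssembleOp(kS390_I8x16Abs, 0, 1); }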
@@ -2756,26 +2764,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     // vector unary ops
-    case kS390_I64x2Neg: {
-      __ vlc(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
-             Condition(0), Condition(3));
-      break;
-    }
-    case kS390_I32x4Neg: {
-      __ vlc(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
-             Condition(0), Condition(2));
-      break;
-    }
-    case kS390_I16x8Neg: {
-      __ vlc(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
-             Condition(0), Condition(1));
-      break;
-    }
-    case kS390_I8x16Neg: {
-      __ vlc(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
-             Condition(0), Condition(0));
-      break;
-    }
     case kS390_F32x4RecipApprox: {
       __ mov(kScratchReg, Operand(1));
       __ ConvertIntToFloat(kScratchDoubleReg, kScratchReg);
@@ -2802,26 +2790,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ vno(dst, src, src, Condition(0), Condition(0), Condition(0));
       break;
     }
-    case kS390_I8x16Abs: {
-      __ vlp(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
-             Condition(0), Condition(0));
-      break;
-    }
-    case kS390_I16x8Abs: {
-      __ vlp(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
-             Condition(0), Condition(1));
-      break;
-    }
-    case kS390_I32x4Abs: {
-      __ vlp(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
-             Condition(0), Condition(2));
-      break;
-    }
-    case kS390_I64x2Abs: {
-      __ vlp(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(0),
-             Condition(0), Condition(3));
-      break;
-    }
     // vector boolean unops
     case kS390_V128AnyTrue: {
       Simd128Register src = i.InputSimd128Register(0);
...
@@ -2396,7 +2396,15 @@ SIMD_BINOP_RI_LIST(EMIT_SIMD_BINOP_RI)
   V(f32x4_ceil, F32x4Ceil, fp, fp, true, bool)               \
   V(f32x4_floor, F32x4Floor, fp, fp, true, bool)             \
   V(f32x4_trunc, F32x4Trunc, fp, fp, true, bool)             \
-  V(f32x4_nearest_int, F32x4NearestInt, fp, fp, true, bool)
+  V(f32x4_nearest_int, F32x4NearestInt, fp, fp, true, bool)  \
+  V(i64x2_abs, I64x2Abs, fp, fp, , void)                     \
+  V(i32x4_abs, I32x4Abs, fp, fp, , void)                     \
+  V(i16x8_abs, I16x8Abs, fp, fp, , void)                     \
+  V(i8x16_abs, I8x16Abs, fp, fp, , void)                     \
+  V(i64x2_neg, I64x2Neg, fp, fp, , void)                     \
+  V(i32x4_neg, I32x4Neg, fp, fp, , void)                     \
+  V(i16x8_neg, I16x8Neg, fp, fp, , void)                     \
+  V(i8x16_neg, I8x16Neg, fp, fp, , void)
 
 #define EMIT_SIMD_UNOP(name, op, dtype, stype, return_val, return_type) \
   return_type LiftoffAssembler::emit_##name(LiftoffRegister dst,        \
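
One detail worth noting in the Liftoff list above: the float entries pass `true, bool` while the new integer entries pass an empty argument and `void`. The natural use of those two parameters is to end each generated hook with `return return_val;`, and since `return ;` is valid in a void function, a single emitter macro can serve both the bool-returning f32x4 hooks and the void-returning integer ones. A self-contained toy reduction of that trick (function names here are illustrative, not V8's):

#include <cstdio>

// List: V(name, return_val, return_type). An empty second argument is legal
// and expands to a bare `return;` inside a void function.
#define UNOP_LIST(V)        \
  V(f32x4_ceil, true, bool) \
  V(i64x2_abs, , void)

#define EMIT_UNOP(name, return_val, return_type) \
  return_type emit_##name() {                    \
    std::printf("emitting %s\n", #name);         \
    return return_val;                           \
  }
UNOP_LIST(EMIT_UNOP)
#undef EMIT_UNOP
#undef UNOP_LIST

int main() {
  emit_i64x2_abs();                  // void hook: expands to `return ;`
  return emit_f32x4_ceil() ? 0 : 1;  // bool hook: expands to `return true;`
}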
@@ -2508,11 +2516,6 @@ void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
   bailout(kSimd, "pmax unimplemented");
 }
 
-void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
-                                      LiftoffRegister src) {
-  bailout(kUnsupportedArchitecture, "emit_i64x2neg");
-}
-
 void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
                                           LiftoffRegister src) {
   bailout(kSimd, "i64x2_alltrue");
@@ -2567,11 +2570,6 @@ void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
   bailout(kSimd, "i64x2_extmul_high_i32x4_u unsupported");
 }
 
-void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
-                                      LiftoffRegister src) {
-  bailout(kUnsupportedArchitecture, "emit_i32x4neg");
-}
-
 void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
                                           LiftoffRegister src) {
   bailout(kSimd, "i32x4_alltrue");
@@ -2622,11 +2620,6 @@ void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst,
   bailout(kSimd, "i32x4_extmul_high_i16x8_u unsupported");
 }
 
-void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
-                                      LiftoffRegister src) {
-  bailout(kUnsupportedArchitecture, "emit_i16x8neg");
-}
-
 void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
                                           LiftoffRegister src) {
   bailout(kSimd, "i16x8_alltrue");
@@ -2714,11 +2707,6 @@ void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
   bailout(kSimd, "i8x16.popcnt");
 }
 
-void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
-                                      LiftoffRegister src) {
-  bailout(kUnsupportedArchitecture, "emit_i8x16neg");
-}
-
 void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
                                          LiftoffRegister src) {
   bailout(kSimd, "v8x16_anytrue");
@@ -2906,26 +2894,6 @@ void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
   bailout(kUnsupportedArchitecture, "emit_i16x8_rounding_average_u");
 }
 
-void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
-                                      LiftoffRegister src) {
-  bailout(kUnsupportedArchitecture, "emit_i8x16_abs");
-}
-
-void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
-                                      LiftoffRegister src) {
-  bailout(kUnsupportedArchitecture, "emit_i16x8_abs");
-}
-
-void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
-                                      LiftoffRegister src) {
-  bailout(kUnsupportedArchitecture, "emit_i32x4_abs");
-}
-
-void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
-                                      LiftoffRegister src) {
-  bailout(kSimd, "i64x2.abs");
-}
-
 void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
   LoadU64(limit_address, MemOperand(limit_address));
   CmpU64(sp, limit_address);
...