Commit 0f294a8f authored by Milad Fa, committed by V8 LUCI CQ

S390 [liftoff]: Implement simd f64x2 unops

Implementations are added to macro-assembler to be shared between
liftoff and code generator.

Change-Id: Ibe326a80f71cad41dadbb62ebbcb9b8797f1871f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3384540
Reviewed-by: Junliang Yan <junyan@redhat.com>
Commit-Queue: Milad Farazmand <mfarazma@redhat.com>
Cr-Commit-Position: refs/heads/main@{#78593}
parent 0939c67e
......@@ -5228,6 +5228,23 @@ void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
vlvg(dst, src2, MemOperand(r0, 15 - imm_lane_idx), Condition(0));
}
// Table of f64x2 SIMD unary ops emitted through a single VRR-a-style call:
// each entry is (TurboAssembler method, assembler mnemonic, c1, c2, c3),
// where c1..c3 are the Condition() immediates passed to the mnemonic.
// The values mirror the open-coded code-generator cases this commit deletes
// (vfpso: 2 = abs, 0 = neg; vfi: 6 = ceil, 7 = floor, 5 = trunc,
// 4 = nearest-int). The final immediate is 3 for all f64x2 entries
// (the removed f32x4 cases use 2 there — presumably the element-size
// field; confirm against the z/Architecture instruction reference).
#define SIMD_UNOP_LIST_VRR_A(V) \
V(F64x2Abs, vfpso, 2, 0, 3) \
V(F64x2Neg, vfpso, 0, 0, 3) \
V(F64x2Sqrt, vfsq, 0, 0, 3) \
V(F64x2Ceil, vfi, 6, 0, 3) \
V(F64x2Floor, vfi, 7, 0, 3) \
V(F64x2Trunc, vfi, 5, 0, 3) \
V(F64x2NearestInt, vfi, 4, 0, 3)
// Expands each list entry into a TurboAssembler method that forwards
// dst/src and the three condition immediates to the chosen mnemonic.
// These methods are shared by Liftoff and the TurboFan code generator.
#define EMIT_SIMD_UNOP_VRR_A(name, op, c1, c2, c3) \
void TurboAssembler::name(Simd128Register dst, Simd128Register src) { \
op(dst, src, Condition(c1), Condition(c2), Condition(c3)); \
}
SIMD_UNOP_LIST_VRR_A(EMIT_SIMD_UNOP_VRR_A)
#undef EMIT_SIMD_UNOP_VRR_A
#undef SIMD_UNOP_LIST_VRR_A
#define SIMD_BINOP_LIST_VRR_B(V) \
V(I64x2Eq, vceq, 0, 3) \
V(I64x2GtS, vch, 0, 3) \
......
......@@ -1106,6 +1106,21 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
Register src2, uint8_t imm_lane_idx);
// Header-side counterpart of the .cc macro block: the set of f64x2 unary
// ops implemented in the macro-assembler for this port.
#define SIMD_UNOP_LIST(V) \
V(F64x2Abs) \
V(F64x2Neg) \
V(F64x2Sqrt) \
V(F64x2Ceil) \
V(F64x2Floor) \
V(F64x2Trunc) \
V(F64x2NearestInt)
// Declares `void <name>(Simd128Register dst, Simd128Register src);`
// for every entry above; definitions are generated in the .cc file.
#define PROTOTYPE_SIMD_UNOP(name) \
void name(Simd128Register dst, Simd128Register src);
SIMD_UNOP_LIST(PROTOTYPE_SIMD_UNOP)
#undef PROTOTYPE_SIMD_UNOP
#undef SIMD_UNOP_LIST
#define SIMD_BINOP_LIST(V) \
V(F64x2Add, Simd128Register) \
V(F64x2Sub, Simd128Register) \
......
......@@ -2643,18 +2643,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef EMIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
// NOTE(review): diff residue — the next two lists/macros are the OLD
// (pre-commit) versions with their removal markers stripped; the old
// EMIT_SIMD_UNOP's closing `}` is not shown here because the diff shares
// it with the new macro below. Only the second pair exists after the commit.
#define SIMD_UNOP_LIST(V) \
V(F64x2Splat, F64x2Splat, Simd128Register, DoubleRegister) \
V(F32x4Splat, F32x4Splat, Simd128Register, DoubleRegister) \
V(I64x2Splat, I64x2Splat, Simd128Register, Register) \
V(I32x4Splat, I32x4Splat, Simd128Register, Register) \
V(I16x8Splat, I16x8Splat, Simd128Register, Register) \
V(I8x16Splat, I8x16Splat, Simd128Register, Register)
// OLD emitter: took a separate `op` argument naming the assembler call.
// The NEW list below drops it because the case name and the
// TurboAssembler method name are now identical.
#define EMIT_SIMD_UNOP(name, op, dtype, stype) \
case kS390_##name: { \
__ op(i.Output##dtype(), i.Input##stype(0)); \
break; \
#define SIMD_UNOP_LIST(V) \
V(F64x2Splat, Simd128Register, DoubleRegister) \
V(F32x4Splat, Simd128Register, DoubleRegister) \
V(I64x2Splat, Simd128Register, Register) \
V(I32x4Splat, Simd128Register, Register) \
V(I16x8Splat, Simd128Register, Register) \
V(I8x16Splat, Simd128Register, Register) \
V(F64x2Abs, Simd128Register, Simd128Register) \
V(F64x2Neg, Simd128Register, Simd128Register) \
V(F64x2Sqrt, Simd128Register, Simd128Register) \
V(F64x2Ceil, Simd128Register, Simd128Register) \
V(F64x2Floor, Simd128Register, Simd128Register) \
V(F64x2Trunc, Simd128Register, Simd128Register) \
V(F64x2NearestInt, Simd128Register, Simd128Register)
// NEW emitter: each case forwards output/input(0) straight to the
// same-named TurboAssembler helper (e.g. kS390_F64x2Abs -> __ F64x2Abs).
#define EMIT_SIMD_UNOP(name, dtype, stype) \
case kS390_##name: { \
__ name(i.Output##dtype(), i.Input##stype(0)); \
break; \
}
// Instantiates one switch case per list entry.
SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
#undef EMIT_SIMD_UNOP
......@@ -2742,21 +2749,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
// vector unary ops
// NOTE(review): the three open-coded cases below are the lines this commit
// DELETES; their Condition() immediates (vfpso 2/0/3 for abs, 0/0/3 for
// neg, vfsq 0/0/3 for sqrt) now live in SIMD_UNOP_LIST_VRR_A in the
// macro-assembler, and the switch dispatches via EMIT_SIMD_UNOP instead.
case kS390_F64x2Abs: {
__ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
Condition(2), Condition(0), Condition(3));
break;
}
case kS390_F64x2Neg: {
__ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
Condition(0), Condition(0), Condition(3));
break;
}
case kS390_F64x2Sqrt: {
__ vfsq(i.OutputSimd128Register(), i.InputSimd128Register(0),
Condition(0), Condition(0), Condition(3));
break;
}
case kS390_F32x4Abs: {
__ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
Condition(2), Condition(0), Condition(2));
......@@ -3274,26 +3266,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(3));
break;
}
// NOTE(review): deleted by this commit — the vfi rounding-mode immediates
// (6 = ceil, 7 = floor, 5 = trunc, 4 = nearest-int, each with trailing
// Condition(3) for f64x2) are now table-driven in SIMD_UNOP_LIST_VRR_A
// inside the macro-assembler.
case kS390_F64x2Ceil: {
__ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(6),
Condition(0), Condition(3));
break;
}
case kS390_F64x2Floor: {
__ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(7),
Condition(0), Condition(3));
break;
}
case kS390_F64x2Trunc: {
__ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(5),
Condition(0), Condition(3));
break;
}
case kS390_F64x2NearestInt: {
__ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(4),
Condition(0), Condition(3));
break;
}
case kS390_F32x4Ceil: {
__ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(6),
Condition(0), Condition(2));
......
......@@ -2376,18 +2376,26 @@ SIMD_BINOP_RI_LIST(EMIT_SIMD_BINOP_RI)
#undef EMIT_SIMD_BINOP_RI
#undef SIMD_BINOP_RI_LIST
// NOTE(review): diff residue — the first list/macro pair is the OLD
// Liftoff version (removal markers stripped; its closing `}` is shared
// diff context with the new macro). Only the second pair survives.
#define SIMD_UNOP_LIST(V) \
V(f64x2_splat, F64x2Splat, fp, fp) \
V(f32x4_splat, F32x4Splat, fp, fp) \
V(i64x2_splat, I64x2Splat, fp, gp) \
V(i32x4_splat, I32x4Splat, fp, gp) \
V(i16x8_splat, I16x8Splat, fp, gp) \
V(i8x16_splat, I8x16Splat, fp, gp)
// OLD emitter: void-only. The NEW list gains two trailing columns,
// `return_val` and `return_type`, so one macro can generate both
// `void emit_*` methods (empty return_val => bare `return;`) and the
// `bool emit_*` rounding methods (return_val `true`, matching the
// LiftoffAssembler interface for f64x2 ceil/floor/trunc/nearest_int).
#define EMIT_SIMD_UNOP(name, op, dtype, stype) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, \
LiftoffRegister src) { \
op(dst.dtype(), src.stype()); \
#define SIMD_UNOP_LIST(V) \
V(f64x2_splat, F64x2Splat, fp, fp, , void) \
V(f32x4_splat, F32x4Splat, fp, fp, , void) \
V(i64x2_splat, I64x2Splat, fp, gp, , void) \
V(i32x4_splat, I32x4Splat, fp, gp, , void) \
V(i16x8_splat, I16x8Splat, fp, gp, , void) \
V(i8x16_splat, I8x16Splat, fp, gp, , void) \
V(f64x2_abs, F64x2Abs, fp, fp, , void) \
V(f64x2_neg, F64x2Neg, fp, fp, , void) \
V(f64x2_sqrt, F64x2Sqrt, fp, fp, , void) \
V(f64x2_ceil, F64x2Ceil, fp, fp, true, bool) \
V(f64x2_floor, F64x2Floor, fp, fp, true, bool) \
V(f64x2_trunc, F64x2Trunc, fp, fp, true, bool) \
V(f64x2_nearest_int, F64x2NearestInt, fp, fp, true, bool)
// NEW emitter: forwards dst/src (as fp or gp registers per the list) to
// the shared TurboAssembler helper, then returns `return_val`.
#define EMIT_SIMD_UNOP(name, op, dtype, stype, return_val, return_type) \
return_type LiftoffAssembler::emit_##name(LiftoffRegister dst, \
LiftoffRegister src) { \
op(dst.dtype(), src.stype()); \
return return_val; \
}
// Instantiates every emit_* method declared in the list above.
SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
#undef EMIT_SIMD_UNOP
......@@ -2458,45 +2466,6 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i8x16_swizzle");
}
// NOTE(review): the seven stubs below are DELETED by this commit — each
// previously bailed out of Liftoff compilation; they are replaced by the
// macro-generated emit_* methods above, which now use the real s390
// instructions. (Observation: the bailout strings "emit_f64x2neg" /
// "emit_f64x2sqrt" were missing an underscore vs. "emit_f64x2_abs";
// moot once deleted.)
void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_f64x2_abs");
}
void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_f64x2neg");
}
void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_f64x2sqrt");
}
// The four rounding stubs return bool ("supported?") — they bailed out yet
// returned true; the macro-generated replacements also return true but
// emit real code.
bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "f64x2.ceil");
return true;
}
bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "f64x2.floor");
return true;
}
bool LiftoffAssembler::emit_f64x2_trunc(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "f64x2.trunc");
return true;
}
bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "f64x2.nearest_int");
return true;
}
void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "pmin unimplemented");
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment