Commit d5a94229 authored by Milad Fa, committed by V8 LUCI CQ

PPC[liftoff]: init simd fp unary operations

Change-Id: Ie5de6f4b7415c67eb77aa4b6e29764c595fca766
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3905123
Reviewed-by: Junliang Yan <junyan@redhat.com>
Commit-Queue: Milad Farazmand <mfarazma@redhat.com>
Cr-Commit-Position: refs/heads/main@{#83366}
parent 197471fa
@@ -3768,8 +3768,16 @@ SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
 #define SIMD_UNOP_LIST(V) \
   V(F64x2Abs, xvabsdp)    \
   V(F64x2Neg, xvnegdp)    \
+  V(F64x2Sqrt, xvsqrtdp)  \
+  V(F64x2Ceil, xvrdpip)   \
+  V(F64x2Floor, xvrdpim)  \
+  V(F64x2Trunc, xvrdpiz)  \
   V(F32x4Abs, xvabssp)    \
   V(F32x4Neg, xvnegsp)    \
+  V(F32x4Sqrt, xvsqrtsp)  \
+  V(F32x4Ceil, xvrspip)   \
+  V(F32x4Floor, xvrspim)  \
+  V(F32x4Trunc, xvrspiz)  \
   V(I64x2Neg, vnegd)      \
   V(I32x4Neg, vnegw)
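The two-argument list above pairs each opcode name with the PPC/VSX instruction that implements it. The expansion macro itself is outside this diff, so the following is only a hedged sketch of how such a list is typically turned into TurboAssembler helpers:

#define EMIT_SIMD_UNOP(name, op)                                         \
  void TurboAssembler::name(Simd128Register dst, Simd128Register src) { \
    op(dst, src); /* e.g. F64x2Sqrt emits xvsqrtdp, F32x4Ceil xvrspip */ \
  }
SIMD_UNOP_LIST(EMIT_SIMD_UNOP)
#undef EMIT_SIMD_UNOP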
@@ -1163,8 +1163,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 #define SIMD_UNOP_LIST(V) \
   V(F64x2Abs)             \
   V(F64x2Neg)             \
+  V(F64x2Sqrt)            \
+  V(F64x2Ceil)            \
+  V(F64x2Floor)           \
+  V(F64x2Trunc)           \
   V(F32x4Abs)             \
   V(F32x4Neg)             \
+  V(F32x4Sqrt)            \
+  V(F32x4Ceil)            \
+  V(F32x4Floor)           \
+  V(F32x4Trunc)           \
   V(I64x2Neg)             \
   V(I32x4Neg)
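In the header, the list carries only the opcode names, which is enough to declare one prototype per entry. A minimal sketch, assuming a declaration macro along these lines (the macro name PROTOTYPE_SIMD_UNOP is an assumption, not taken from the diff):

#define PROTOTYPE_SIMD_UNOP(name) \
  void name(Simd128Register dst, Simd128Register src);
SIMD_UNOP_LIST(PROTOTYPE_SIMD_UNOP)
#undef PROTOTYPE_SIMD_UNOP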
@@ -2282,10 +2282,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
 #define SIMD_UNOP_LIST(V) \
   V(F64x2Abs)             \
   V(F64x2Neg)             \
+  V(F64x2Sqrt)            \
+  V(F64x2Ceil)            \
+  V(F64x2Floor)           \
+  V(F64x2Trunc)           \
   V(F32x4Abs)             \
   V(F32x4Neg)             \
   V(I64x2Neg)             \
-  V(I32x4Neg)
+  V(I32x4Neg)             \
+  V(F32x4Sqrt)            \
+  V(F32x4Ceil)            \
+  V(F32x4Floor)           \
+  V(F32x4Trunc)
 #define EMIT_SIMD_UNOP(name) \
   case kPPC_##name: { \
@@ -2567,14 +2575,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ vsel(dst, src2, src1, mask);
       break;
     }
-    case kPPC_F64x2Sqrt: {
-      __ xvsqrtdp(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      break;
-    }
-    case kPPC_F32x4Sqrt: {
-      __ xvsqrtsp(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      break;
-    }
     case kPPC_V128AnyTrue: {
       Simd128Register src = i.InputSimd128Register(0);
       Register dst = i.OutputRegister();
@@ -2872,30 +2872,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ vandc(dst, src, i.InputSimd128Register(1));
       break;
     }
-    case kPPC_F64x2Ceil: {
-      __ xvrdpip(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      break;
-    }
-    case kPPC_F64x2Floor: {
-      __ xvrdpim(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      break;
-    }
-    case kPPC_F64x2Trunc: {
-      __ xvrdpiz(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      break;
-    }
-    case kPPC_F32x4Ceil: {
-      __ xvrspip(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      break;
-    }
-    case kPPC_F32x4Floor: {
-      __ xvrspim(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      break;
-    }
-    case kPPC_F32x4Trunc: {
-      __ xvrspiz(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      break;
-    }
     case kPPC_I64x2BitMask: {
       if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
         __ vextractdm(i.OutputRegister(), i.InputSimd128Register(0));
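The hand-written cases removed above are now covered by the EMIT_SIMD_UNOP(name) expansion over the extended SIMD_UNOP_LIST. The full macro body is not part of this diff; a hedged sketch of the generated case for one opcode, assuming it dispatches through the corresponding TurboAssembler helper rather than the raw instruction:

    case kPPC_F64x2Sqrt: {
      __ F64x2Sqrt(i.OutputSimd128Register(), i.InputSimd128Register(0));
      break;
    }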
@@ -1879,10 +1879,18 @@ SIMD_SHIFT_RI_LIST(EMIT_SIMD_SHIFT_RI)
 #define SIMD_UNOP_LIST(V)                        \
   V(f64x2_abs, F64x2Abs, fp, fp, , void)         \
   V(f64x2_neg, F64x2Neg, fp, fp, , void)         \
+  V(f64x2_sqrt, F64x2Sqrt, fp, fp, , void)       \
+  V(f64x2_ceil, F64x2Ceil, fp, fp, true, bool)   \
+  V(f64x2_floor, F64x2Floor, fp, fp, true, bool) \
+  V(f64x2_trunc, F64x2Trunc, fp, fp, true, bool) \
   V(f32x4_abs, F32x4Abs, fp, fp, , void)         \
   V(f32x4_neg, F32x4Neg, fp, fp, , void)         \
   V(i64x2_neg, I64x2Neg, fp, fp, , void)         \
-  V(i32x4_neg, I32x4Neg, fp, fp, , void)
+  V(i32x4_neg, I32x4Neg, fp, fp, , void)         \
+  V(f32x4_sqrt, F32x4Sqrt, fp, fp, , void)       \
+  V(f32x4_ceil, F32x4Ceil, fp, fp, true, bool)   \
+  V(f32x4_floor, F32x4Floor, fp, fp, true, bool) \
+  V(f32x4_trunc, F32x4Trunc, fp, fp, true, bool)
 #define EMIT_SIMD_UNOP(name, op, dtype, stype, return_val, return_type) \
   return_type LiftoffAssembler::emit_##name(LiftoffRegister dst,        \
@@ -2222,29 +2230,6 @@ void LiftoffAssembler::emit_s128_relaxed_laneselect(LiftoffRegister dst,
   bailout(kRelaxedSimd, "emit_s128_relaxed_laneselect");
 }
 
-void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
-                                       LiftoffRegister src) {
-  bailout(kUnsupportedArchitecture, "emit_f64x2sqrt");
-}
-
-bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst,
-                                       LiftoffRegister src) {
-  bailout(kSimd, "f64x2.ceil");
-  return true;
-}
-
-bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst,
-                                        LiftoffRegister src) {
-  bailout(kSimd, "f64x2.floor");
-  return true;
-}
-
-bool LiftoffAssembler::emit_f64x2_trunc(LiftoffRegister dst,
-                                        LiftoffRegister src) {
-  bailout(kSimd, "f64x2.trunc");
-  return true;
-}
-
 bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
                                               LiftoffRegister src) {
   bailout(kSimd, "f64x2.nearest_int");
@@ -2288,29 +2273,6 @@ void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
   bailout(kSimd, "f64x2.promote_low_f32x4");
 }
 
-void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
-                                       LiftoffRegister src) {
-  bailout(kUnsupportedArchitecture, "emit_f32x4sqrt");
-}
-
-bool LiftoffAssembler::emit_f32x4_ceil(LiftoffRegister dst,
-                                       LiftoffRegister src) {
-  bailout(kSimd, "f32x4.ceil");
-  return true;
-}
-
-bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst,
-                                        LiftoffRegister src) {
-  bailout(kSimd, "f32x4.floor");
-  return true;
-}
-
-bool LiftoffAssembler::emit_f32x4_trunc(LiftoffRegister dst,
-                                        LiftoffRegister src) {
-  bailout(kSimd, "f32x4.trunc");
-  return true;
-}
-
 bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
                                               LiftoffRegister src) {
   bailout(kSimd, "f32x4.nearest_int");
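These Liftoff bailout stubs are superseded by the new entries in SIMD_UNOP_LIST, so EMIT_SIMD_UNOP now generates the implementations instead. A hedged sketch of the expansion for one tuple, V(f64x2_ceil, F64x2Ceil, fp, fp, true, bool), where dtype/stype select the fp registers and return_val/return_type supply the bool result the rounding ops report; the register-accessor spelling below is an assumption, not taken from the diff:

bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst, LiftoffRegister src) {
  F64x2Ceil(dst.fp().toSimd(), src.fp().toSimd());  // assumed accessor; delegates to the TurboAssembler helper
  return true;  // return_val for the rounding ops
}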