Commit 60abb1ec authored by Milad Farazmand, committed by Commit Bot

PPC: [wasm-simd] Use memory for splatting 64x2 lanes

Change-Id: Iaa0075ea16c289a369b354f2f44b3d1161f2faac
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2273581
Reviewed-by: Junliang Yan <jyan@ca.ibm.com>
Commit-Queue: Milad Farazmand <miladfar@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#68597}
parent dec44545
...@@ -2155,18 +2155,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2155,18 +2155,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif // V8_TARGET_ARCH_PPC64 #endif // V8_TARGET_ARCH_PPC64
case kPPC_F64x2Splat: { case kPPC_F64x2Splat: {
Simd128Register dst = i.OutputSimd128Register(); Simd128Register dst = i.OutputSimd128Register();
constexpr int shift_bits = 64; __ MovDoubleToInt64(ip, i.InputDoubleRegister(0));
__ MovDoubleToInt64(r0, i.InputDoubleRegister(0)); // Need to maintain 16 byte alignment for lvx.
__ mtvsrd(dst, r0); __ addi(sp, sp, Operand(-24));
// right shift __ StoreP(ip, MemOperand(sp, 0));
__ li(ip, Operand(shift_bits)); __ StoreP(ip, MemOperand(sp, 8));
__ mtvsrd(kScratchDoubleReg, ip); __ li(r0, Operand(0));
__ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7)); __ lvx(dst, MemOperand(sp, r0));
__ vsro(dst, dst, kScratchDoubleReg); __ addi(sp, sp, Operand(24));
// reload
__ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ mtvsrd(kScratchDoubleReg, r0);
__ vor(dst, dst, kScratchDoubleReg);
break; break;
} }
case kPPC_F32x4Splat: { case kPPC_F32x4Splat: {
...@@ -2179,17 +2175,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2179,17 +2175,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kPPC_I64x2Splat: { case kPPC_I64x2Splat: {
Register src = i.InputRegister(0); Register src = i.InputRegister(0);
Simd128Register dst = i.OutputSimd128Register(); Simd128Register dst = i.OutputSimd128Register();
constexpr int shift_bits = 64; // Need to maintain 16 byte alignment for lvx.
__ mtvsrd(dst, src); __ addi(sp, sp, Operand(-24));
// right shift __ StoreP(src, MemOperand(sp, 0));
__ li(ip, Operand(shift_bits)); __ StoreP(src, MemOperand(sp, 8));
__ mtvsrd(kScratchDoubleReg, ip); __ li(r0, Operand(0));
__ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7)); __ lvx(dst, MemOperand(sp, r0));
__ vsro(dst, dst, kScratchDoubleReg); __ addi(sp, sp, Operand(24));
// reload
__ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ mtvsrd(kScratchDoubleReg, src);
__ vor(dst, dst, kScratchDoubleReg);
break; break;
} }
case kPPC_I32x4Splat: { case kPPC_I32x4Splat: {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment