Commit 88d48c53 authored by Zhi An Ng, committed by Commit Bot

[wasm-simd][liftoff][arm64] Implement i64x2.bitmask

Move the current code sequence in TurboFan to a macro-assembler helper
function to allow Liftoff to reuse it.
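
For reference, i64x2.bitmask extracts the sign bit of each of the two 64-bit
lanes and packs them into the low two bits of a scalar result. A minimal
scalar sketch of these semantics (an illustration, not V8 code):

#include <cstdint>

// Scalar model of wasm i64x2.bitmask: bit i of the result is the sign
// bit (bit 63) of lane i, so the result is in the range 0..3.
uint32_t I64x2BitMaskScalar(int64_t lane0, int64_t lane1) {
  uint32_t bit0 = static_cast<uint32_t>(static_cast<uint64_t>(lane0) >> 63);
  uint32_t bit1 = static_cast<uint32_t>(static_cast<uint64_t>(lane1) >> 63);
  return bit0 | (bit1 << 1);
}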

Bug: v8:10997
Change-Id: I6205350897a4afc7ca9d0f84fd514be24508aef0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2620905
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72054}
parent 373f4ae7
@@ -3408,6 +3408,16 @@ void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) {
   Str(x17, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
 }
 
+void TurboAssembler::I64x2BitMask(Register dst, VRegister src) {
+  UseScratchRegisterScope scope(this);
+  VRegister tmp1 = scope.AcquireV(kFormat2D);
+  Register tmp2 = scope.AcquireX();
+  Ushr(tmp1.V2D(), src.V2D(), 63);
+  Mov(dst.X(), tmp1.D(), 0);
+  Mov(tmp2.X(), tmp1.D(), 1);
+  Add(dst.W(), dst.W(), Operand(tmp2.W(), LSL, 1));
+}
+
 }  // namespace internal
 }  // namespace v8
...
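The helper mirrors the scalar model above: Ushr by 63 reduces each 64-bit
lane to its sign bit, the two Movs extract lane 0 and lane 1 into
general-purpose registers, and the final Add with a shifted operand combines
them as bit0 | (bit1 << 1). The same sequence as in the hunk above, with
explanatory comments added for illustration (the comments are not part of
the commit):

void TurboAssembler::I64x2BitMask(Register dst, VRegister src) {
  UseScratchRegisterScope scope(this);
  VRegister tmp1 = scope.AcquireV(kFormat2D);
  Register tmp2 = scope.AcquireX();
  // Unsigned shift right by 63: each 64-bit lane becomes 0 or 1
  // (its original sign bit).
  Ushr(tmp1.V2D(), src.V2D(), 63);
  // Extract lane 0 into dst and lane 1 into tmp2.
  Mov(dst.X(), tmp1.D(), 0);
  Mov(tmp2.X(), tmp1.D(), 1);
  // dst = lane0_bit | (lane1_bit << 1).
  Add(dst.W(), dst.W(), Operand(tmp2.W(), LSL, 1));
}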
@@ -1371,6 +1371,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void StoreReturnAddressInWasmExitFrame(Label* return_location);
 
+  // Wasm SIMD helpers. These instructions don't have a direct lowering to
+  // native instructions. These helpers let us define the optimal code
+  // sequence once and use it from both TurboFan and Liftoff.
+  void I64x2BitMask(Register dst, VRegister src);
+
  protected:
   // The actual Push and Pop implementations. These don't generate any code
   // other than that required for the push or pop. This allows
...
@@ -2222,15 +2222,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArm64I64x2BitMask: {
-      UseScratchRegisterScope scope(tasm());
-      Register dst = i.OutputRegister32();
-      VRegister src = i.InputSimd128Register(0);
-      VRegister tmp1 = scope.AcquireV(kFormat2D);
-      Register tmp2 = scope.AcquireX();
-      __ Ushr(tmp1.V2D(), src.V2D(), 63);
-      __ Mov(dst.X(), tmp1.D(), 0);
-      __ Mov(tmp2.X(), tmp1.D(), 1);
-      __ Add(dst.W(), dst.W(), Operand(tmp2.W(), LSL, 1));
+      __ I64x2BitMask(i.OutputRegister32(), i.InputSimd128Register(0));
       break;
     }
     case kArm64I32x4Splat: {
...
@@ -2014,7 +2014,7 @@ void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
 void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
                                           LiftoffRegister src) {
-  bailout(kSimd, "i64x2_bitmask");
+  I64x2BitMask(dst.gp(), src.fp());
 }
 
 void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
...