Commit f89869a2 authored by Zhi An Ng, committed by Commit Bot

[wasm-simd][liftoff][ia32][x64] Implement v128.load_zero

Implement v128.load32_zero and v128.load64_zero on Liftoff, only for
ia32 and x64. ARM will follow.

Bug: v8:11038
Change-Id: I0fad054f462e27eb60825258dad385244b5e5a95
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2486236
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70782}
parent f13641d3
......@@ -2257,6 +2257,8 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
NeonMemOperand(actual_src_addr));
vmovl(NeonU32, liftoff::GetSimd128Register(dst), dst.low_fp());
}
} else if (transform == LoadTransformationKind::kZeroExtend) {
bailout(kSimd, "v128.load_zero unimplemented");
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
......
......@@ -1505,6 +1505,8 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
Ldr(dst.fp().D(), src_op);
Uxtl(dst.fp().V2D(), dst.fp().V2S());
}
} else if (transform == LoadTransformationKind::kZeroExtend) {
bailout(kSimd, "v128.load_zero unimplemented");
} else {
// ld1r only allows no offset or post-index, so emit an add.
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
......
......@@ -2663,6 +2663,13 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
} else if (memtype == MachineType::Uint32()) {
Pmovzxdq(dst.fp(), src_op);
}
} else if (transform == LoadTransformationKind::kZeroExtend) {
if (memtype == MachineType::Int32()) {
movss(dst.fp(), src_op);
} else {
DCHECK_EQ(MachineType::Int64(), memtype);
movsd(dst.fp(), src_op);
}
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
......
......@@ -2291,15 +2291,11 @@ class LiftoffCompiler {
return;
}
if (transform == LoadTransformationKind::kZeroExtend) {
unsupported(decoder, kSimd, "prototyping s128 load zero extend");
return;
}
LiftoffRegList pinned;
Register index = pinned.set(__ PopToRegister()).gp();
// For load splats, LoadType is the size of the load, and for load
// extends, LoadType is the size of the lane, and it always loads 8 bytes.
// For load splats and load zero, LoadType is the size of the load, and for
// load extends, LoadType is the size of the lane, and it always loads 8
// bytes.
uint32_t access_size =
transform == LoadTransformationKind::kExtend ? 8 : type.size();
if (BoundsCheckMem(decoder, access_size, imm.offset, index, pinned,
......
......@@ -2287,6 +2287,13 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
} else if (memtype == MachineType::Uint32()) {
Pmovzxdq(dst.fp(), src_op);
}
} else if (transform == LoadTransformationKind::kZeroExtend) {
if (memtype == MachineType::Int32()) {
Movss(dst.fp(), src_op);
} else {
DCHECK_EQ(MachineType::Int64(), memtype);
Movsd(dst.fp(), src_op);
}
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment