Commit 4765c70f authored by Andreas Haas, committed by Commit Bot

[wasm][arm][arm64][liftoff] Allow loads from negative indices

On arm, the root register points into the middle of the roots array to
allow use of the full int12_t offset range. As a result, some offsets
into the roots array are negative. This CL changes the Liftoff assembler
for arm to allow loads from negative offsets.
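
To see why the bias forces negative offsets, here is a minimal
standalone sketch; it is not V8's actual code, and kNumRoots, kRootBias,
and RootOffset are illustrative assumptions. Biasing the root register
into the middle of the roots array makes both ends reachable within
arm's signed 12-bit immediate range, at the cost of negative offsets for
the lower half:

#include <cassert>
#include <cstdint>

constexpr int kNumRoots = 512;         // hypothetical number of roots
constexpr int kSystemPointerSize = 4;  // 32-bit arm

// Bias the root register into the middle of the roots array so that both
// halves are reachable with a signed 12-bit immediate ([-4096, 4095]).
constexpr int32_t kRootBias = (kNumRoots / 2) * kSystemPointerSize;

// Offset of root |index| relative to the biased root register.
constexpr int32_t RootOffset(int index) {
  return index * kSystemPointerSize - kRootBias;
}

int main() {
  assert(RootOffset(0) < 0);              // low roots: negative offsets
  assert(RootOffset(kNumRoots - 1) > 0);  // high roots: positive offsets
  for (int i = 0; i < kNumRoots; ++i) {
    int32_t off = RootOffset(i);
    assert(off >= -4096 && off <= 4095);  // all fit the immediate range
  }
  return 0;
}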

On arm64, offsets can also be negative when pointer compression is
disabled.
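
The failure mode being fixed is easy to demonstrate. The sketch below is
illustrative, not V8 code: with a uint32_t parameter, a negative offset
is zero-extended into 64-bit address arithmetic on arm64 and lands ~4GB
away, whereas an int32_t parameter sign-extends correctly:

#include <cassert>
#include <cstdint>

uint64_t EffectiveAddressBroken(uint64_t base, uint32_t offset_imm) {
  return base + offset_imm;  // zero-extends: -8 arrives as 0xFFFFFFF8
}

uint64_t EffectiveAddressFixed(uint64_t base, int32_t offset_imm) {
  return base + offset_imm;  // sign-extends: -8 really subtracts 8
}

int main() {
  uint64_t base = 0x10000;
  assert(EffectiveAddressFixed(base, -8) == base - 8);
  assert(EffectiveAddressBroken(base, static_cast<uint32_t>(-8)) ==
         base + 0xFFFFFFF8ull);
  return 0;
}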

Additionally, this CL changes the signature of
LiftoffAssembler::LoadTaggedPointer from uint32_t to int32_t so that the
LiftoffCompiler can provide negative indices.
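
The resulting call chain on arm can be summarized as follows. This is a
simplified sketch whose names mirror the diff below, not the real
interfaces: Load keeps its uint32_t parameter and range-checks it,
LoadTaggedPointer accepts int32_t so callers may pass negative root
offsets, and both funnel into a shared helper taking int32_t:

#include <cstdint>
#include <cstdlib>

void LoadInternal(int32_t offset_imm) {
  // ...emit the actual load; offset_imm may be negative...
}

void LoadTaggedPointer(int32_t offset_imm) {
  LoadInternal(offset_imm);  // negative offsets flow through unchanged
}

void Load(uint32_t offset_imm) {
  // Reject offsets >= 2GB before converting to signed (mirrors the
  // is_uint31 check + Abort in the diff).
  if (offset_imm > static_cast<uint32_t>(INT32_MAX)) std::abort();
  LoadInternal(static_cast<int32_t>(offset_imm));
}

int main() {
  Load(16);
  LoadTaggedPointer(-16);
  return 0;
}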

This CL does not come with a separate test yet. However, this change is
needed for https://crrev.com/c/2352784, where a test will also be added.

R=thibaudm@chromium.org

Bug: v8:7581
Change-Id: I0a97a62ff8e934d45a4494adfbc74a3e1149c8c1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2359429
Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
Commit-Queue: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69453}
parent 8e8b2772
@@ -554,112 +554,128 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
ldr(dst, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
uint32_t offset_imm,
LiftoffRegList pinned) {
STATIC_ASSERT(kTaggedSize == kInt32Size);
Load(LiftoffRegister(dst), src_addr, offset_reg, offset_imm,
LoadType::kI32Load, pinned);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
namespace liftoff {
#define __ lasm->
inline void LoadInternal(LiftoffAssembler* lasm, LiftoffRegister dst,
Register src_addr, Register offset_reg,
int32_t offset_imm, LoadType type,
LiftoffRegList pinned,
uint32_t* protected_load_pc = nullptr,
bool is_load_mem = false) {
DCHECK_IMPLIES(type.value_type() == kWasmI64, dst.is_gp_pair());
// If offset_imm cannot be converted to int32 safely, we abort as a separate
// check should cause this code to never be executed.
// TODO(7881): Support when >2GB is required.
if (!is_uint31(offset_imm)) {
TurboAssembler::Abort(AbortReason::kOffsetOutOfRange);
return;
}
UseScratchRegisterScope temps(this);
UseScratchRegisterScope temps(lasm);
if (type.value() == LoadType::kF64Load ||
type.value() == LoadType::kF32Load ||
type.value() == LoadType::kS128Load) {
Register actual_src_addr = liftoff::CalculateActualAddress(
this, &temps, src_addr, offset_reg, offset_imm);
lasm, &temps, src_addr, offset_reg, offset_imm);
if (type.value() == LoadType::kF64Load) {
// Armv6 is not supported so Neon can be used to avoid alignment issues.
CpuFeatureScope scope(this, NEON);
vld1(Neon64, NeonListOperand(dst.fp()), NeonMemOperand(actual_src_addr));
CpuFeatureScope scope(lasm, NEON);
__ vld1(Neon64, NeonListOperand(dst.fp()),
NeonMemOperand(actual_src_addr));
} else if (type.value() == LoadType::kF32Load) {
// TODO(arm): Use vld1 for f32 when implemented in simulator as used for
// f64. It supports unaligned access.
Register scratch =
(actual_src_addr == src_addr) ? temps.Acquire() : actual_src_addr;
ldr(scratch, MemOperand(actual_src_addr));
vmov(liftoff::GetFloatRegister(dst.fp()), scratch);
__ ldr(scratch, MemOperand(actual_src_addr));
__ vmov(liftoff::GetFloatRegister(dst.fp()), scratch);
} else {
// Armv6 is not supported so Neon can be used to avoid alignment issues.
CpuFeatureScope scope(this, NEON);
vld1(Neon8, NeonListOperand(dst.low_fp(), 2),
NeonMemOperand(actual_src_addr));
CpuFeatureScope scope(lasm, NEON);
__ vld1(Neon8, NeonListOperand(dst.low_fp(), 2),
NeonMemOperand(actual_src_addr));
}
} else {
MemOperand src_op =
liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
if (protected_load_pc) *protected_load_pc = pc_offset();
liftoff::GetMemOp(lasm, &temps, src_addr, offset_reg, offset_imm);
if (protected_load_pc) *protected_load_pc = __ pc_offset();
switch (type.value()) {
case LoadType::kI32Load8U:
ldrb(dst.gp(), src_op);
__ ldrb(dst.gp(), src_op);
break;
case LoadType::kI64Load8U:
ldrb(dst.low_gp(), src_op);
mov(dst.high_gp(), Operand(0));
__ ldrb(dst.low_gp(), src_op);
__ mov(dst.high_gp(), Operand(0));
break;
case LoadType::kI32Load8S:
ldrsb(dst.gp(), src_op);
__ ldrsb(dst.gp(), src_op);
break;
case LoadType::kI64Load8S:
ldrsb(dst.low_gp(), src_op);
asr(dst.high_gp(), dst.low_gp(), Operand(31));
__ ldrsb(dst.low_gp(), src_op);
__ asr(dst.high_gp(), dst.low_gp(), Operand(31));
break;
case LoadType::kI32Load16U:
ldrh(dst.gp(), src_op);
__ ldrh(dst.gp(), src_op);
break;
case LoadType::kI64Load16U:
ldrh(dst.low_gp(), src_op);
mov(dst.high_gp(), Operand(0));
__ ldrh(dst.low_gp(), src_op);
__ mov(dst.high_gp(), Operand(0));
break;
case LoadType::kI32Load16S:
ldrsh(dst.gp(), src_op);
__ ldrsh(dst.gp(), src_op);
break;
case LoadType::kI32Load:
ldr(dst.gp(), src_op);
__ ldr(dst.gp(), src_op);
break;
case LoadType::kI64Load16S:
ldrsh(dst.low_gp(), src_op);
asr(dst.high_gp(), dst.low_gp(), Operand(31));
__ ldrsh(dst.low_gp(), src_op);
__ asr(dst.high_gp(), dst.low_gp(), Operand(31));
break;
case LoadType::kI64Load32U:
ldr(dst.low_gp(), src_op);
mov(dst.high_gp(), Operand(0));
__ ldr(dst.low_gp(), src_op);
__ mov(dst.high_gp(), Operand(0));
break;
case LoadType::kI64Load32S:
ldr(dst.low_gp(), src_op);
asr(dst.high_gp(), dst.low_gp(), Operand(31));
__ ldr(dst.low_gp(), src_op);
__ asr(dst.high_gp(), dst.low_gp(), Operand(31));
break;
case LoadType::kI64Load:
ldr(dst.low_gp(), src_op);
__ ldr(dst.low_gp(), src_op);
// GetMemOp may use a scratch register as the offset register, in which
// case, calling GetMemOp again will fail due to the assembler having
run out of scratch registers.
if (temps.CanAcquire()) {
src_op = liftoff::GetMemOp(this, &temps, src_addr, offset_reg,
src_op = liftoff::GetMemOp(lasm, &temps, src_addr, offset_reg,
offset_imm + kSystemPointerSize);
} else {
add(src_op.rm(), src_op.rm(), Operand(kSystemPointerSize));
__ add(src_op.rm(), src_op.rm(), Operand(kSystemPointerSize));
}
ldr(dst.high_gp(), src_op);
__ ldr(dst.high_gp(), src_op);
break;
default:
UNREACHABLE();
}
}
}
#undef __
} // namespace liftoff
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegList pinned) {
STATIC_ASSERT(kTaggedSize == kInt32Size);
liftoff::LoadInternal(this, LiftoffRegister(dst), src_addr, offset_reg,
offset_imm, LoadType::kI32Load, pinned);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
// If offset_imm cannot be converted to int32 safely, we abort as a separate
// check should cause this code to never be executed.
// TODO(7881): Support when >2GB is required.
if (!is_uint31(offset_imm)) {
TurboAssembler::Abort(AbortReason::kOffsetOutOfRange);
return;
}
liftoff::LoadInternal(this, dst, src_addr, offset_reg,
static_cast<int32_t>(offset_imm), type, pinned,
protected_load_pc, is_load_mem);
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
......
@@ -88,11 +88,10 @@ inline CPURegister AcquireByType(UseScratchRegisterScope* temps,
}
}
template <typename T>
inline MemOperand GetMemOp(LiftoffAssembler* assm,
UseScratchRegisterScope* temps, Register addr,
Register offset, uint32_t offset_imm) {
// Wasm memory is limited to a size <4GB.
DCHECK(is_uint32(offset_imm));
Register offset, T offset_imm) {
if (offset.is_valid()) {
if (offset_imm == 0) return MemOperand(addr.X(), offset.W(), UXTW);
Register tmp = temps->AcquireW();
@@ -335,7 +334,7 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
uint32_t offset_imm,
int32_t offset_imm,
LiftoffRegList pinned) {
UseScratchRegisterScope temps(this);
MemOperand src_op =
......
@@ -279,11 +279,12 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
uint32_t offset_imm,
int32_t offset_imm,
LiftoffRegList pinned) {
DCHECK_GE(offset_imm, 0);
STATIC_ASSERT(kTaggedSize == kInt32Size);
Load(LiftoffRegister(dst), src_addr, offset_reg, offset_imm,
LoadType::kI32Load, pinned);
Load(LiftoffRegister(dst), src_addr, offset_reg,
static_cast<uint32_t>(offset_imm), LoadType::kI32Load, pinned);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
......
@@ -479,7 +479,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void SpillInstance(Register instance);
inline void FillInstanceInto(Register dst);
inline void LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
Register offset_reg, int32_t offset_imm,
LiftoffRegList pinned);
inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
uint32_t offset_imm, LoadType type, LiftoffRegList pinned,
......
@@ -387,11 +387,12 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
uint32_t offset_imm,
int32_t offset_imm,
LiftoffRegList pinned) {
DCHECK_GE(offset_imm, 0);
STATIC_ASSERT(kTaggedSize == kInt32Size);
Load(LiftoffRegister(dst), src_addr, offset_reg, offset_imm,
LoadType::kI32Load, pinned);
Load(LiftoffRegister(dst), src_addr, offset_reg,
static_cast<uint32_t>(offset_imm), LoadType::kI32Load, pinned);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
......
@@ -351,11 +351,12 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
uint32_t offset_imm,
int32_t offset_imm,
LiftoffRegList pinned) {
DCHECK_GE(offset_imm, 0);
STATIC_ASSERT(kTaggedSize == kInt64Size);
Load(LiftoffRegister(dst), src_addr, offset_reg, offset_imm,
LoadType::kI64Load, pinned);
Load(LiftoffRegister(dst), src_addr, offset_reg,
static_cast<uint32_t>(offset_imm), LoadType::kI64Load, pinned);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
......
@@ -260,12 +260,14 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
uint32_t offset_imm,
int32_t offset_imm,
LiftoffRegList pinned) {
DCHECK_GE(offset_imm, 0);
if (emit_debug_code() && offset_reg != no_reg) {
AssertZeroExtended(offset_reg);
}
Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg,
static_cast<uint32_t>(offset_imm));
LoadTaggedPointerField(dst, src_op);
}
......
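
For the non-arm backends above, which never see negative tagged offsets,
the new signed parameter is simply checked and cast back. A minimal
illustration of that pattern (not V8 code):

#include <cassert>
#include <cstdint>

// Round-tripping a non-negative int32_t through uint32_t is lossless,
// which is what DCHECK_GE(offset_imm, 0) + static_cast<uint32_t> rely on.
uint32_t ToUnsignedOffset(int32_t offset_imm) {
  assert(offset_imm >= 0);  // mirrors DCHECK_GE(offset_imm, 0)
  return static_cast<uint32_t>(offset_imm);
}

int main() {
  assert(ToUnsignedOffset(128) == 128u);
  return 0;
}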