Commit 044a18ac authored by Clemens Backes, committed by V8 LUCI CQ

[wasm] Fix 64-bit addressed stores on arm64

The {LiftoffAssembler::Load} method already receives an {i64_offset}
parameter which skips the UXTW (zero extension of 32-bit addresses) in
the memory operand. The same needs to happen on stores.
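For illustration, here is a small standalone C++ model (not V8 code; all names are made up) of why the UXTW extension is wrong for memory64: a [base, Wn, UXTW] operand only sees the low 32 bits of the index, so a 64-bit index >= 4GB silently wraps, while using the full X register computes the intended address.

// illustrative_uxtw_truncation.cc -- standalone sketch, not part of V8.
#include <cstdint>
#include <cstdio>

// Models [base, Wn, UXTW]: only the low 32 bits of the index are used.
uint64_t AddressWithUxtw(uint64_t base, uint64_t index) {
  return base + static_cast<uint32_t>(index);
}

// Models [base, Xn]: the full 64-bit index is used.
uint64_t AddressWithX64Index(uint64_t base, uint64_t index) {
  return base + index;
}

int main() {
  const uint64_t base = 0x7f00'0000'0000;  // hypothetical memory start
  const uint64_t index = 0x1'0000'0010;    // memory64 index just above 4GB
  std::printf("UXTW index: 0x%llx\n",
              static_cast<unsigned long long>(AddressWithUxtw(base, index)));
  std::printf("X index:    0x%llx\n",
              static_cast<unsigned long long>(AddressWithX64Index(base, index)));
  return 0;
}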

On 32-bit platforms, we cannot have addresses >=4GB anyway (they would
be detected as OOB before reaching the point in question), so this is
not a problem. On x64, all 32-bit registers are zero-extended already
(which is debug-checked in the generated code), so this is not a problem
either (we just ignore the additional parameter).
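As a rough standalone model (not V8 code) of the x64 argument: any write to a 32-bit register clears bits 63:32 of the containing 64-bit register, so a 32-bit index held in a register is already a valid 64-bit offset; that is the invariant the debug check relies on.

// x64_zero_extension_model.cc -- standalone sketch, not part of V8.
#include <cassert>
#include <cstdint>

int main() {
  uint64_t rax = 0xFFFF'FFFF'FFFF'FFFFULL;  // stale bits in the upper half
  uint32_t eax = 0x8000'0000u;              // a 32-bit index write...
  rax = eax;  // ...zero-extends, just like a `movl` write does on x64
  assert((rax >> 32) == 0);  // the invariant the debug check verifies
  return 0;
}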

R=jkummerow@chromium.org

Bug: v8:10949
Change-Id: I3c2266dde1bf9d182b6759893f7f64540ae12261
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3791051
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82074}
parent 3decc4bb
@@ -805,7 +805,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
                              uint32_t offset_imm, LiftoffRegister src,
                              StoreType type, LiftoffRegList pinned,
                              uint32_t* protected_store_pc,
-                             bool /* is_store_mem */) {
+                             bool /* is_store_mem */, bool /* i64_offset */) {
   // Offsets >=2GB are statically OOB on 32-bit systems.
   DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
   UseScratchRegisterScope temps(this);
@@ -1382,7 +1382,8 @@ void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
 class CacheStatePreservingTempRegister {
  public:
-  CacheStatePreservingTempRegister(LiftoffAssembler& assm) : assm_(assm) {}
+  explicit CacheStatePreservingTempRegister(LiftoffAssembler& assm)
+      : assm_(assm) {}
   ~CacheStatePreservingTempRegister() {
     if (must_pop_) assm_.Pop(reg_);
@@ -579,10 +579,10 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
                              uintptr_t offset_imm, LiftoffRegister src,
                              StoreType type, LiftoffRegList /* pinned */,
                              uint32_t* protected_store_pc,
-                             bool /* is_store_mem */) {
+                             bool /* is_store_mem */, bool i64_offset) {
   UseScratchRegisterScope temps(this);
-  MemOperand dst_op =
-      liftoff::GetMemOp(this, &temps, dst_addr, offset_reg, offset_imm);
+  MemOperand dst_op = liftoff::GetMemOp(this, &temps, dst_addr, offset_reg,
+                                        offset_imm, i64_offset);
   if (protected_store_pc) *protected_store_pc = pc_offset();
   switch (type.value()) {
     case StoreType::kI32Store8:
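The arm64 {GetMemOp} helper that now receives {i64_offset} is not shown in this excerpt. A simplified sketch of how such a helper can pick between the zero-extended W register and the full X register could look like the following (a sketch only, assuming V8's arm64 assembler types; names and details are assumptions, not the actual implementation):

// Sketch only (assumes V8's arm64 MacroAssembler types); not the actual
// liftoff::GetMemOp implementation.
inline MemOperand GetMemOpSketch(LiftoffAssembler* assm,
                                 UseScratchRegisterScope* temps, Register addr,
                                 Register offset_reg, uintptr_t offset_imm,
                                 bool i64_offset) {
  if (!offset_reg.is_valid()) return MemOperand(addr.X(), offset_imm);
  Register effective_base = addr.X();
  if (offset_imm != 0) {
    // Fold the immediate offset into a scratch register first.
    effective_base = temps->AcquireX();
    assm->Add(effective_base, addr.X(), offset_imm);
  }
  // With i64_offset, use the full 64-bit index register; otherwise apply
  // UXTW, which is only correct for 32-bit (memory32) indices.
  return i64_offset ? MemOperand(effective_base, offset_reg.X())
                    : MemOperand(effective_base, offset_reg.W(), UXTW);
}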
@@ -511,7 +511,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
                              uint32_t offset_imm, LiftoffRegister src,
                              StoreType type, LiftoffRegList pinned,
                              uint32_t* protected_store_pc,
-                             bool /* is_store_mem */) {
+                             bool /* is_store_mem */, bool /* i64_offset */) {
   DCHECK_EQ(type.value_type() == kWasmI64, src.is_gp_pair());
   // Offsets >=2GB are statically OOB on 32-bit systems.
   DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
@@ -812,7 +812,7 @@ class LiftoffAssembler : public TurboAssembler {
                     uintptr_t offset_imm, LiftoffRegister src, StoreType type,
                     LiftoffRegList pinned,
                     uint32_t* protected_store_pc = nullptr,
-                    bool is_store_mem = false);
+                    bool is_store_mem = false, bool i64_offset = false);
   inline void AtomicLoad(LiftoffRegister dst, Register src_addr,
                          Register offset_reg, uintptr_t offset_imm,
                          LoadType type, LiftoffRegList pinned);
@@ -3093,16 +3093,19 @@ class LiftoffCompiler {
                const MemoryAccessImmediate<validate>& imm,
                const Value& index_val, Value* result) {
     ValueKind kind = type.value_type().kind();
-    RegClass rc = reg_class_for(kind);
+    DCHECK_EQ(kind, result->type.kind());
     if (!CheckSupportedType(decoder, kind, "load")) return;
     uintptr_t offset = imm.offset;
     Register index = no_reg;
+    RegClass rc = reg_class_for(kind);
     // Only look at the slot, do not pop it yet (will happen in PopToRegister
     // below, if this is not a statically-in-bounds index).
     auto& index_slot = __ cache_state()->stack_state.back();
-    bool i64_offset = index_val.type == kWasmI64;
+    DCHECK_EQ(index_val.type.kind(), index_slot.kind());
+    DCHECK(index_slot.kind() == kI32 || index_slot.kind() == kI64);
+    bool i64_offset = index_slot.kind() == kI64;
     if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
       __ cache_state()->stack_state.pop_back();
       CODE_COMMENT("load from memory (constant offset)");
@@ -3226,6 +3229,7 @@ class LiftoffCompiler {
                 const MemoryAccessImmediate<validate>& imm,
                 const Value& index_val, const Value& value_val) {
     ValueKind kind = type.value_type().kind();
+    DCHECK_EQ(kind, value_val.type.kind());
     if (!CheckSupportedType(decoder, kind, "store")) return;
     LiftoffRegList pinned;
@@ -3235,11 +3239,15 @@ class LiftoffCompiler {
     Register index = no_reg;
     auto& index_slot = __ cache_state()->stack_state.back();
+    DCHECK_EQ(index_val.type.kind(), index_slot.kind());
+    DCHECK(index_slot.kind() == kI32 || index_slot.kind() == kI64);
+    bool i64_offset = index_slot.kind() == kI64;
     if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
       __ cache_state()->stack_state.pop_back();
       CODE_COMMENT("store to memory (constant offset)");
       Register mem = pinned.set(GetMemoryStart(pinned));
-      __ Store(mem, no_reg, offset, value, type, pinned, nullptr, true);
+      __ Store(mem, no_reg, offset, value, type, pinned, nullptr, true,
+               i64_offset);
     } else {
       LiftoffRegister full_index = __ PopToRegister(pinned);
       index = BoundsCheckMem(decoder, type.size(), imm.offset, full_index,
@@ -3255,7 +3263,7 @@ class LiftoffCompiler {
       LiftoffRegList outer_pinned;
       if (V8_UNLIKELY(FLAG_trace_wasm_memory)) outer_pinned.set(index);
       __ Store(mem, index, offset, value, type, outer_pinned,
-               &protected_store_pc, true);
+               &protected_store_pc, true, i64_offset);
       if (env_->bounds_checks == kTrapHandler) {
         AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
                          protected_store_pc);
@@ -80,17 +80,18 @@ constexpr Operand kInstanceOperand = GetStackSlot(kInstanceOffset);
 constexpr Operand kOSRTargetSlot = GetStackSlot(kOSRTargetOffset);
-inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
-                        uintptr_t offset_imm) {
+inline Operand GetMemOp(LiftoffAssembler* assm, Register addr,
+                        Register offset_reg, uintptr_t offset_imm) {
   if (is_uint31(offset_imm)) {
     int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
-    return offset == no_reg ? Operand(addr, offset_imm32)
-                            : Operand(addr, offset, times_1, offset_imm32);
+    return offset_reg == no_reg
+               ? Operand(addr, offset_imm32)
+               : Operand(addr, offset_reg, times_1, offset_imm32);
   }
   // Offset immediate does not fit in 31 bits.
   Register scratch = kScratchRegister;
   assm->TurboAssembler::Move(scratch, offset_imm);
-  if (offset != no_reg) assm->addq(scratch, offset);
+  if (offset_reg != no_reg) assm->addq(scratch, offset_reg);
   return Operand(addr, scratch, times_1, 0);
 }
@@ -494,7 +495,10 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
                              uintptr_t offset_imm, LiftoffRegister src,
                              StoreType type, LiftoffRegList /* pinned */,
                              uint32_t* protected_store_pc,
-                             bool /* is_store_mem */) {
+                             bool /* is_store_mem */, bool i64_offset) {
+  if (offset_reg != no_reg && !i64_offset) {
+    AssertZeroExtended(offset_reg);
+  }
   Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
   if (protected_store_pc) *protected_store_pc = pc_offset();
   switch (type.value()) {