Commit c8d60d7e authored by Clemens Backes, committed by V8 LUCI CQ

[liftoff][arm64] Fix address computation for trap handling

This refactors the {GetMemOp} function once again:
Instead of computing (mem_start + (offset_reg + offset_imm)), compute
((mem_start + offset_imm) + offset_reg). This avoids an overflow in
(offset_reg + offset_imm) when using 32-bit computations; such an
overflow wraps the effective offset back in bounds and thereby hides
OOB memory accesses when relying on the trap handler.
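
For illustration, here is a standalone C++ sketch (not V8 code; all
constants are hypothetical) of how the old 32-bit addition could wrap
around and make an out-of-bounds access look in-bounds:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t mem_start = 0x600000000000;  // hypothetical Wasm memory base
  uint32_t offset_reg = 0xffffff00u;    // dynamic (untrusted) offset
  uint32_t offset_imm = 0x200u;         // static offset from the instruction

  // Old scheme: offset_reg + offset_imm, computed in 32 bits, wraps to
  // 0x100, so the access lands back inside the memory region and never
  // reaches the guard pages the trap handler relies on.
  uint64_t old_addr = mem_start + uint32_t(offset_reg + offset_imm);

  // New scheme: (mem_start + offset_imm) is a full 64-bit add and
  // offset_reg is only zero-extended, so the out-of-bounds offset
  // survives and the access faults in the guard region.
  uint64_t new_addr = (mem_start + offset_imm) + uint64_t(offset_reg);

  printf("old: 0x%llx\nnew: 0x%llx\n", (unsigned long long)old_addr,
         (unsigned long long)new_addr);
  return 0;
}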

As a nice side-effect, this change makes the whole method a lot nicer to
read.

We also need to change {StoreTaggedPointer}, which was relying on the
inner workings of {GetMemOp}. The new version makes the semantics more
transparent, at the cost of repeating some logic from (the previous
version of) {GetMemOp}.
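
As a sketch of the new offset handling in {StoreTaggedPointer} (a plain
C++ model under assumed semantics, not the emitted arm64 code): the
write barrier below the store accepts only a single offset operand, so
a register offset and an immediate offset must be folded into one value
up front:

#include <cstdint>

// Hypothetical model of folding the two offsets into the single operand
// used by both the store and the write-barrier call. Offsets into tagged
// objects are small, so a 32-bit add suffices; the result is kept
// zero-extended, as if in a 64-bit register for the MemOperand.
uint64_t fold_offsets(bool has_offset_reg, uint32_t offset_reg,
                      int32_t offset_imm) {
  if (!has_offset_reg) return uint32_t(offset_imm);    // immediate only
  if (offset_imm == 0) return offset_reg;              // register only
  return uint32_t(offset_reg + uint32_t(offset_imm));  // both: 32-bit add
}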

R=jkummerow@chromium.org

Bug: v8:11955, chromium:1227465, v8:11951
Change-Id: Ia068ca7c4f7db89b81529edd3438b0e4eee7d23d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3015566
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75693}
parent 408f5927
@@ -128,23 +128,14 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm,
                            UseScratchRegisterScope* temps, Register addr,
                            Register offset, T offset_imm,
                            bool i64_offset = false) {
-  if (offset.is_valid()) {
-    if (offset_imm == 0) {
-      return i64_offset ? MemOperand(addr.X(), offset.X())
-                        : MemOperand(addr.X(), offset.W(), UXTW);
-    }
-    DCHECK_GE(kMaxUInt32, offset_imm);
-    if (i64_offset) {
-      Register tmp = temps->AcquireX();
-      assm->Add(tmp, offset.X(), offset_imm);
-      return MemOperand(addr.X(), tmp);
-    } else {
-      Register tmp = temps->AcquireW();
-      assm->Add(tmp, offset.W(), offset_imm);
-      return MemOperand(addr.X(), tmp, UXTW);
-    }
+  if (!offset.is_valid()) return MemOperand(addr.X(), offset_imm);
+  Register effective_addr = addr.X();
+  if (offset_imm) {
+    effective_addr = temps->AcquireX();
+    assm->Add(effective_addr, addr.X(), offset_imm);
   }
-  return MemOperand(addr.X(), offset_imm);
+  return i64_offset ? MemOperand(effective_addr, offset.X())
+                    : MemOperand(effective_addr, offset.W(), UXTW);
 }
 
 // Certain load instructions do not support offset (register or immediate).
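
For readers tracing the hunk above: a plain C++ model of the new
decision tree (a sketch, assuming MemOperand(base, reg, UXTW) means
base plus the zero-extended low 32 bits of reg):

#include <cstdint>

// Mirrors the new {GetMemOp}: a non-zero immediate is folded into the
// base with a full 64-bit add, then the register offset is applied by
// the addressing mode.
uint64_t model_address(uint64_t addr, bool offset_valid, uint64_t offset_reg,
                       uint64_t offset_imm, bool i64_offset) {
  if (!offset_valid) return addr + offset_imm;  // immediate-only operand
  uint64_t effective_addr = addr;
  if (offset_imm != 0) effective_addr += offset_imm;  // 64-bit, cannot wrap
  return i64_offset ? effective_addr + offset_reg             // X register
                    : effective_addr + uint32_t(offset_reg);  // W reg, UXTW
}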
@@ -470,11 +461,18 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
                                           LiftoffRegister src,
                                           LiftoffRegList pinned,
                                           SkipWriteBarrier skip_write_barrier) {
-  // Store the value.
   UseScratchRegisterScope temps(this);
-  MemOperand dst_op =
-      liftoff::GetMemOp(this, &temps, dst_addr, offset_reg, offset_imm);
-  StoreTaggedField(src.gp(), dst_op);
+  Operand offset_op = offset_reg.is_valid() ? Operand(offset_reg.W(), UXTW)
+                                            : Operand(offset_imm);
+  // For the write barrier (below), we cannot have both an offset register and
+  // an immediate offset. Add them to a 32-bit offset initially, but in a 64-bit
+  // register, because that's needed in the MemOperand below.
+  if (offset_reg.is_valid() && offset_imm) {
+    Register effective_offset = temps.AcquireX();
+    Add(effective_offset.W(), offset_reg.W(), offset_imm);
+    offset_op = effective_offset;
+  }
+  StoreTaggedField(src.gp(), MemOperand(dst_addr.X(), offset_op));
 
   if (skip_write_barrier || FLAG_disable_write_barriers) return;
@@ -492,10 +490,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
   CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingMask, ne,
                 &exit);
   CallRecordWriteStubSaveRegisters(
-      dst_addr,
-      dst_op.IsRegisterOffset() ? Operand(dst_op.regoffset().X())
-                                : Operand(dst_op.offset()),
-      RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
+      dst_addr, offset_op, RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
       StubCallMode::kCallWasmRuntimeStub);
   bind(&exit);
 }