Commit 1da429fb authored by Clemens Backes, committed by Commit Bot

Reland "[wasm][memory64] Prepare Liftoff for ptrsize offsets"

This is a reland of 800307f6, with a
minimal fix for arm64 (uint64_t -> uintptr_t).

Original change's description:
> [wasm][memory64] Prepare Liftoff for ptrsize offsets
>
> This CL prepares the LiftoffAssembler interface for uintptr_t offsets.
> Many places can still only handle 32-bit values, but after this CL we can
> start storing the offsets as uintptr_t in the memory access immediates.
> Some TODOs are placed to extend code generation for 64-bit additions, if
> memory64 is enabled.
> All of this will be addressed in follow-up CLs.
>
> R=manoskouk@chromium.org
>
> Bug: v8:10949
> Change-Id: Id3b9b8aa555ab41f082ba012f4f8d80586c35b89
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2529452
> Commit-Queue: Clemens Backes <clemensb@chromium.org>
> Reviewed-by: Manos Koukoutos <manoskouk@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#71236}

Bug: v8:10949
Tbr: manoskouk@chromium.org
Change-Id: I33a9676afbf84d2032b181de2afd745841575900
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2550663
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71307}
parent b42136ea
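
Note on the recurring pattern in the diff below: on 32-bit targets the (now 64-bit) addition immediate is split into a low and a high 32-bit word and added with carry (add/adc), replacing the old "sign_extend = imm < 0 ? -1 : 0" trick that only covered 32-bit immediates. The following stand-alone C++ sketch illustrates that lowering; names such as RegisterPair and AddI64Immediate are illustrative only and not part of the LiftoffAssembler API.

// Stand-alone illustration of the add-with-carry lowering used in
// I64BinopI / OpWithCarryI below; not V8 code.
#include <cstdint>
#include <cstdio>

struct RegisterPair {
  uint32_t low;   // low 32 bits of the i64 value
  uint32_t high;  // high 32 bits of the i64 value
};

RegisterPair AddI64Immediate(RegisterPair lhs, int64_t imm) {
  // Split the immediate into two 32-bit words.
  uint32_t imm_low_word = static_cast<uint32_t>(imm);
  uint32_t imm_high_word =
      static_cast<uint32_t>(static_cast<uint64_t>(imm) >> 32);
  RegisterPair dst;
  // Low half: plain addition; the overflow is the carry ("add ..., SetCC").
  dst.low = lhs.low + imm_low_word;
  uint32_t carry = dst.low < lhs.low ? 1 : 0;
  // High half: add-with-carry ("adc").
  dst.high = lhs.high + imm_high_word + carry;
  return dst;
}

int main() {
  // 0x00000000FFFFFFFF + 5 == 0x0000000100000004.
  RegisterPair r = AddI64Immediate({0xFFFFFFFFu, 0u}, 5);
  std::printf("%08x:%08x\n", r.high, r.low);
  return 0;
}
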
@@ -72,7 +72,7 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm,
inline Register CalculateActualAddress(LiftoffAssembler* assm,
UseScratchRegisterScope* temps,
Register addr_reg, Register offset_reg,
int32_t offset_imm,
uintptr_t offset_imm,
Register result_reg = no_reg) {
if (offset_reg == no_reg && offset_imm == 0) {
if (result_reg == no_reg) {
@@ -139,14 +139,14 @@ template <void (Assembler::*op)(Register, Register, const Operand&, SBit,
void (Assembler::*op_with_carry)(Register, Register, const Operand&,
SBit, Condition)>
inline void I64BinopI(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister lhs, int32_t imm) {
LiftoffRegister lhs, int64_t imm) {
// The compiler allocated registers such that either {dst == lhs} or there is
// no overlap between the two.
DCHECK_NE(dst.low_gp(), lhs.high_gp());
(assm->*op)(dst.low_gp(), lhs.low_gp(), Operand(imm), SetCC, al);
// Top half of the immediate sign extended, either 0 or -1.
int32_t sign_extend = imm < 0 ? -1 : 0;
(assm->*op_with_carry)(dst.high_gp(), lhs.high_gp(), Operand(sign_extend),
int32_t imm_low_word = static_cast<int32_t>(imm);
int32_t imm_high_word = static_cast<int32_t>(imm >> 32);
(assm->*op)(dst.low_gp(), lhs.low_gp(), Operand(imm_low_word), SetCC, al);
(assm->*op_with_carry)(dst.high_gp(), lhs.high_gp(), Operand(imm_high_word),
LeaveCC, al);
}
@@ -1625,7 +1625,7 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
}
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm) {
int64_t imm) {
liftoff::I64BinopI<&Assembler::add, &Assembler::adc>(this, dst, lhs, imm);
}
@@ -2224,7 +2224,7 @@ bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
}
void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
Register offset_reg, uintptr_t offset_imm,
LoadType type,
LoadTransformationKind transform,
uint32_t* protected_load_pc) {
@@ -99,6 +99,8 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm,
if (offset.is_valid()) {
if (offset_imm == 0) return MemOperand(addr.X(), offset.W(), UXTW);
Register tmp = temps->AcquireW();
// TODO(clemensb): Do a 64-bit addition if memory64 is used.
DCHECK_GE(kMaxUInt32, offset_imm);
assm->Add(tmp, offset.W(), offset_imm);
return MemOperand(addr.X(), tmp, UXTW);
}
@@ -375,7 +377,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
UseScratchRegisterScope temps(this);
@@ -426,7 +428,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
UseScratchRegisterScope temps(this);
@@ -466,7 +468,7 @@ namespace liftoff {
inline Register CalculateActualAddress(LiftoffAssembler* lasm,
Register addr_reg, Register offset_reg,
int32_t offset_imm,
uintptr_t offset_imm,
Register result_reg) {
DCHECK_NE(offset_reg, no_reg);
DCHECK_NE(addr_reg, no_reg);
@@ -480,7 +482,7 @@ inline Register CalculateActualAddress(LiftoffAssembler* lasm,
enum class Binop { kAdd, kSub, kAnd, kOr, kXor, kExchange };
inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
Register offset_reg, uint32_t offset_imm,
Register offset_reg, uintptr_t offset_imm,
LiftoffRegister value, LiftoffRegister result,
StoreType type, Binop op) {
LiftoffRegList pinned =
@@ -577,7 +579,7 @@ inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
} // namespace liftoff
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned) {
UseScratchRegisterScope temps(this);
Register src_reg = liftoff::CalculateActualAddress(
@@ -604,7 +606,7 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
UseScratchRegisterScope temps(this);
Register dst_reg = liftoff::CalculateActualAddress(
@@ -631,42 +633,42 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
}
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kAdd);
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kSub);
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kAnd);
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kOr);
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
type, liftoff::Binop::kXor);
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uint32_t offset_imm,
uintptr_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
@@ -674,7 +676,7 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
}
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uint32_t offset_imm,
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
LiftoffRegList pinned =
@@ -965,7 +967,7 @@ I32_BINOP_I(i32_xor, Eor)
I32_SHIFTOP(i32_shl, Lsl)
I32_SHIFTOP(i32_sar, Asr)
I32_SHIFTOP(i32_shr, Lsr)
I64_BINOP_I(i64_add, Add)
I64_BINOP(i64_add, Add)
I64_BINOP(i64_sub, Sub)
I64_BINOP(i64_mul, Mul)
I64_BINOP_I(i64_and, And)
@@ -1011,6 +1013,11 @@ FP64_UNOP(f64_sqrt, Fsqrt)
#undef I32_SHIFTOP
#undef I64_SHIFTOP
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
int64_t imm) {
Add(dst.gp().X(), lhs.gp().X(), imm);
}
void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
Clz(dst.W(), src.W());
}
@@ -1476,7 +1483,7 @@ bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
}
void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
Register offset_reg, uintptr_t offset_imm,
LoadType type,
LoadTransformationKind transform,
uint32_t* protected_load_pc) {
@@ -1454,20 +1454,21 @@ inline void OpWithCarry(LiftoffAssembler* assm, LiftoffRegister dst,
template <void (Assembler::*op)(Register, const Immediate&),
void (Assembler::*op_with_carry)(Register, int32_t)>
inline void OpWithCarryI(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister lhs, int32_t imm) {
LiftoffRegister lhs, int64_t imm) {
// The compiler allocated registers such that either {dst == lhs} or there is
// no overlap between the two.
DCHECK_NE(dst.low_gp(), lhs.high_gp());
int32_t imm_low_word = static_cast<int32_t>(imm);
int32_t imm_high_word = static_cast<int32_t>(imm >> 32);
// First, compute the low half of the result.
if (dst.low_gp() != lhs.low_gp()) assm->mov(dst.low_gp(), lhs.low_gp());
(assm->*op)(dst.low_gp(), Immediate(imm));
(assm->*op)(dst.low_gp(), Immediate(imm_low_word));
// Now compute the upper half.
if (dst.high_gp() != lhs.high_gp()) assm->mov(dst.high_gp(), lhs.high_gp());
// Top half of the immediate sign extended, either 0 or -1.
int32_t sign_extend = imm < 0 ? -1 : 0;
(assm->*op_with_carry)(dst.high_gp(), sign_extend);
(assm->*op_with_carry)(dst.high_gp(), imm_high_word);
}
} // namespace liftoff
@@ -1477,7 +1478,7 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
}
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm) {
int64_t imm) {
liftoff::OpWithCarryI<&Assembler::add, &Assembler::adc>(this, dst, lhs, imm);
}
@@ -2628,7 +2629,7 @@ inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
} // namespace liftoff
void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
Register offset_reg, uintptr_t offset_imm,
LoadType type,
LoadTransformationKind transform,
uint32_t* protected_load_pc) {
@@ -488,46 +488,47 @@ class LiftoffAssembler : public TurboAssembler {
inline void StoreTaggedPointer(Register dst_addr, int32_t offset_imm,
LiftoffRegister src, LiftoffRegList pinned);
inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
uint32_t offset_imm, LoadType type, LiftoffRegList pinned,
uintptr_t offset_imm, LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc = nullptr,
bool is_load_mem = false);
inline void Store(Register dst_addr, Register offset_reg, uint32_t offset_imm,
LiftoffRegister src, StoreType type, LiftoffRegList pinned,
inline void Store(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src, StoreType type,
LiftoffRegList pinned,
uint32_t* protected_store_pc = nullptr,
bool is_store_mem = false);
inline void AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned);
inline void AtomicStore(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned);
inline void AtomicAdd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type);
inline void AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type);
inline void AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type);
inline void AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type);
inline void AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type);
inline void AtomicExchange(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type);
inline void AtomicCompareExchange(Register dst_addr, Register offset_reg,
uint32_t offset_imm,
uintptr_t offset_imm,
LiftoffRegister expected,
LiftoffRegister new_value,
LiftoffRegister value, StoreType type);
@@ -589,7 +590,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm);
int64_t imm);
inline void emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -667,11 +668,11 @@ class LiftoffAssembler : public TurboAssembler {
}
}
inline void emit_ptrsize_addi(Register dst, Register lhs, int32_t imm) {
inline void emit_ptrsize_addi(Register dst, Register lhs, intptr_t imm) {
if (kSystemPointerSize == 8) {
emit_i64_addi(LiftoffRegister(dst), LiftoffRegister(lhs), imm);
} else {
emit_i32_addi(dst, lhs, imm);
emit_i32_addi(dst, lhs, static_cast<int32_t>(imm));
}
}
@@ -767,7 +768,7 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister false_value, ValueType type);
inline void LoadTransform(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LoadTransformationKind transform,
uint32_t* protected_load_pc);
inline void emit_i8x16_shuffle(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1234,28 +1235,28 @@ void EmitI64IndependentHalfOperation(LiftoffAssembler* assm,
template <void (LiftoffAssembler::*op)(Register, Register, int32_t)>
void EmitI64IndependentHalfOperationImm(LiftoffAssembler* assm,
LiftoffRegister dst,
LiftoffRegister lhs, int32_t imm) {
// Top half of the immediate sign extended, either 0 or -1.
int32_t sign_extend = imm < 0 ? -1 : 0;
LiftoffRegister lhs, int64_t imm) {
int32_t low_word = static_cast<int32_t>(imm);
int32_t high_word = static_cast<int32_t>(imm >> 32);
// If {dst.low_gp()} does not overlap with {lhs.high_gp()},
// just first compute the lower half, then the upper half.
if (dst.low() != lhs.high()) {
(assm->*op)(dst.low_gp(), lhs.low_gp(), imm);
(assm->*op)(dst.high_gp(), lhs.high_gp(), sign_extend);
(assm->*op)(dst.low_gp(), lhs.low_gp(), low_word);
(assm->*op)(dst.high_gp(), lhs.high_gp(), high_word);
return;
}
// If {dst.high_gp()} does not overlap with {lhs.low_gp()},
// we can compute this the other way around.
if (dst.high() != lhs.low()) {
(assm->*op)(dst.high_gp(), lhs.high_gp(), sign_extend);
(assm->*op)(dst.low_gp(), lhs.low_gp(), imm);
(assm->*op)(dst.high_gp(), lhs.high_gp(), high_word);
(assm->*op)(dst.low_gp(), lhs.low_gp(), low_word);
return;
}
// Otherwise, we need a temporary register.
Register tmp =
assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs)).gp();
(assm->*op)(tmp, lhs.low_gp(), imm);
(assm->*op)(dst.high_gp(), lhs.high_gp(), sign_extend);
(assm->*op)(tmp, lhs.low_gp(), low_word);
(assm->*op)(dst.high_gp(), lhs.high_gp(), high_word);
assm->Move(dst.low_gp(), tmp, kWasmI32);
}
} // namespace liftoff
@@ -2150,7 +2150,7 @@ class LiftoffCompiler {
}
void AlignmentCheckMem(FullDecoder* decoder, uint32_t access_size,
uint32_t offset, Register index,
uintptr_t offset, Register index,
LiftoffRegList pinned) {
Label* trap_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapUnalignedAccess, 0);
@@ -2166,16 +2166,16 @@ class LiftoffCompiler {
// Then we can also avoid using the temp register here.
__ emit_i32_andi(address, index, align_mask);
__ emit_cond_jump(kUnequal, trap_label, kWasmI32, address);
return;
}
__ emit_i32_addi(address, index, offset);
} else {
// For alignment checks we only look at the lower 32-bits in {offset}.
__ emit_i32_addi(address, index, static_cast<uint32_t>(offset));
__ emit_i32_andi(address, address, align_mask);
__ emit_cond_jump(kUnequal, trap_label, kWasmI32, address);
}
}
void TraceMemoryOperation(bool is_store, MachineRepresentation rep,
Register index, uint32_t offset,
Register index, uintptr_t offset,
WasmCodePosition position) {
// Before making the runtime call, spill all cache registers.
__ SpillAllRegisters();
@@ -2184,7 +2184,9 @@ class LiftoffCompiler {
// Get one register for computing the effective offset (offset + index).
LiftoffRegister effective_offset =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(effective_offset, WasmValue(offset));
// TODO(clemensb): Do a 64-bit addition here if memory64 is used.
DCHECK_GE(kMaxUInt32, offset);
__ LoadConstant(effective_offset, WasmValue(static_cast<uint32_t>(offset)));
__ emit_i32_add(effective_offset.gp(), effective_offset.gp(), index);
// Get a register to hold the stack slot for MemoryTracingInfo.
@@ -2229,7 +2231,7 @@ class LiftoffCompiler {
__ DeallocateStackSlot(sizeof(MemoryTracingInfo));
}
Register AddMemoryMasking(Register index, uint32_t* offset,
Register AddMemoryMasking(Register index, uintptr_t* offset,
LiftoffRegList* pinned) {
if (!FLAG_untrusted_code_mitigations || env_->use_trap_handler) {
return index;
@@ -2240,12 +2242,15 @@ class LiftoffCompiler {
Register old_index = index;
pinned->clear(LiftoffRegister(old_index));
index = pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
// TODO(clemensb): Use kWasmI64 if memory64 is used.
if (index != old_index) __ Move(index, old_index, kWasmI32);
}
Register tmp = __ GetUnusedRegister(kGpReg, *pinned).gp();
__ emit_ptrsize_addi(index, index, *offset);
// TODO(clemensb): Use 64-bit operations if memory64 is used.
DCHECK_GE(kMaxUInt32, *offset);
__ emit_i32_addi(index, index, static_cast<uint32_t>(*offset));
LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize);
__ emit_ptrsize_and(index, index, tmp);
__ emit_i32_and(index, index, tmp);
*offset = 0;
return index;
}
@@ -2262,7 +2267,7 @@ class LiftoffCompiler {
kDontForceCheck)) {
return;
}
uint32_t offset = imm.offset;
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("load from memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
@@ -2307,7 +2312,7 @@ class LiftoffCompiler {
return;
}
uint32_t offset = imm.offset;
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("load with transformation");
Register addr = __ GetUnusedRegister(kGpReg, pinned).gp();
@@ -2353,7 +2358,7 @@ class LiftoffCompiler {
kDontForceCheck)) {
return;
}
uint32_t offset = imm.offset;
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("store to memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
@@ -3082,7 +3087,7 @@ class LiftoffCompiler {
return;
}
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uint32_t offset = imm.offset;
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("atomic store to memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
@@ -3106,7 +3111,7 @@ class LiftoffCompiler {
return;
}
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uint32_t offset = imm.offset;
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("atomic load from memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
@@ -3125,7 +3130,7 @@ class LiftoffCompiler {
void AtomicBinop(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm,
void (LiftoffAssembler::*emit_fn)(Register, Register,
uint32_t, LiftoffRegister,
uintptr_t, LiftoffRegister,
LiftoffRegister,
StoreType)) {
ValueType result_type = type.value_type();
@@ -3154,7 +3159,7 @@ class LiftoffCompiler {
}
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uint32_t offset = imm.offset;
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
@@ -3179,7 +3184,7 @@ class LiftoffCompiler {
}
AlignmentCheckMem(decoder, type.size(), imm.offset, index_reg, pinned);
uint32_t offset = imm.offset;
uintptr_t offset = imm.offset;
index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
@@ -3211,7 +3216,7 @@ class LiftoffCompiler {
}
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uint32_t offset = imm.offset;
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
@@ -3247,17 +3252,16 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, type.element_size_bytes(), imm.offset, index_reg,
pinned);
uint32_t offset = imm.offset;
uintptr_t offset = imm.offset;
index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
Register index_plus_offset =
__ cache_state()->is_used(LiftoffRegister(index_reg))
? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
: index_reg;
if (offset) {
__ emit_i32_addi(index_plus_offset, index_reg, offset);
__ emit_ptrsize_zeroextend_i32(index_plus_offset, index_plus_offset);
} else {
// TODO(clemensb): Skip this if memory is 64 bit.
__ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg);
if (offset) {
__ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset);
}
LiftoffAssembler::VarState timeout =
@@ -3324,17 +3328,16 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, kWasmI32.element_size_bytes(), imm.offset,
index_reg, pinned);
uint32_t offset = imm.offset;
uintptr_t offset = imm.offset;
index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
Register index_plus_offset =
__ cache_state()->is_used(LiftoffRegister(index_reg))
? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
: index_reg;
if (offset) {
__ emit_i32_addi(index_plus_offset, index_reg, offset);
__ emit_ptrsize_zeroextend_i32(index_plus_offset, index_plus_offset);
} else {
// TODO(clemensb): Skip this if memory is 64 bit.
__ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg);
if (offset) {
__ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset);
}
ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32};
@@ -43,17 +43,16 @@ inline Operand GetStackSlot(int offset) { return Operand(rbp, -offset); }
inline Operand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
uint32_t offset_imm) {
uintptr_t offset_imm) {
if (is_uint31(offset_imm)) {
if (offset == no_reg) return Operand(addr, offset_imm);
return Operand(addr, offset, times_1, offset_imm);
int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
return offset == no_reg ? Operand(addr, offset_imm32)
: Operand(addr, offset, times_1, offset_imm32);
}
// Offset immediate does not fit in 31 bits.
Register scratch = kScratchRegister;
assm->movl(scratch, Immediate(offset_imm));
if (offset != no_reg) {
assm->addq(scratch, offset);
}
assm->Set(scratch, offset_imm);
if (offset != no_reg) assm->addq(scratch, offset);
return Operand(addr, scratch, times_1, 0);
}
@@ -306,13 +305,13 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
}
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned) {
Load(dst, src_addr, offset_reg, offset_imm, type, pinned, nullptr, true);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
if (emit_debug_code() && offset_reg != no_reg) {
@@ -364,7 +363,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList /* pinned */,
uint32_t* protected_store_pc, bool is_store_mem) {
if (emit_debug_code() && offset_reg != no_reg) {
@@ -401,7 +400,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
if (emit_debug_code() && offset_reg != no_reg) {
AssertZeroExtended(offset_reg);
@@ -434,7 +433,7 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
}
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
DCHECK(!cache_state()->is_used(result));
if (cache_state()->is_used(value)) {
@@ -479,7 +478,7 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
DCHECK(!cache_state()->is_used(result));
if (cache_state()->is_used(value)) {
@@ -537,7 +536,7 @@ inline void AtomicBinop(LiftoffAssembler* lasm,
void (Assembler::*opl)(Register, Register),
void (Assembler::*opq)(Register, Register),
Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
DCHECK(!__ cache_state()->is_used(result));
Register value_reg = value.gp();
@@ -614,28 +613,28 @@ inline void AtomicBinop(LiftoffAssembler* lasm,
} // namespace liftoff
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, &Assembler::andl, &Assembler::andq, dst_addr,
offset_reg, offset_imm, value, result, type);
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, &Assembler::orl, &Assembler::orq, dst_addr,
offset_reg, offset_imm, value, result, type);
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
liftoff::AtomicBinop(this, &Assembler::xorl, &Assembler::xorq, dst_addr,
offset_reg, offset_imm, value, result, type);
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uint32_t offset_imm,
uintptr_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
DCHECK(!cache_state()->is_used(result));
......@@ -680,7 +679,7 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
}
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uint32_t offset_imm,
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
Register value_reg = new_value.gp();
......@@ -944,6 +943,7 @@ void EmitCommutativeBinOpImm(LiftoffAssembler* assm, Register dst, Register lhs,
if (dst != lhs) (assm->*mov)(dst, lhs);
(assm->*op)(dst, Immediate(imm));
}
} // namespace liftoff
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
......@@ -1187,11 +1187,18 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
}
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
int32_t imm) {
if (lhs.gp() != dst.gp()) {
leaq(dst.gp(), Operand(lhs.gp(), imm));
int64_t imm) {
if (!is_int32(imm)) {
TurboAssembler::Set(kScratchRegister, imm);
if (lhs.gp() == dst.gp()) {
addq(dst.gp(), kScratchRegister);
} else {
leaq(dst.gp(), Operand(lhs.gp(), kScratchRegister, times_1, 0));
}
} else if (lhs.gp() == dst.gp()) {
addq(dst.gp(), Immediate(static_cast<int32_t>(imm)));
} else {
addq(dst.gp(), Immediate(imm));
leaq(dst.gp(), Operand(lhs.gp(), static_cast<int32_t>(imm)));
}
}
@@ -2264,7 +2271,7 @@ inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
} // namespace liftoff
void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
Register offset_reg, uintptr_t offset_imm,
LoadType type,
LoadTransformationKind transform,
uint32_t* protected_load_pc) {