Commit 4615655c authored by Andreas Haas, committed by Commit Bot

[wasm][liftoff] Implement atomic load and store on arm

The CL also moves the {ClearRegister} function to the
platform-independent LiftoffAssembler code.

R=clemensb@chromium.org

Bug: v8:10108
Change-Id: Ibf9f1829a525c859ad004636f678b82aa72d39ae
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2129637
Commit-Queue: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Jacob Bramley <jacob.bramley@arm.com>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67126}
parent 28bc2649
......@@ -548,13 +548,84 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
// Emits a sequentially-consistent atomic load of {type} from
// [src_addr + offset_reg + offset_imm] into {dst}.
// On ARM, loads of 32 bits or less are single-copy atomic, so a plain load
// followed by a {dmb} barrier suffices; 64-bit loads need {ldrexd}.
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
                                  Register offset_reg, uint32_t offset_imm,
                                  LoadType type, LiftoffRegList pinned) {
  if (type.value() != LoadType::kI64Load) {
    // The trailing {true} requests an aligned access, which is required for
    // atomicity of the plain load.
    Load(dst, src_addr, offset_reg, offset_imm, type, pinned, nullptr, true);
    dmb(ISH);
    return;
  }
  // {ldrexd} loads a 64 bit word into two registers. The first register needs
  // to have an even index, e.g. r8, the second register needs to be the one
  // with the next higher index, e.g. r9 if the first register is r8. In the
  // following code we use the fixed register pair r8/r9 to make the code here
  // simpler, even though other register pairs would also be possible.
  constexpr Register dst_low = r8;
  constexpr Register dst_high = r9;
  // Evict any cached values from the fixed pair before clobbering it.
  if (cache_state()->is_used(LiftoffRegister(dst_low))) {
    SpillRegister(LiftoffRegister(dst_low));
  }
  if (cache_state()->is_used(LiftoffRegister(dst_high))) {
    SpillRegister(LiftoffRegister(dst_high));
  }
  UseScratchRegisterScope temps(this);
  Register actual_addr = liftoff::CalculateActualAddress(
      this, &temps, src_addr, offset_reg, offset_imm);
  ldrexd(dst_low, dst_high, actual_addr);
  dmb(ISH);
  // Move the loaded pair into the requested destination register pair.
  LiftoffAssembler::ParallelRegisterMoveTuple reg_moves[]{
      {dst, LiftoffRegister::ForPair(dst_low, dst_high), kWasmI64}};
  ParallelRegisterMove(ArrayVector(reg_moves));
}
// Emits a sequentially-consistent atomic store of {src} ({type}) to
// [dst_addr + offset_reg + offset_imm].
// On ARM, stores of 32 bits or less are single-copy atomic, so barriers
// around a plain store suffice; 64-bit stores need an {ldrexd}/{strexd}
// retry loop.
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
                                   uint32_t offset_imm, LiftoffRegister src,
                                   StoreType type, LiftoffRegList pinned) {
  if (type.value() != StoreType::kI64Store) {
    dmb(ISH);
    // The trailing {true} requests an aligned access, which is required for
    // atomicity of the plain store.
    Store(dst_addr, offset_reg, offset_imm, src, type, pinned, nullptr, true);
    dmb(ISH);
    return;
  }
  // {strexd} stores a 64 bit word from two registers. The first register
  // needs to have an even index, e.g. r8, the second register needs to be the
  // one with the next higher index, e.g. r9 if the first register is r8. In
  // the following code we use the fixed register pair r8/r9 to make the code
  // here simpler, even though other register pairs would also be possible.
  constexpr Register value_low = r8;
  constexpr Register value_high = r9;
  pinned.set(value_low);
  pinned.set(value_high);
  // We need r8/r9 below as temps as well, so we cannot allow {src} to be
  // one of these registers.
  Register src_low = src.low_gp();
  Register src_high = src.high_gp();
  ClearRegister(value_low, {&dst_addr, &offset_reg, &src_low, &src_high},
                pinned);
  ClearRegister(value_high, {&dst_addr, &offset_reg, &src_low, &src_high},
                pinned);
  UseScratchRegisterScope temps(this);
  Register actual_addr = liftoff::CalculateActualAddress(
      this, &temps, dst_addr, offset_reg, offset_imm);
  // {strexd} needs a register for its success/failure result.
  Register result = temps.CanAcquire() ? temps.Acquire()
                                       : GetUnusedRegister(kGpReg, pinned).gp();
  dmb(ISH);
  Label store;
  bind(&store);
  // {ldrexd} is needed here so that the {strexd} instruction below can
  // succeed. We don't need the value we are reading. We use {value_low} and
  // {value_high} as the destination registers because {ldrexd} has the same
  // restrictions on registers as {strexd}, see the comment above.
  ldrexd(value_low, value_high, actual_addr);
  TurboAssembler::Move(value_low, src_low);
  TurboAssembler::Move(value_high, src_high);
  strexd(result, value_low, value_high, actual_addr);
  // {result} is 0 on success; retry if the exclusive store failed.
  cmp(result, Operand(0));
  b(ne, &store);
  dmb(ISH);
}
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
......
......@@ -627,6 +627,24 @@ void LiftoffAssembler::SpillAllRegisters() {
cache_state_.reset_used_registers();
}
// Evicts {reg} from the register cache (spilling its value if it is in use)
// and redirects every entry of {possible_uses} that aliases {reg} to a newly
// allocated register holding the old contents, so {reg} can be clobbered.
void LiftoffAssembler::ClearRegister(
    Register reg, std::initializer_list<Register*> possible_uses,
    LiftoffRegList pinned) {
  LiftoffRegister cache_reg{reg};
  if (cache_state()->is_used(cache_reg)) SpillRegister(cache_reg);
  // Allocated lazily on the first aliasing use, then shared by later ones.
  Register substitute = no_reg;
  for (Register* use : possible_uses) {
    if (*use != reg) continue;
    if (substitute == no_reg) {
      substitute = GetUnusedRegister(kGpReg, pinned).gp();
      Move(substitute, reg, LiftoffAssembler::kWasmIntPtr);
    }
    // Keep scanning: {reg} may appear more than once in {possible_uses}.
    *use = substitute;
  }
}
namespace {
void PrepareStackTransfers(const FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
......
......@@ -386,6 +386,14 @@ class LiftoffAssembler : public TurboAssembler {
void SpillLocals();
void SpillAllRegisters();
// Clear any uses of {reg} in both the cache and in {possible_uses}.
// Any use in the stack is spilled. If any register in {possible_uses} matches
// {reg}, then the content of {reg} is moved to a new temporary register, and
// all matches in {possible_uses} are rewritten to that temporary register.
void ClearRegister(Register reg,
std::initializer_list<Register*> possible_uses,
LiftoffRegList pinned);
// Call this method whenever spilling something, such that the number of used
// spill slot can be tracked and the stack frame will be allocated big enough.
void RecordUsedSpillOffset(int offset) {
......
......@@ -461,25 +461,6 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
namespace liftoff {
#define __ lasm->
// Checks if a register in {possible_uses} uses {reg}. If so, it allocates a
// replacement register for that use, and moves the content of {reg} to {use}.
// The replacement register is written into the pointer stored in
// {possible_uses}.
// NOTE(review): x64-specific helper; superseded by the platform-independent
// LiftoffAssembler::ClearRegister member function.
inline void ClearRegister(LiftoffAssembler* lasm, Register reg,
std::initializer_list<Register*> possible_uses,
LiftoffRegList pinned) {
// Spill any value currently cached in {reg} so it may be clobbered.
liftoff::SpillRegisters(lasm, reg);
Register replacement = no_reg;
for (Register* use : possible_uses) {
if (reg != *use) continue;
// Allocate the replacement lazily, only when an aliasing use is found.
if (replacement == no_reg) {
replacement = __ GetUnusedRegister(kGpReg, pinned).gp();
__ movq(replacement, reg);
}
// We cannot leave this loop early. There may be multiple uses of {reg}.
*use = replacement;
}
}
inline void AtomicBinop(LiftoffAssembler* lasm,
void (Assembler::*opl)(Register, Register),
......@@ -494,7 +475,7 @@ inline void AtomicBinop(LiftoffAssembler* lasm,
// move any use to another register.
LiftoffRegList pinned =
LiftoffRegList::ForRegs(dst_addr, offset_reg, value_reg);
ClearRegister(lasm, rax, {&dst_addr, &offset_reg, &value_reg}, pinned);
__ ClearRegister(rax, {&dst_addr, &offset_reg, &value_reg}, pinned);
if (__ emit_debug_code() && offset_reg != no_reg) {
__ AssertZeroExtended(offset_reg);
}
......@@ -623,8 +604,7 @@ void LiftoffAssembler::AtomicCompareExchange(
// move any use to another register.
LiftoffRegList pinned =
LiftoffRegList::ForRegs(dst_addr, offset_reg, expected, value_reg);
liftoff::ClearRegister(this, rax, {&dst_addr, &offset_reg, &value_reg},
pinned);
ClearRegister(rax, {&dst_addr, &offset_reg, &value_reg}, pinned);
if (expected.gp() != rax) {
movq(rax, expected.gp());
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment