Commit 5005a841 authored by Andreas Haas, committed by Commit Bot

[wasm][liftoff] Implement AtomicAdd on x64

R=clemensb@chromium.org

Bug: v8:10108
Change-Id: Ia4fb73e1771971638ca646702838b5722dafe140
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2010112
Commit-Queue: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65926}
parent 7ae8c055
@@ -1204,7 +1204,12 @@ void Assembler::lock() {
 void Assembler::xaddb(Operand dst, Register src) {
   EnsureSpace ensure_space(this);
-  emit_optional_rex_32(src, dst);
+  if (!src.is_byte_register()) {
+    // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+    emit_rex_32(src, dst);
+  } else {
+    emit_optional_rex_32(src, dst);
+  }
   emit(0x0F);
   emit(0xC0);
   emit_operand(src, dst);
......
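The assembler change above fixes the byte-register encoding of xaddb: in 64-bit mode only al, bl, cl, and dl can be named as byte registers without a REX prefix, because the REX-less ModRM reg values 4-7 select ah, ch, dh, and bh instead of sil, dil, spl, and bpl. A minimal sketch of the two encodings the patched xaddb() can emit; this is not V8 code, and the byte values follow the Intel SDM encoding of XADD r/m8, r8 (0F C0 /r):

#include <cstdint>
#include <cstdio>

int main() {
  // xadd byte ptr [rax], bl: no REX needed, bl is a legacy byte register.
  const uint8_t xadd_bl[] = {0x0F, 0xC0, 0x18};
  // xadd byte ptr [rax], sil: REX (here 0x40, no bits set) is mandatory;
  // without it, the same ModRM reg field (6) would mean dh.
  const uint8_t xadd_sil[] = {0x40, 0x0F, 0xC0, 0x30};
  std::printf("bl: %zu bytes, sil: %zu bytes\n", sizeof(xadd_bl),
              sizeof(xadd_sil));
  return 0;
}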
@@ -552,6 +552,12 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
   bailout(kAtomics, "AtomicStore");
 }
 
+void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
+                                 uint32_t offset_imm, LiftoffRegister value,
+                                 StoreType type) {
+  bailout(kAtomics, "AtomicAdd");
+}
+
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
......
@@ -352,6 +352,12 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
   bailout(kAtomics, "AtomicStore");
 }
 
+void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
+                                 uint32_t offset_imm, LiftoffRegister value,
+                                 StoreType type) {
+  bailout(kAtomics, "AtomicAdd");
+}
+
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
......
@@ -491,6 +491,12 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
   }
 }
 
+void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
+                                 uint32_t offset_imm, LiftoffRegister value,
+                                 StoreType type) {
+  bailout(kAtomics, "AtomicAdd");
+}
+
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
......
@@ -444,6 +444,11 @@ class LiftoffAssembler : public TurboAssembler {
   inline void AtomicStore(Register dst_addr, Register offset_reg,
                           uint32_t offset_imm, LiftoffRegister src,
                           StoreType type, LiftoffRegList pinned);
 
+  inline void AtomicAdd(Register dst_addr, Register offset_reg,
+                        uint32_t offset_imm, LiftoffRegister result,
+                        StoreType type);
+
   inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
                                   ValueType);
   inline void MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
......
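Every platform back end must define this hook. Outside of x64 (implemented at the end of this change), each back end gets the identical stub that calls bailout(kAtomics, "AtomicAdd"): a bailout makes Liftoff give up on the whole function, and V8 then compiles it with TurboFan instead, so atomic adds remain correct on those platforms, just without the baseline fast path.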
@@ -2255,6 +2255,38 @@ class LiftoffCompiler {
     }
   }
 
+  void AtomicAdd(FullDecoder* decoder, StoreType type,
+                 const MemoryAccessImmediate<validate>& imm) {
+    ValueType result_type = type.value_type();
+    LiftoffRegList pinned;
+    LiftoffRegister value = pinned.set(__ PopToRegister());
+    // We have to reuse the value register as the result register so that we
+    // don't run out of registers on ia32. We use the value register as the
+    // result register if it has no other uses; otherwise we allocate a new
+    // register and let the value register be spilled.
+    LiftoffRegister result = value;
+    if (__ cache_state()->is_used(value)) {
+      result = pinned.set(__ GetUnusedRegister(value.reg_class(), pinned));
+      __ Move(result, value, result_type);
+      pinned.clear(value);
+    }
+    Register index = pinned.set(__ PopToRegister(pinned)).gp();
+    if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned,
+                       kDoForceCheck)) {
+      return;
+    }
+    AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
+    uint32_t offset = imm.offset;
+    index = AddMemoryMasking(index, &offset, &pinned);
+    DEBUG_CODE_COMMENT("Atomic add");
+    Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+    LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+    __ AtomicAdd(addr, index, offset, result, type);
+    __ PushRegister(result_type, result);
+  }
+
 #define ATOMIC_STORE_LIST(V) \
   V(I32AtomicStore, kI32Store) \
   V(I64AtomicStore, kI64Store) \
@@ -2273,6 +2305,15 @@ class LiftoffCompiler {
   V(I64AtomicLoad16U, kI64Load16U) \
   V(I64AtomicLoad32U, kI64Load32U)
 
+#define ATOMIC_ADD_LIST(V)        \
+  V(I32AtomicAdd, kI32Store)      \
+  V(I64AtomicAdd, kI64Store)      \
+  V(I32AtomicAdd8U, kI32Store8)   \
+  V(I32AtomicAdd16U, kI32Store16) \
+  V(I64AtomicAdd8U, kI64Store8)   \
+  V(I64AtomicAdd16U, kI64Store16) \
+  V(I64AtomicAdd32U, kI64Store32)
+
   void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
                 const MemoryAccessImmediate<validate>& imm, Value* result) {
     switch (opcode) {
@@ -2291,6 +2332,14 @@ class LiftoffCompiler {
       ATOMIC_LOAD_LIST(ATOMIC_LOAD_OP)
 #undef ATOMIC_LOAD_OP
 
+#define ATOMIC_ADD_OP(name, type)             \
+  case wasm::kExpr##name:                     \
+    AtomicAdd(decoder, StoreType::type, imm); \
+    break;
+
+      ATOMIC_ADD_LIST(ATOMIC_ADD_OP)
+#undef ATOMIC_ADD_OP
+
       default:
         unsupported(decoder, kAtomics, "atomicop");
     }
@@ -2298,6 +2347,7 @@ class LiftoffCompiler {
 #undef ATOMIC_STORE_LIST
 #undef ATOMIC_LOAD_LIST
+#undef ATOMIC_ADD_LIST
 
   void AtomicFence(FullDecoder* decoder) {
     unsupported(decoder, kAtomics, "atomic.fence");
......
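ATOMIC_ADD_LIST follows the same X-macro pattern as the existing store and load lists: the list macro takes another macro V and applies it to each (opcode, store type) pair, so a single list definition expands into one switch case per atomic-add variant, and the list itself is written only once. A standalone sketch of the pattern, with hypothetical names rather than the V8 macros:

#include <cstdio>

// The list: each entry is an (opcode name, operand width) pair.
#define ADD_LIST(V) \
  V(Add32, 4)       \
  V(Add8U, 1)       \
  V(Add16U, 2)

// One expansion: generate an enum value per entry.
#define AS_ENUM(name, size) k##name,
enum Opcode { ADD_LIST(AS_ENUM) };
#undef AS_ENUM

// Another expansion: generate a switch case per entry.
int OperandSize(Opcode op) {
  switch (op) {
#define AS_CASE(name, size) \
  case k##name:             \
    return size;
    ADD_LIST(AS_CASE)
#undef AS_CASE
  }
  return 0;
}

int main() { std::printf("%d\n", OperandSize(kAdd16U)); }  // prints 2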
@@ -538,6 +538,12 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
   bailout(kAtomics, "AtomicStore");
 }
 
+void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
+                                 uint32_t offset_imm, LiftoffRegister value,
+                                 StoreType type) {
+  bailout(kAtomics, "AtomicAdd");
+}
+
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
......
@@ -454,6 +454,12 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
   bailout(kAtomics, "AtomicStore");
 }
 
+void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
+                                 uint32_t offset_imm, LiftoffRegister value,
+                                 StoreType type) {
+  bailout(kAtomics, "AtomicAdd");
+}
+
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
......
@@ -140,6 +140,12 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
   bailout(kAtomics, "AtomicStore");
 }
 
+void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
+                                 uint32_t offset_imm, LiftoffRegister value,
+                                 StoreType type) {
+  bailout(kAtomics, "AtomicAdd");
+}
+
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
......
@@ -139,6 +139,12 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
   bailout(kAtomics, "AtomicStore");
 }
 
+void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
+                                 uint32_t offset_imm, LiftoffRegister value,
+                                 StoreType type) {
+  bailout(kAtomics, "AtomicAdd");
+}
+
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
......
@@ -381,6 +381,35 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
   }
 }
 
+void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
+                                 uint32_t offset_imm, LiftoffRegister value,
+                                 StoreType type) {
+  if (emit_debug_code() && offset_reg != no_reg) {
+    AssertZeroExtended(offset_reg);
+  }
+  Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+  lock();
+  switch (type.value()) {
+    case StoreType::kI32Store8:
+    case StoreType::kI64Store8:
+      xaddb(dst_op, value.gp());
+      break;
+    case StoreType::kI32Store16:
+    case StoreType::kI64Store16:
+      xaddw(dst_op, value.gp());
+      break;
+    case StoreType::kI32Store:
+    case StoreType::kI64Store32:
+      xaddl(dst_op, value.gp());
+      break;
+    case StoreType::kI64Store:
+      xaddq(dst_op, value.gp());
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
......
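On x64 the whole read-modify-write collapses into a single lock xadd: the instruction atomically stores the sum to memory and leaves the previous memory value in the source register. That matches the wasm atomic-add semantics (the operation returns the old value) and is why LiftoffCompiler::AtomicAdd passes the result register into the assembler as the addend and then pushes that same register. The behavior mirrors C++'s std::atomic fetch_add, which on x86-64 typically compiles down to the same lock xadd; a minimal sketch for comparison, not V8 code:

#include <atomic>
#include <cstdint>
#include <cstdio>

int main() {
  std::atomic<uint32_t> cell{40};
  // fetch_add atomically writes 40 + 2 to memory and returns the old
  // contents (40) -- exactly the register/memory exchange lock xadd does.
  uint32_t old = cell.fetch_add(2);
  std::printf("old=%u new=%u\n", old, cell.load());  // old=40 new=42
  return 0;
}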