Commit 62cde006 authored by Andreas Haas, committed by Commit Bot

[wasm][liftoff] Implement Atomic[And|Or|Xor] on x64

R=clemensb@chromium.org

Bug: v8:10108
Change-Id: I0a561a007ffd5327f47f68cc605b9eae57829d8e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2016596
Commit-Queue: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66010}
parent 3387b770
......@@ -558,6 +558,30 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
bailout(kAtomics, "AtomicAdd");
}
// Atomic read-modify-write operations are not implemented for this
// architecture yet; each stub records an atomics bailout so Liftoff gives
// up on the current function (NOTE(review): presumably deferring to the
// non-Liftoff compilation path -- confirm against the bailout handling).
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicSub");
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicAnd");
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicOr");
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicXor");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
......
......@@ -358,6 +358,30 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
bailout(kAtomics, "AtomicAdd");
}
// Atomic read-modify-write operations are not implemented for this
// architecture yet; each stub records an atomics bailout so Liftoff gives
// up on the current function (NOTE(review): presumably deferring to the
// non-Liftoff compilation path -- confirm against the bailout handling).
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicSub");
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicAnd");
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicOr");
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicXor");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
......
......@@ -497,6 +497,30 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
bailout(kAtomics, "AtomicAdd");
}
// Atomic read-modify-write operations are not implemented for this
// architecture yet; each stub records an atomics bailout so Liftoff gives
// up on the current function (NOTE(review): presumably deferring to the
// non-Liftoff compilation path -- confirm against the bailout handling).
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicSub");
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicAnd");
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicOr");
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicXor");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
......
......@@ -449,6 +449,22 @@ class LiftoffAssembler : public TurboAssembler {
uint32_t offset_imm, LiftoffRegister result,
StoreType type);
inline void AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister result,
StoreType type);
inline void AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister result,
StoreType type);
inline void AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister result,
StoreType type);
inline void AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister result,
StoreType type);
inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
ValueType);
inline void MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
......
......@@ -2284,15 +2284,18 @@ class LiftoffCompiler {
}
}
void AtomicAdd(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm) {
void AtomicBinop(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm,
void (LiftoffAssembler::*emit_fn)(Register, Register,
uint32_t, LiftoffRegister,
StoreType)) {
ValueType result_type = type.value_type();
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
// We have to reuse the value register as the result register so that we
// don't run out of registers on ia32. For this we use the value register as
// the result register if it has no other uses, or we allocate a new
// register and let go of the value register to get spilled.
// don't run out of registers on ia32. For this we use the value register
// as the result register if it has no other uses. Otherwise we allocate
// a new register and let go of the value register to get spilled.
LiftoffRegister result = value;
if (__ cache_state()->is_used(value)) {
result = pinned.set(__ GetUnusedRegister(value.reg_class(), pinned));
......@@ -2308,11 +2311,10 @@ class LiftoffCompiler {
uint32_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("Atomic add");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
__ AtomicAdd(addr, index, offset, result, type);
(asm_.*emit_fn)(addr, index, offset, result, type);
__ PushRegister(result_type, result);
}
......@@ -2334,14 +2336,42 @@ class LiftoffCompiler {
V(I64AtomicLoad16U, kI64Load16U) \
V(I64AtomicLoad32U, kI64Load32U)
#define ATOMIC_ADD_LIST(V) \
V(I32AtomicAdd, kI32Store) \
V(I64AtomicAdd, kI64Store) \
V(I32AtomicAdd8U, kI32Store8) \
V(I32AtomicAdd16U, kI32Store16) \
V(I64AtomicAdd8U, kI64Store8) \
V(I64AtomicAdd16U, kI64Store16) \
V(I64AtomicAdd32U, kI64Store32)
#define ATOMIC_BINOP_INSTRUCTION_LIST(V) \
V(Add, I32AtomicAdd, kI32Store) \
V(Add, I64AtomicAdd, kI64Store) \
V(Add, I32AtomicAdd8U, kI32Store8) \
V(Add, I32AtomicAdd16U, kI32Store16) \
V(Add, I64AtomicAdd8U, kI64Store8) \
V(Add, I64AtomicAdd16U, kI64Store16) \
V(Add, I64AtomicAdd32U, kI64Store32) \
V(Sub, I32AtomicSub, kI32Store) \
V(Sub, I64AtomicSub, kI64Store) \
V(Sub, I32AtomicSub8U, kI32Store8) \
V(Sub, I32AtomicSub16U, kI32Store16) \
V(Sub, I64AtomicSub8U, kI64Store8) \
V(Sub, I64AtomicSub16U, kI64Store16) \
V(Sub, I64AtomicSub32U, kI64Store32) \
V(And, I32AtomicAnd, kI32Store) \
V(And, I64AtomicAnd, kI64Store) \
V(And, I32AtomicAnd8U, kI32Store8) \
V(And, I32AtomicAnd16U, kI32Store16) \
V(And, I64AtomicAnd8U, kI64Store8) \
V(And, I64AtomicAnd16U, kI64Store16) \
V(And, I64AtomicAnd32U, kI64Store32) \
V(Or, I32AtomicOr, kI32Store) \
V(Or, I64AtomicOr, kI64Store) \
V(Or, I32AtomicOr8U, kI32Store8) \
V(Or, I32AtomicOr16U, kI32Store16) \
V(Or, I64AtomicOr8U, kI64Store8) \
V(Or, I64AtomicOr16U, kI64Store16) \
V(Or, I64AtomicOr32U, kI64Store32) \
V(Xor, I32AtomicXor, kI32Store) \
V(Xor, I64AtomicXor, kI64Store) \
V(Xor, I32AtomicXor8U, kI32Store8) \
V(Xor, I32AtomicXor16U, kI32Store16) \
V(Xor, I64AtomicXor8U, kI64Store8) \
V(Xor, I64AtomicXor16U, kI64Store16) \
V(Xor, I64AtomicXor32U, kI64Store32)
void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
const MemoryAccessImmediate<validate>& imm, Value* result) {
......@@ -2362,13 +2392,13 @@ class LiftoffCompiler {
ATOMIC_LOAD_LIST(ATOMIC_LOAD_OP)
#undef ATOMIC_LOAD_OP
#define ATOMIC_ADD_OP(name, type) \
#define ATOMIC_BINOP_OP(op, name, type) \
case wasm::kExpr##name: \
AtomicAdd(decoder, StoreType::type, imm); \
AtomicBinop(decoder, StoreType::type, imm, &LiftoffAssembler::Atomic##op); \
break;
ATOMIC_ADD_LIST(ATOMIC_ADD_OP)
#undef ATOMIC_ADD_OP
ATOMIC_BINOP_INSTRUCTION_LIST(ATOMIC_BINOP_OP)
#undef ATOMIC_BINOP_OP
default:
unsupported(decoder, kAtomics, "atomicop");
}
......@@ -2376,7 +2406,7 @@ class LiftoffCompiler {
#undef ATOMIC_STORE_LIST
#undef ATOMIC_LOAD_LIST
#undef ATOMIC_ADD_LIST
#undef ATOMIC_BINOP_INSTRUCTION_LIST
void AtomicFence(FullDecoder* decoder) {
unsupported(decoder, kAtomics, "atomic.fence");
......
......@@ -544,6 +544,30 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
bailout(kAtomics, "AtomicAdd");
}
// Atomic read-modify-write operations are not implemented for this
// architecture yet; each stub records an atomics bailout so Liftoff gives
// up on the current function (NOTE(review): presumably deferring to the
// non-Liftoff compilation path -- confirm against the bailout handling).
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicSub");
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicAnd");
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicOr");
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicXor");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
......
......@@ -460,6 +460,30 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
bailout(kAtomics, "AtomicAdd");
}
// Atomic read-modify-write operations are not implemented for this
// architecture yet; each stub records an atomics bailout so Liftoff gives
// up on the current function (NOTE(review): presumably deferring to the
// non-Liftoff compilation path -- confirm against the bailout handling).
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicSub");
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicAnd");
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicOr");
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicXor");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
......
......@@ -146,6 +146,30 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
bailout(kAtomics, "AtomicAdd");
}
// Atomic read-modify-write operations are not implemented for this
// architecture yet; each stub records an atomics bailout so Liftoff gives
// up on the current function (NOTE(review): presumably deferring to the
// non-Liftoff compilation path -- confirm against the bailout handling).
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicSub");
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicAnd");
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicOr");
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicXor");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
......
......@@ -145,6 +145,30 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
bailout(kAtomics, "AtomicAdd");
}
// Atomic read-modify-write operations are not implemented for this
// architecture yet; each stub records an atomics bailout so Liftoff gives
// up on the current function (NOTE(review): presumably deferring to the
// non-Liftoff compilation path -- confirm against the bailout handling).
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicSub");
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicAnd");
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicOr");
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicXor");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
......
......@@ -412,6 +412,126 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
}
}
// AtomicSub is not implemented on x64 yet (only And/Or/Xor use the
// cmpxchg helper); record an atomics bailout so Liftoff gives up on the
// current function.
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
bailout(kAtomics, "AtomicSub");
}
namespace liftoff {
// Emits an atomic read-modify-write sequence that applies a binary
// operation to the memory operand [dst_addr + offset_reg + offset_imm]
// and `value`, via a lock-prefixed cmpxchg retry loop. The previous
// memory value ends up in `value`'s gp register (see the final movq).
// `opl` is the 32-bit emitter for the operation (e.g. andl/orl/xorl),
// `opq` the 64-bit emitter (andq/orq/xorq); `opl` is used for all
// narrower-than-64-bit store types, `opq` only for kI64Store.
inline void AtomicBinop(LiftoffAssembler* lasm,
void (Assembler::*opl)(Register, Register),
void (Assembler::*opq)(Register, Register),
Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
#define __ lasm->
DCHECK(!__ cache_state()->is_used(value));
// The cmpxchg instruction uses rax to store the old value of the
// compare-exchange primitive. Therefore we have to spill the register and
// move any use to another register.
liftoff::SpillRegisters(lasm, rax);
Register value_reg = value.gp();
LiftoffRegList pinned =
LiftoffRegList::ForRegs(dst_addr, offset_reg, value_reg);
if (pinned.has(rax)) {
// One of the inputs currently lives in rax: redirect every such input
// to a freshly allocated register and copy rax's contents over, so rax
// is free for cmpxchg's implicit use.
Register replacement =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
for (Register* reg : {&dst_addr, &offset_reg, &value_reg}) {
if (*reg == rax) {
*reg = replacement;
}
}
__ movq(replacement, rax);
}
if (__ emit_debug_code() && offset_reg != no_reg) {
__ AssertZeroExtended(offset_reg);
}
Operand dst_op = liftoff::GetMemOp(lasm, dst_addr, offset_reg, offset_imm);
switch (type.value()) {
case StoreType::kI32Store8:
case StoreType::kI64Store8: {
Label binop;
// Clear rax first: movb only writes the low byte, and rax is later
// returned as the (zero-extended) old value.
__ xorq(rax, rax);
__ movb(rax, dst_op);
__ bind(&binop);
// Compute old-value OP value into the scratch register, then attempt
// to publish it: cmpxchg succeeds only if memory still equals rax.
__ movl(kScratchRegister, rax);
(lasm->*opl)(kScratchRegister, value_reg);
__ lock();
__ cmpxchgb(dst_op, kScratchRegister);
// On failure, cmpxchg has reloaded the current memory value into rax,
// so we can retry directly.
__ j(not_equal, &binop);
break;
}
case StoreType::kI32Store16:
case StoreType::kI64Store16: {
// Same retry loop for 16-bit values (rax cleared for the same reason
// as in the 8-bit case).
Label binop;
__ xorq(rax, rax);
__ movw(rax, dst_op);
__ bind(&binop);
__ movl(kScratchRegister, rax);
(lasm->*opl)(kScratchRegister, value_reg);
__ lock();
__ cmpxchgw(dst_op, kScratchRegister);
__ j(not_equal, &binop);
break;
}
case StoreType::kI32Store:
case StoreType::kI64Store32: {
// 32-bit variant; movl zero-extends into rax, so no explicit
// clearing is needed.
Label binop;
__ movl(rax, dst_op);
__ bind(&binop);
__ movl(kScratchRegister, rax);
(lasm->*opl)(kScratchRegister, value_reg);
__ lock();
__ cmpxchgl(dst_op, kScratchRegister);
__ j(not_equal, &binop);
break;
}
case StoreType::kI64Store: {
// Full 64-bit variant, using the 64-bit operation emitter.
Label binop;
__ movq(rax, dst_op);
__ bind(&binop);
__ movq(kScratchRegister, rax);
(lasm->*opq)(kScratchRegister, value_reg);
__ lock();
__ cmpxchgq(dst_op, kScratchRegister);
__ j(not_equal, &binop);
break;
}
default:
UNREACHABLE();
}
// The old memory value is in rax after the loop; move it into the
// register the caller expects the result in.
if (value.gp() != rax) {
__ movq(value.gp(), rax);
}
}
#undef __
} // namespace liftoff
// Atomic "and": delegates to the generic cmpxchg retry-loop helper with
// the 32-bit (andl) and 64-bit (andq) emitters; the previous memory
// value is left in `value`.
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
liftoff::AtomicBinop(this, &Assembler::andl, &Assembler::andq, dst_addr,
offset_reg, offset_imm, value, type);
}
// Atomic "or": delegates to the generic cmpxchg retry-loop helper with
// the 32-bit (orl) and 64-bit (orq) emitters; the previous memory value
// is left in `value`.
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
liftoff::AtomicBinop(this, &Assembler::orl, &Assembler::orq, dst_addr,
offset_reg, offset_imm, value, type);
}
// Atomic "xor": delegates to the generic cmpxchg retry-loop helper with
// the 32-bit (xorl) and 64-bit (xorq) emitters; the previous memory
// value is left in `value`.
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
StoreType type) {
liftoff::AtomicBinop(this, &Assembler::xorl, &Assembler::xorq, dst_addr,
offset_reg, offset_imm, value, type);
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment