Commit fda7ef4f authored by Andreas Haas, committed by Commit Bot

Revert "[wasm][liftoff] Implement Atomic(Compare)Exchange on x64"

This reverts commit c8e10a16.

Reason for revert: This CL causes problems with webpages in the wild: https://crbug.com/1056295

Original change's description:
> [wasm][liftoff] Implement Atomic(Compare)Exchange on x64
> 
> R=clemensb@chromium.org
> 
> Bug: v8:10108
> Change-Id: Ic9ef9ba35218450d3f9e7838890c82b785c34da4
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2037433
> Commit-Queue: Andreas Haas <ahaas@chromium.org>
> Reviewed-by: Clemens Backes <clemensb@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#66429}

TBR=ahaas@chromium.org,clemensb@chromium.org

# Not skipping CQ checks because original CL landed > 1 day ago.

Bug: v8:10108
Change-Id: I69251b9e3de13d8314e413a4196c8185a73bfb5c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2078544
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66494}
parent fdf00b1e
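
Context for the diff below: the reverted CL taught Liftoff (V8's baseline wasm compiler) to emit real x64 code for atomic exchange and compare-exchange instead of bailing out. As a reference point for the semantics only, and assuming nothing about V8 internals, wasm's compare-exchange maps onto the standard C++ atomics API roughly like this (illustrative sketch, not V8 code):

// Semantics of i32.atomic.rmw.cmpxchg expressed with std::atomic.
// Wasm atomics are sequentially consistent, which is also
// compare_exchange_strong's default memory order.
#include <atomic>
#include <cstdint>

uint32_t CmpxchgU32(std::atomic<uint32_t>* slot, uint32_t expected,
                    uint32_t new_value) {
  // On success *slot becomes new_value; on failure
  // compare_exchange_strong writes the observed value back into
  // `expected`. Either way `expected` now holds the old contents of
  // *slot, which is what the wasm instruction leaves on the stack.
  slot->compare_exchange_strong(expected, new_value);
  return expected;
}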
@@ -582,19 +582,6 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
   bailout(kAtomics, "AtomicXor");
 }
 
-void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
-                                      uint32_t offset_imm,
-                                      LiftoffRegister value, StoreType type) {
-  bailout(kAtomics, "AtomicExchange");
-}
-
-void LiftoffAssembler::AtomicCompareExchange(
-    Register dst_addr, Register offset_reg, uint32_t offset_imm,
-    LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
-    StoreType type) {
-  bailout(kAtomics, "AtomicCompareExchange");
-}
-
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
@@ -382,19 +382,6 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
   bailout(kAtomics, "AtomicXor");
 }
 
-void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
-                                      uint32_t offset_imm,
-                                      LiftoffRegister value, StoreType type) {
-  bailout(kAtomics, "AtomicExchange");
-}
-
-void LiftoffAssembler::AtomicCompareExchange(
-    Register dst_addr, Register offset_reg, uint32_t offset_imm,
-    LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
-    StoreType type) {
-  bailout(kAtomics, "AtomicCompareExchange");
-}
-
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
@@ -519,19 +519,6 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
   bailout(kAtomics, "AtomicXor");
 }
 
-void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
-                                      uint32_t offset_imm,
-                                      LiftoffRegister value, StoreType type) {
-  bailout(kAtomics, "AtomicExchange");
-}
-
-void LiftoffAssembler::AtomicCompareExchange(
-    Register dst_addr, Register offset_reg, uint32_t offset_imm,
-    LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
-    StoreType type) {
-  bailout(kAtomics, "AtomicCompareExchange");
-}
-
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
@@ -465,16 +465,6 @@ class LiftoffAssembler : public TurboAssembler {
                         uint32_t offset_imm, LiftoffRegister result,
                         StoreType type);
 
-  inline void AtomicExchange(Register dst_addr, Register offset_reg,
-                             uint32_t offset_imm, LiftoffRegister result,
-                             StoreType type);
-
-  inline void AtomicCompareExchange(Register dst_addr, Register offset_reg,
-                                    uint32_t offset_imm,
-                                    LiftoffRegister expected,
-                                    LiftoffRegister new_value,
-                                    LiftoffRegister value, StoreType type);
-
   inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
                                   ValueType);
   inline void MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
@@ -2371,38 +2371,6 @@ class LiftoffCompiler {
     __ PushRegister(result_type, result);
   }
 
-  void AtomicCompareExchange(FullDecoder* decoder, StoreType type,
-                             const MemoryAccessImmediate<validate>& imm) {
-#ifdef V8_TARGET_ARCH_IA32
-    // With the current implementation we do not have enough registers on ia32
-    // to even get to the platform-specific code. Therefore we bailout early.
-    unsupported(decoder, kAtomics, "AtomicCompareExchange");
-    return;
-#else
-    ValueType result_type = type.value_type();
-    LiftoffRegList pinned;
-    LiftoffRegister new_value = pinned.set(__ PopToRegister());
-    LiftoffRegister expected = pinned.set(__ PopToRegister());
-    Register index = pinned.set(__ PopToRegister(pinned)).gp();
-    if (BoundsCheckMem(decoder, type.size(), imm.offset, index, pinned,
-                       kDoForceCheck)) {
-      return;
-    }
-    AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
-
-    uint32_t offset = imm.offset;
-    index = AddMemoryMasking(index, &offset, &pinned);
-    Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
-    LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
-    LiftoffRegister result =
-        pinned.set(__ GetUnusedRegister(reg_class_for(result_type), pinned));
-
-    __ AtomicCompareExchange(addr, index, offset, expected, new_value, result,
-                             type);
-    __ PushRegister(result_type, result);
-#endif
-  }
-
 #define ATOMIC_STORE_LIST(V) \
   V(I32AtomicStore, kI32Store) \
   V(I64AtomicStore, kI64Store) \
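
Note on the hunk above: the removed AtomicCompareExchange hook follows Liftoff's usual memory-access sequence: pop the operands, bounds-check the index, alignment-check it (wasm atomics trap on unaligned accesses), apply optional index masking, then address memory relative to the instance's memory start. A schematic of those checks, with placeholder names rather than V8's actual helpers:

// Hedged schematic of the checks performed by the removed hook; every
// name here is a placeholder, not a real V8 helper.
#include <cstdint>

bool ComputeAtomicAddress(uintptr_t memory_start, uint64_t memory_size,
                          uint32_t index, uint32_t offset,
                          uint32_t access_size, uintptr_t* out_addr) {
  uint64_t effective = uint64_t{index} + offset;
  // Bounds check: the whole access must fit inside linear memory.
  if (effective + access_size > memory_size) return false;
  // Alignment check: wasm atomics require natural alignment.
  if (effective % access_size != 0) return false;
  *out_addr = memory_start + static_cast<uintptr_t>(effective);
  return true;
}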
@@ -2421,58 +2389,42 @@ class LiftoffCompiler {
   V(I64AtomicLoad16U, kI64Load16U) \
   V(I64AtomicLoad32U, kI64Load32U)
 
-#define ATOMIC_BINOP_INSTRUCTION_LIST(V) \
-  V(Add, I32AtomicAdd, kI32Store) \
-  V(Add, I64AtomicAdd, kI64Store) \
-  V(Add, I32AtomicAdd8U, kI32Store8) \
-  V(Add, I32AtomicAdd16U, kI32Store16) \
-  V(Add, I64AtomicAdd8U, kI64Store8) \
-  V(Add, I64AtomicAdd16U, kI64Store16) \
-  V(Add, I64AtomicAdd32U, kI64Store32) \
-  V(Sub, I32AtomicSub, kI32Store) \
-  V(Sub, I64AtomicSub, kI64Store) \
-  V(Sub, I32AtomicSub8U, kI32Store8) \
-  V(Sub, I32AtomicSub16U, kI32Store16) \
-  V(Sub, I64AtomicSub8U, kI64Store8) \
-  V(Sub, I64AtomicSub16U, kI64Store16) \
-  V(Sub, I64AtomicSub32U, kI64Store32) \
-  V(And, I32AtomicAnd, kI32Store) \
-  V(And, I64AtomicAnd, kI64Store) \
-  V(And, I32AtomicAnd8U, kI32Store8) \
-  V(And, I32AtomicAnd16U, kI32Store16) \
-  V(And, I64AtomicAnd8U, kI64Store8) \
-  V(And, I64AtomicAnd16U, kI64Store16) \
-  V(And, I64AtomicAnd32U, kI64Store32) \
-  V(Or, I32AtomicOr, kI32Store) \
-  V(Or, I64AtomicOr, kI64Store) \
-  V(Or, I32AtomicOr8U, kI32Store8) \
-  V(Or, I32AtomicOr16U, kI32Store16) \
-  V(Or, I64AtomicOr8U, kI64Store8) \
-  V(Or, I64AtomicOr16U, kI64Store16) \
-  V(Or, I64AtomicOr32U, kI64Store32) \
-  V(Xor, I32AtomicXor, kI32Store) \
-  V(Xor, I64AtomicXor, kI64Store) \
-  V(Xor, I32AtomicXor8U, kI32Store8) \
-  V(Xor, I32AtomicXor16U, kI32Store16) \
-  V(Xor, I64AtomicXor8U, kI64Store8) \
-  V(Xor, I64AtomicXor16U, kI64Store16) \
-  V(Xor, I64AtomicXor32U, kI64Store32) \
-  V(Exchange, I32AtomicExchange, kI32Store) \
-  V(Exchange, I64AtomicExchange, kI64Store) \
-  V(Exchange, I32AtomicExchange8U, kI32Store8) \
-  V(Exchange, I32AtomicExchange16U, kI32Store16) \
-  V(Exchange, I64AtomicExchange8U, kI64Store8) \
-  V(Exchange, I64AtomicExchange16U, kI64Store16) \
-  V(Exchange, I64AtomicExchange32U, kI64Store32)
-
-#define ATOMIC_COMPARE_EXCHANGE_LIST(V) \
-  V(I32AtomicCompareExchange, kI32Store) \
-  V(I64AtomicCompareExchange, kI64Store) \
-  V(I32AtomicCompareExchange8U, kI32Store8) \
-  V(I32AtomicCompareExchange16U, kI32Store16) \
-  V(I64AtomicCompareExchange8U, kI64Store8) \
-  V(I64AtomicCompareExchange16U, kI64Store16) \
-  V(I64AtomicCompareExchange32U, kI64Store32)
+#define ATOMIC_BINOP_INSTRUCTION_LIST(V) \
+  V(Add, I32AtomicAdd, kI32Store) \
+  V(Add, I64AtomicAdd, kI64Store) \
+  V(Add, I32AtomicAdd8U, kI32Store8) \
+  V(Add, I32AtomicAdd16U, kI32Store16) \
+  V(Add, I64AtomicAdd8U, kI64Store8) \
+  V(Add, I64AtomicAdd16U, kI64Store16) \
+  V(Add, I64AtomicAdd32U, kI64Store32) \
+  V(Sub, I32AtomicSub, kI32Store) \
+  V(Sub, I64AtomicSub, kI64Store) \
+  V(Sub, I32AtomicSub8U, kI32Store8) \
+  V(Sub, I32AtomicSub16U, kI32Store16) \
+  V(Sub, I64AtomicSub8U, kI64Store8) \
+  V(Sub, I64AtomicSub16U, kI64Store16) \
+  V(Sub, I64AtomicSub32U, kI64Store32) \
+  V(And, I32AtomicAnd, kI32Store) \
+  V(And, I64AtomicAnd, kI64Store) \
+  V(And, I32AtomicAnd8U, kI32Store8) \
+  V(And, I32AtomicAnd16U, kI32Store16) \
+  V(And, I64AtomicAnd8U, kI64Store8) \
+  V(And, I64AtomicAnd16U, kI64Store16) \
+  V(And, I64AtomicAnd32U, kI64Store32) \
+  V(Or, I32AtomicOr, kI32Store) \
+  V(Or, I64AtomicOr, kI64Store) \
+  V(Or, I32AtomicOr8U, kI32Store8) \
+  V(Or, I32AtomicOr16U, kI32Store16) \
+  V(Or, I64AtomicOr8U, kI64Store8) \
+  V(Or, I64AtomicOr16U, kI64Store16) \
+  V(Or, I64AtomicOr32U, kI64Store32) \
+  V(Xor, I32AtomicXor, kI32Store) \
+  V(Xor, I64AtomicXor, kI64Store) \
+  V(Xor, I32AtomicXor8U, kI32Store8) \
+  V(Xor, I32AtomicXor16U, kI32Store16) \
+  V(Xor, I64AtomicXor8U, kI64Store8) \
+  V(Xor, I64AtomicXor16U, kI64Store16) \
+  V(Xor, I64AtomicXor32U, kI64Store32)
 
   void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
                 const MemoryAccessImmediate<validate>& imm, Value* result) {
@@ -2500,14 +2452,6 @@ class LiftoffCompiler {
       ATOMIC_BINOP_INSTRUCTION_LIST(ATOMIC_BINOP_OP)
 #undef ATOMIC_BINOP_OP
-
-#define ATOMIC_COMPARE_EXCHANGE_OP(name, type)            \
-  case wasm::kExpr##name:                                  \
-    AtomicCompareExchange(decoder, StoreType::type, imm);  \
-    break;
-
-      ATOMIC_COMPARE_EXCHANGE_LIST(ATOMIC_COMPARE_EXCHANGE_OP)
-#undef ATOMIC_COMPARE_EXCHANGE_OP
       default:
         unsupported(decoder, kAtomics, "atomicop");
     }
@@ -2516,7 +2460,6 @@ class LiftoffCompiler {
 #undef ATOMIC_STORE_LIST
 #undef ATOMIC_LOAD_LIST
 #undef ATOMIC_BINOP_INSTRUCTION_LIST
-#undef ATOMIC_COMPARE_EXCHANGE_LIST
 
   void AtomicFence(FullDecoder* decoder) {
     unsupported(decoder, kAtomics, "atomic.fence");
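
A side note on form: the ATOMIC_*_LIST definitions and the ATOMIC_*_OP expansions in the hunks above are the classic X-macro pattern, which keeps the opcode table in exactly one place. A minimal self-contained sketch of the same technique, with hypothetical names rather than the V8 lists:

// X-macro sketch: the list macro is re-expanded at each use site, so
// adding an opcode means touching only the list itself.
#define MY_BINOP_LIST(V)  \
  V(Add, kExprAtomicAdd)  \
  V(Sub, kExprAtomicSub)

enum Opcode { kExprAtomicAdd, kExprAtomicSub, kExprOther };

const char* OpcodeName(Opcode opcode) {
  switch (opcode) {
#define CASE(name, opcode_id) \
  case opcode_id:             \
    return #name;
    MY_BINOP_LIST(CASE)
#undef CASE
    default:
      return "unknown";
  }
}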
@@ -568,19 +568,6 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
   bailout(kAtomics, "AtomicXor");
 }
 
-void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
-                                      uint32_t offset_imm,
-                                      LiftoffRegister value, StoreType type) {
-  bailout(kAtomics, "AtomicExchange");
-}
-
-void LiftoffAssembler::AtomicCompareExchange(
-    Register dst_addr, Register offset_reg, uint32_t offset_imm,
-    LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
-    StoreType type) {
-  bailout(kAtomics, "AtomicCompareExchange");
-}
-
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
@@ -484,19 +484,6 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
   bailout(kAtomics, "AtomicXor");
 }
 
-void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
-                                      uint32_t offset_imm,
-                                      LiftoffRegister value, StoreType type) {
-  bailout(kAtomics, "AtomicExchange");
-}
-
-void LiftoffAssembler::AtomicCompareExchange(
-    Register dst_addr, Register offset_reg, uint32_t offset_imm,
-    LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
-    StoreType type) {
-  bailout(kAtomics, "AtomicCompareExchange");
-}
-
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
@@ -170,19 +170,6 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
   bailout(kAtomics, "AtomicXor");
 }
 
-void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
-                                      uint32_t offset_imm,
-                                      LiftoffRegister value, StoreType type) {
-  bailout(kAtomics, "AtomicExchange");
-}
-
-void LiftoffAssembler::AtomicCompareExchange(
-    Register dst_addr, Register offset_reg, uint32_t offset_imm,
-    LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
-    StoreType type) {
-  bailout(kAtomics, "AtomicCompareExchange");
-}
-
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
@@ -169,19 +169,6 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
   bailout(kAtomics, "AtomicXor");
 }
 
-void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
-                                      uint32_t offset_imm,
-                                      LiftoffRegister value, StoreType type) {
-  bailout(kAtomics, "AtomicExchange");
-}
-
-void LiftoffAssembler::AtomicCompareExchange(
-    Register dst_addr, Register offset_reg, uint32_t offset_imm,
-    LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
-    StoreType type) {
-  bailout(kAtomics, "AtomicCompareExchange");
-}
-
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
@@ -460,41 +460,32 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
 }
 
 namespace liftoff {
-#define __ lasm->
-
-// Checks if a register in {possible_uses} uses {reg}. If so, it allocates a
-// replacement register for that use, and moves the content of {reg} to {use}.
-// The replacement register is written into the pointer stored in
-// {possible_uses}.
-inline void ClearRegister(LiftoffAssembler* lasm, Register reg,
-                          std::initializer_list<Register*> possible_uses,
-                          LiftoffRegList pinned) {
-  liftoff::SpillRegisters(lasm, reg);
-  Register replacement = no_reg;
-  for (Register* use : possible_uses) {
-    if (reg != *use) continue;
-    if (replacement == no_reg) {
-      replacement = __ GetUnusedRegister(kGpReg, pinned).gp();
-      __ movq(replacement, reg);
-    }
-    // We cannot leave this loop early. There may be multiple uses of {reg}.
-    *use = replacement;
-  }
-}
-
 inline void AtomicBinop(LiftoffAssembler* lasm,
                         void (Assembler::*opl)(Register, Register),
                         void (Assembler::*opq)(Register, Register),
                         Register dst_addr, Register offset_reg,
                         uint32_t offset_imm, LiftoffRegister value,
                         StoreType type) {
+#define __ lasm->
   DCHECK(!__ cache_state()->is_used(value));
-  Register value_reg = value.gp();
   // The cmpxchg instruction uses rax to store the old value of the
   // compare-exchange primitive. Therefore we have to spill the register and
   // move any use to another register.
+  liftoff::SpillRegisters(lasm, rax);
+  Register value_reg = value.gp();
   LiftoffRegList pinned =
       LiftoffRegList::ForRegs(dst_addr, offset_reg, value_reg);
-  ClearRegister(lasm, rax, {&dst_addr, &offset_reg, &value_reg}, pinned);
+  if (pinned.has(rax)) {
+    Register replacement =
+        pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+    for (Register* reg : {&dst_addr, &offset_reg, &value_reg}) {
+      if (*reg == rax) {
+        *reg = replacement;
+      }
+    }
+    __ movq(replacement, rax);
+  }
   if (__ emit_debug_code() && offset_reg != no_reg) {
     __ AssertZeroExtended(offset_reg);
   }
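
The rax shuffling in the hunk above exists on both sides of the revert because cmpxchg's compare operand is architecturally fixed: the instruction always compares against, and writes back through, rax. A standalone illustration of that constraint, assuming a GCC/Clang-style toolchain (not V8 code):

#include <cstdint>

// lock cmpxchg compares RAX with *ptr: on a match it stores `desired`
// into *ptr and sets ZF; on a mismatch it loads the current *ptr into
// RAX. The "+a" constraint pins `expected` to RAX, which is exactly
// why Liftoff must spill rax and relocate any other use of it first.
inline bool CompareAndSwap64(uint64_t* ptr, uint64_t& expected,
                             uint64_t desired) {
  bool success;
  asm volatile("lock cmpxchgq %3, %1\n\t"
               "sete %0"
               : "=q"(success), "+m"(*ptr), "+a"(expected)
               : "r"(desired)
               : "cc", "memory");
  return success;
}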
@@ -582,90 +573,6 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
                       offset_reg, offset_imm, value, type);
 }
 
-void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
-                                      uint32_t offset_imm,
-                                      LiftoffRegister value, StoreType type) {
-  DCHECK(!cache_state()->is_used(value));
-  if (emit_debug_code() && offset_reg != no_reg) {
-    AssertZeroExtended(offset_reg);
-  }
-  Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
-  switch (type.value()) {
-    case StoreType::kI32Store8:
-    case StoreType::kI64Store8:
-      xchgb(value.gp(), dst_op);
-      movzxbq(value.gp(), value.gp());
-      break;
-    case StoreType::kI32Store16:
-    case StoreType::kI64Store16:
-      xchgw(value.gp(), dst_op);
-      movzxwq(value.gp(), value.gp());
-      break;
-    case StoreType::kI32Store:
-    case StoreType::kI64Store32:
-      xchgl(value.gp(), dst_op);
-      break;
-    case StoreType::kI64Store:
-      xchgq(value.gp(), dst_op);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-void LiftoffAssembler::AtomicCompareExchange(
-    Register dst_addr, Register offset_reg, uint32_t offset_imm,
-    LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
-    StoreType type) {
-  Register value_reg = new_value.gp();
-  // The cmpxchg instruction uses rax to store the old value of the
-  // compare-exchange primitive. Therefore we have to spill the register and
-  // move any use to another register.
-  LiftoffRegList pinned =
-      LiftoffRegList::ForRegs(dst_addr, offset_reg, expected, value_reg);
-  liftoff::ClearRegister(this, rax, {&dst_addr, &offset_reg, &value_reg},
-                         pinned);
-  if (expected.gp() != rax) {
-    movq(rax, expected.gp());
-  }
-
-  if (emit_debug_code() && offset_reg != no_reg) {
-    AssertZeroExtended(offset_reg);
-  }
-
-  Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
-
-  lock();
-  switch (type.value()) {
-    case StoreType::kI32Store8:
-    case StoreType::kI64Store8: {
-      cmpxchgb(dst_op, value_reg);
-      movzxbq(rax, rax);
-      break;
-    }
-    case StoreType::kI32Store16:
-    case StoreType::kI64Store16: {
-      cmpxchgw(dst_op, value_reg);
-      movzxwq(rax, rax);
-      break;
-    }
-    case StoreType::kI32Store:
-    case StoreType::kI64Store32: {
-      cmpxchgl(dst_op, value_reg);
-      break;
-    }
-    case StoreType::kI64Store: {
-      cmpxchgq(dst_op, value_reg);
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-
-  if (result.gp() != rax) {
-    movq(result.gp(), rax);
-  }
-}
-
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx,
                                            ValueType type) {
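
One detail of the removed AtomicExchange worth spelling out: xchgb/xchgw only write the low 8/16 bits of the result register, while wasm's *8U/*16U instructions must produce a zero-extended i32/i64 result, hence the paired movzxbq/movzxwq. A C++ analogue of that requirement (illustrative only, not V8 code):

#include <atomic>
#include <cstdint>

// Why xchgb is paired with movzxbq above: an 8-bit exchange yields an
// 8-bit value, but i64.atomic.rmw8.xchg_u returns it zero-extended to
// the full result width.
uint64_t Exchange8ZeroExtended(std::atomic<uint8_t>* slot, uint8_t value) {
  uint8_t old = slot->exchange(value);  // the xchgb step
  return uint64_t{old};                 // the movzx step: zero-extend
}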