Commit 57817760 authored by Andreas Haas, committed by Commit Bot

[wasm][ia32][liftoff] Implement 32-bit atomic sub

R=clemensb@chromium.org

Bug: v8:10108
Change-Id: I5050d58d9601214e88bb88b24502f288de0b7be8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2264357
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68517}
parent 3e3403ea
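
Background for the change: ia32 has no atomic subtract instruction, so the patch funnels AtomicSub through the same `lock xadd` path as AtomicAdd, negating the operand first (the new `neg(value_reg)` in the diff below). A minimal sketch of that equivalence in portable C++ follows; AtomicSub32 is a hypothetical name for illustration, not V8 code:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Sketch only: an atomic subtract expressed as an atomic add of the
    // negated operand, mirroring the `neg` + `lock xadd` sequence emitted
    // by AtomicAddOrSub32. Like xadd, fetch_add returns the *old* value.
    uint32_t AtomicSub32(std::atomic<uint32_t>* dst, uint32_t value) {
      // Unsigned negation is well-defined: -value == 2^32 - value, so
      // adding it is exactly subtraction modulo 2^32.
      return dst->fetch_add(-value);
    }

    int main() {
      std::atomic<uint32_t> cell{100};
      uint32_t old = AtomicSub32(&cell, 30);
      assert(old == 100);         // old value comes back, as with xadd
      assert(cell.load() == 70);  // memory holds the difference
      return 0;
    }
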
@@ -487,16 +487,17 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
   }
 }
 
-void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
-                                 uint32_t offset_imm, LiftoffRegister value,
-                                 LiftoffRegister result, StoreType type) {
-  if (type.value() == StoreType::kI64Store) {
-    bailout(kAtomics, "AtomicAdd");
-    return;
-  }
-
+namespace liftoff {
+#define __ lasm->
+
+enum class AddOrSub { kAdd, kSub };
+
+inline void AtomicAddOrSub32(LiftoffAssembler* lasm, AddOrSub add_or_sub,
+                             Register dst_addr, Register offset_reg,
+                             uint32_t offset_imm, LiftoffRegister value,
+                             LiftoffRegister result, StoreType type) {
   DCHECK_EQ(value, result);
-  DCHECK(!cache_state()->is_used(result));
+  DCHECK(!__ cache_state()->is_used(result));
   bool is_64_bit_op = type.value_type() == kWasmI64;
 
   Register value_reg = is_64_bit_op ? value.low_gp() : value.gp();
@@ -509,43 +510,65 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
   // Ensure that {value_reg} is a valid register.
   if (is_byte_store && !liftoff::kByteRegs.has(value_reg)) {
     Register safe_value_reg =
-        GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
-    mov(safe_value_reg, value_reg);
+        __ GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
+    __ mov(safe_value_reg, value_reg);
     value_reg = safe_value_reg;
   }
 
   Operand dst_op = Operand(dst_addr, offset_reg, times_1, offset_imm);
-  lock();
+  if (add_or_sub == AddOrSub::kSub) {
+    __ neg(value_reg);
+  }
+  __ lock();
   switch (type.value()) {
     case StoreType::kI64Store8:
     case StoreType::kI32Store8:
-      xadd_b(dst_op, value_reg);
-      movzx_b(result_reg, value_reg);
+      __ xadd_b(dst_op, value_reg);
+      __ movzx_b(result_reg, value_reg);
       break;
     case StoreType::kI64Store16:
     case StoreType::kI32Store16:
-      xadd_w(dst_op, value_reg);
-      movzx_w(result_reg, value_reg);
+      __ xadd_w(dst_op, value_reg);
+      __ movzx_w(result_reg, value_reg);
       break;
     case StoreType::kI64Store32:
     case StoreType::kI32Store:
-      xadd(dst_op, value_reg);
+      __ xadd(dst_op, value_reg);
       if (value_reg != result_reg) {
-        mov(result_reg, value_reg);
+        __ mov(result_reg, value_reg);
       }
       break;
     default:
       UNREACHABLE();
   }
 
   if (is_64_bit_op) {
-    xor_(result.high_gp(), result.high_gp());
+    __ xor_(result.high_gp(), result.high_gp());
   }
 }
+#undef __
+}  // namespace liftoff
+
+void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
+                                 uint32_t offset_imm, LiftoffRegister value,
+                                 LiftoffRegister result, StoreType type) {
+  if (type.value() == StoreType::kI64Store) {
+    bailout(kAtomics, "AtomicAdd");
+    return;
+  }
+  liftoff::AtomicAddOrSub32(this, liftoff::AddOrSub::kAdd, dst_addr, offset_reg,
+                            offset_imm, value, result, type);
+}
 
 void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
                                  uint32_t offset_imm, LiftoffRegister value,
                                  LiftoffRegister result, StoreType type) {
-  bailout(kAtomics, "AtomicSub");
+  if (type.value() == StoreType::kI64Store) {
+    bailout(kAtomics, "AtomicSub");
+    return;
+  }
+  liftoff::AtomicAddOrSub32(this, liftoff::AddOrSub::kSub, dst_addr, offset_reg,
+                            offset_imm, value, result, type);
 }
 
 void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
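
A note on the result handling above: `xadd` leaves the old memory value in `value_reg`, so the byte and word cases zero-extend it into `result_reg` via `movzx` (the `kByteRegs` shuffle exists because only eax, ecx, edx, and ebx have 8-bit forms on ia32), and for the i64 variants, which Liftoff models as a low/high register pair on this 32-bit target, the high word is cleared with `xor`. A small illustration of that zero-extension, with RegisterPair as a hypothetical stand-in for the pair, not a V8 type:

    #include <cassert>
    #include <cstdint>

    // Hypothetical model of how a 32-bit result is widened to i64 on ia32:
    // the old value lands in the low register; `xor high, high` zeroes the
    // high register, giving a zero-extended 64-bit result.
    struct RegisterPair {
      uint32_t low;
      uint32_t high;
    };

    uint64_t AsI64(RegisterPair r) {
      return (static_cast<uint64_t>(r.high) << 32) | r.low;
    }

    int main() {
      RegisterPair result{0xdeadbeefu, 0x12345678u};  // high word: stale bits
      result.high ^= result.high;  // the diff's xor_(high_gp, high_gp)
      assert(AsI64(result) == 0xdeadbeefull);
      return 0;
    }
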