Commit c508ff8c authored by Junliang Yan, committed by V8 LUCI CQ

Reland "ppc: [liftoff] implement AtomicExch and AtomicCmpExch"

This is a reland of 3600aabf

Original change's description:
> ppc: [liftoff] implement AtomicExch and AtomicCmpExch
>
> Change-Id: Ida66b9c42cfb9bd5b59a83188a2dfa0d602d4036
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3192427
> Reviewed-by: Milad Fa <mfarazma@redhat.com>
> Commit-Queue: Junliang Yan <junyan@redhat.com>
> Cr-Commit-Position: refs/heads/main@{#77148}

Change-Id: I84dc2d2c429c1f1646d0b97036ad9baa96961e56
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3216042
Commit-Queue: Junliang Yan <junyan@redhat.com>
Reviewed-by: Milad Fa <mfarazma@redhat.com>
Cr-Commit-Position: refs/heads/main@{#77381}
parent 84cfc9ca
@@ -614,14 +614,144 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
                                       uintptr_t offset_imm,
                                       LiftoffRegister value,
                                       LiftoffRegister result, StoreType type) {
-  bailout(kAtomics, "AtomicExchange");
+#if defined(V8_OS_AIX)
+  bailout(kUnsupportedArchitecture, "atomic");
+#else
+  Register offset = r0;
+  if (offset_imm != 0) {
+    mov(ip, Operand(offset_imm));
+    if (offset_reg != no_reg) {
+      add(ip, ip, offset_reg);
+    }
+    offset = ip;
+  } else {
+    if (offset_reg != no_reg) {
+      offset = offset_reg;
+    }
+  }
+  MemOperand dst = MemOperand(offset, dst_addr);
+  switch (type.value()) {
+    case StoreType::kI32Store8:
+    case StoreType::kI64Store8: {
+      TurboAssembler::AtomicExchange<uint8_t>(dst, value.gp(), result.gp());
+      break;
+    }
+    case StoreType::kI32Store16:
+    case StoreType::kI64Store16: {
+      if (is_be) {
+        ByteReverseU16(r0, value.gp());
+        TurboAssembler::AtomicExchange<uint16_t>(dst, r0, result.gp());
+        ByteReverseU16(result.gp(), result.gp());
+      } else {
+        TurboAssembler::AtomicExchange<uint16_t>(dst, value.gp(), result.gp());
+      }
+      break;
+    }
+    case StoreType::kI32Store:
+    case StoreType::kI64Store32: {
+      if (is_be) {
+        ByteReverseU32(r0, value.gp());
+        TurboAssembler::AtomicExchange<uint32_t>(dst, r0, result.gp());
+        ByteReverseU32(result.gp(), result.gp());
+      } else {
+        TurboAssembler::AtomicExchange<uint32_t>(dst, value.gp(), result.gp());
+      }
+      break;
+    }
+    case StoreType::kI64Store: {
+      if (is_be) {
+        ByteReverseU64(r0, value.gp());
+        TurboAssembler::AtomicExchange<uint64_t>(dst, r0, result.gp());
+        ByteReverseU64(result.gp(), result.gp());
+      } else {
+        TurboAssembler::AtomicExchange<uint64_t>(dst, value.gp(), result.gp());
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+#endif
 }
 
 void LiftoffAssembler::AtomicCompareExchange(
     Register dst_addr, Register offset_reg, uintptr_t offset_imm,
     LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
     StoreType type) {
-  bailout(kAtomics, "AtomicCompareExchange");
+#if defined(V8_OS_AIX)
+  bailout(kUnsupportedArchitecture, "atomic");
+#else
+  Register offset = r0;
+  if (offset_imm != 0) {
+    mov(ip, Operand(offset_imm));
+    if (offset_reg != no_reg) {
+      add(ip, ip, offset_reg);
+    }
+    offset = ip;
+  } else {
+    if (offset_reg != no_reg) {
+      offset = offset_reg;
+    }
+  }
+  MemOperand dst = MemOperand(offset, dst_addr);
+  switch (type.value()) {
+    case StoreType::kI32Store8:
+    case StoreType::kI64Store8: {
+      TurboAssembler::AtomicCompareExchange<uint8_t>(
+          dst, expected.gp(), new_value.gp(), result.gp(), r0);
+      break;
+    }
+    case StoreType::kI32Store16:
+    case StoreType::kI64Store16: {
+      if (is_be) {
+        Push(r3, r4);
+        ByteReverseU16(r3, new_value.gp());
+        ByteReverseU16(r4, expected.gp());
+        TurboAssembler::AtomicCompareExchange<uint16_t>(dst, r4, r3,
+                                                        result.gp(), r0);
+        ByteReverseU16(result.gp(), result.gp());
+        Pop(r3, r4);
+      } else {
+        TurboAssembler::AtomicCompareExchange<uint16_t>(
+            dst, expected.gp(), new_value.gp(), result.gp(), r0);
+      }
+      break;
+    }
+    case StoreType::kI32Store:
+    case StoreType::kI64Store32: {
+      if (is_be) {
+        Push(r3, r4);
+        ByteReverseU32(r3, new_value.gp());
+        ByteReverseU32(r4, expected.gp());
+        TurboAssembler::AtomicCompareExchange<uint32_t>(dst, r4, r3,
+                                                        result.gp(), r0);
+        ByteReverseU32(result.gp(), result.gp());
+        Pop(r3, r4);
+      } else {
+        TurboAssembler::AtomicCompareExchange<uint32_t>(
+            dst, expected.gp(), new_value.gp(), result.gp(), r0);
+      }
+      break;
+    }
+    case StoreType::kI64Store: {
+      if (is_be) {
+        Push(r3, r4);
+        ByteReverseU64(r3, new_value.gp());
+        ByteReverseU64(r4, expected.gp());
+        TurboAssembler::AtomicCompareExchange<uint64_t>(dst, r4, r3,
+                                                        result.gp(), r0);
+        ByteReverseU64(result.gp(), result.gp());
+        Pop(r3, r4);
+      } else {
+        TurboAssembler::AtomicCompareExchange<uint64_t>(
+            dst, expected.gp(), new_value.gp(), result.gp(), r0);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+#endif
 }
 
 void LiftoffAssembler::AtomicFence() { sync(); }
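
Both new implementations share the same address setup: a nonzero offset_imm is materialized into ip and, if present, offset_reg is folded in; otherwise offset_reg is used directly. The default of r0 works because on PPC a zero RA field in an indexed memory access is read as literal zero, so MemOperand(r0, dst_addr) addresses dst_addr itself. A minimal C++ sketch of that logic, with illustrative names that are not V8 API:

#include <cstdint>

// Hypothetical helper mirroring the shared prologue above.
uintptr_t effective_address(uintptr_t dst_addr, uintptr_t offset_imm,
                            const uintptr_t* offset_reg /* null == no_reg */) {
  uintptr_t offset = 0;  // the role r0 plays by default
  if (offset_imm != 0) {
    offset = offset_imm;                               // mov(ip, Operand(...))
    if (offset_reg != nullptr) offset += *offset_reg;  // add(ip, ip, offset_reg)
  } else if (offset_reg != nullptr) {
    offset = *offset_reg;                              // use offset_reg directly
  }
  return dst_addr + offset;  // MemOperand(offset, dst_addr)
}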
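
Wasm memory is always little-endian, while these PPC targets may run big-endian; when is_be is set, AtomicExchange byte-reverses the operand before the exchange and reverses the loaded old value back, so memory itself always holds little-endian bytes. A minimal sketch of the same pattern using GCC/Clang builtins, not V8 code (the single-byte cases need no reversal):

#include <cstdint>

uint32_t wasm_atomic_exchange_u32(uint32_t* addr, uint32_t value_le) {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  uint32_t raw = __builtin_bswap32(value_le);  // ByteReverseU32(r0, value)
  uint32_t old = __atomic_exchange_n(addr, raw, __ATOMIC_SEQ_CST);
  return __builtin_bswap32(old);               // ByteReverseU32(result, result)
#else
  return __atomic_exchange_n(addr, value_le, __ATOMIC_SEQ_CST);
#endif
}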
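
AtomicCompareExchange applies the same idea to both inputs: expected and new_value are byte-reversed into r3/r4, which the Push/Pop pair saves and restores because they may hold live values, and the value actually observed in memory is reversed back into Wasm byte order. A companion sketch with the same builtins, again only an illustration of the pattern:

#include <cstdint>

uint64_t wasm_atomic_cmpxchg_u64(uint64_t* addr, uint64_t expected_le,
                                 uint64_t new_le) {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  uint64_t expected = __builtin_bswap64(expected_le);  // ByteReverseU64(r4, ...)
  uint64_t desired = __builtin_bswap64(new_le);        // ByteReverseU64(r3, ...)
  __atomic_compare_exchange_n(addr, &expected, desired, /*weak=*/false,
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  return __builtin_bswap64(expected);  // observed value, back to little-endian
#else
  __atomic_compare_exchange_n(addr, &expected_le, new_le, /*weak=*/false,
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  return expected_le;
#endif
}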
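
AtomicFence() stays unchanged: sync() emits the PPC sync instruction, a full barrier ordering all earlier memory accesses before all later ones. A portable sketch of an equivalent fence, not the V8 implementation, which GCC and Clang typically lower to sync on PPC:

#include <atomic>

// Full two-way barrier, the portable analogue of PPC `sync`.
void full_fence() { std::atomic_thread_fence(std::memory_order_seq_cst); }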