Commit 8fe01fea authored by Deepti Gandluri, committed by Commit Bot

[arm] Implement I64Atomic operations on Arm

 - Implement all the I64Atomic operations on ARM
 - Change assembler methods to use Registers instead of memory operands
 - Move atomics64 test up to be tested on all archs, disable tests on MIPS

Bug: v8:6532

Change-Id: I91bd42fa819f194be15c719266c36230f9c65db8
Reviewed-on: https://chromium-review.googlesource.com/1180211
Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Bill Budge <bbudge@chromium.org>
Reviewed-by: Ben Smith <binji@chromium.org>
Cr-Commit-Position: refs/heads/master@{#55416}
parent 4ac19c38
......@@ -2240,29 +2240,27 @@ void Assembler::strexh(Register src1, Register src2, Register dst,
0xF9 * B4 | src2.code());
}
void Assembler::ldrexd(Register dst1, Register dst2, const MemOperand& src,
void Assembler::ldrexd(Register dst1, Register dst2, Register src,
Condition cond) {
// cond(31-28) | 00011011(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
DCHECK(src.rm() == no_reg);
DCHECK(dst1 != lr); // r14.
// The pair of destination registers is restricted to being an even-numbered
// register and the odd-numbered register that immediately follows it.
DCHECK_EQ(0, dst1.code() % 2);
DCHECK_EQ(dst1.code() + 1, dst2.code());
emit(cond | B24 | B23 | B21 | B20 | src.rn_.code() * B16 | dst1.code() * B12 |
emit(cond | B24 | B23 | B21 | B20 | src.code() * B16 | dst1.code() * B12 |
0xF9F);
}
void Assembler::strexd(Register res, Register src1, Register src2,
const MemOperand& dst, Condition cond) {
void Assembler::strexd(Register res, Register src1, Register src2, Register dst,
Condition cond) {
// cond(31-28) | 00011010(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
DCHECK(dst.rm() == no_reg);
DCHECK(src1 != lr); // r14.
// The pair of source registers is restricted to being an even-numbered
// register and the odd-numbered register that immediately follows it.
DCHECK_EQ(0, src1.code() % 2);
DCHECK_EQ(src1.code() + 1, src2.code());
emit(cond | B24 | B23 | B21 | dst.rn_.code() * B16 | res.code() * B12 |
emit(cond | B24 | B23 | B21 | dst.code() * B16 | res.code() * B12 |
0xF9 * B4 | src1.code());
}
......
......@@ -930,9 +930,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void strexb(Register src1, Register src2, Register dst, Condition cond = al);
void ldrexh(Register dst, Register src, Condition cond = al);
void strexh(Register src1, Register src2, Register dst, Condition cond = al);
void ldrexd(Register dst1, Register dst2, const MemOperand& src,
Condition cond = al);
void strexd(Register res, Register src1, Register src2, const MemOperand& dst,
void ldrexd(Register dst1, Register dst2, Register src, Condition cond = al);
void strexd(Register res, Register src1, Register src2, Register dst,
Condition cond = al);
// Preload instructions
......
This diff is collapsed.
......@@ -269,7 +269,37 @@ namespace compiler {
V(ArmS1x8AnyTrue) \
V(ArmS1x8AllTrue) \
V(ArmS1x16AnyTrue) \
V(ArmS1x16AllTrue)
V(ArmS1x16AllTrue) \
V(ArmWord32AtomicPairLoad) \
V(ArmWord32AtomicPairStore) \
V(ArmWord32AtomicPairAdd) \
V(ArmWord32AtomicPairSub) \
V(ArmWord32AtomicPairAnd) \
V(ArmWord32AtomicPairOr) \
V(ArmWord32AtomicPairXor) \
V(ArmWord32AtomicPairExchange) \
V(ArmWord32AtomicPairCompareExchange) \
V(ArmWord64AtomicNarrowAddUint8) \
V(ArmWord64AtomicNarrowAddUint16) \
V(ArmWord64AtomicNarrowAddUint32) \
V(ArmWord64AtomicNarrowSubUint8) \
V(ArmWord64AtomicNarrowSubUint16) \
V(ArmWord64AtomicNarrowSubUint32) \
V(ArmWord64AtomicNarrowAndUint8) \
V(ArmWord64AtomicNarrowAndUint16) \
V(ArmWord64AtomicNarrowAndUint32) \
V(ArmWord64AtomicNarrowOrUint8) \
V(ArmWord64AtomicNarrowOrUint16) \
V(ArmWord64AtomicNarrowOrUint32) \
V(ArmWord64AtomicNarrowXorUint8) \
V(ArmWord64AtomicNarrowXorUint16) \
V(ArmWord64AtomicNarrowXorUint32) \
V(ArmWord64AtomicNarrowExchangeUint8) \
V(ArmWord64AtomicNarrowExchangeUint16) \
V(ArmWord64AtomicNarrowExchangeUint32) \
V(ArmWord64AtomicNarrowCompareExchangeUint8) \
V(ArmWord64AtomicNarrowCompareExchangeUint16) \
V(ArmWord64AtomicNarrowCompareExchangeUint32)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
......
......@@ -264,6 +264,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmLdrsh:
case kArmLdr:
case kArmPeek:
case kArmWord32AtomicPairLoad:
return kIsLoadOperation;
case kArmVstrF32:
......@@ -276,6 +277,35 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmPush:
case kArmPoke:
case kArmDsbIsb:
case kArmWord32AtomicPairStore:
case kArmWord32AtomicPairAdd:
case kArmWord32AtomicPairSub:
case kArmWord32AtomicPairAnd:
case kArmWord32AtomicPairOr:
case kArmWord32AtomicPairXor:
case kArmWord32AtomicPairExchange:
case kArmWord32AtomicPairCompareExchange:
case kArmWord64AtomicNarrowAddUint8:
case kArmWord64AtomicNarrowAddUint16:
case kArmWord64AtomicNarrowAddUint32:
case kArmWord64AtomicNarrowSubUint8:
case kArmWord64AtomicNarrowSubUint16:
case kArmWord64AtomicNarrowSubUint32:
case kArmWord64AtomicNarrowAndUint8:
case kArmWord64AtomicNarrowAndUint16:
case kArmWord64AtomicNarrowAndUint32:
case kArmWord64AtomicNarrowOrUint8:
case kArmWord64AtomicNarrowOrUint16:
case kArmWord64AtomicNarrowOrUint32:
case kArmWord64AtomicNarrowXorUint8:
case kArmWord64AtomicNarrowXorUint16:
case kArmWord64AtomicNarrowXorUint32:
case kArmWord64AtomicNarrowExchangeUint8:
case kArmWord64AtomicNarrowExchangeUint16:
case kArmWord64AtomicNarrowExchangeUint32:
case kArmWord64AtomicNarrowCompareExchangeUint8:
case kArmWord64AtomicNarrowCompareExchangeUint16:
case kArmWord64AtomicNarrowCompareExchangeUint32:
return kHasSideEffect;
#define CASE(Name) case k##Name:
......
......@@ -2389,7 +2389,7 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT
#if !V8_TARGET_ARCH_IA32
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
UNIMPLEMENTED();
}
......@@ -2453,7 +2453,7 @@ void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_IA32
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
......
......@@ -243,6 +243,7 @@ v8_source_set("cctest_sources") {
"wasm/test-run-wasm-64.cc",
"wasm/test-run-wasm-asmjs.cc",
"wasm/test-run-wasm-atomics.cc",
"wasm/test-run-wasm-atomics64.cc",
"wasm/test-run-wasm-interpreter.cc",
"wasm/test-run-wasm-js.cc",
"wasm/test-run-wasm-module.cc",
......@@ -299,7 +300,6 @@ v8_source_set("cctest_sources") {
"test-sync-primitives-arm64.cc",
"test-utils-arm64.cc",
"test-utils-arm64.h",
"wasm/test-run-wasm-atomics64.cc",
]
} else if (v8_current_cpu == "x86") {
sources += [ ### gcmole(arch:ia32) ###
......@@ -309,7 +309,6 @@ v8_source_set("cctest_sources") {
"test-code-stubs.h",
"test-disasm-ia32.cc",
"test-log-stack-tracer.cc",
"wasm/test-run-wasm-atomics64.cc",
]
} else if (v8_current_cpu == "mips") {
sources += [ ### gcmole(arch:mips) ###
......@@ -356,7 +355,6 @@ v8_source_set("cctest_sources") {
"test-disasm-x64.cc",
"test-log-stack-tracer.cc",
"test-macro-assembler-x64.cc",
"wasm/test-run-wasm-atomics64.cc",
]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
sources += [ ### gcmole(arch:ppc) ###
......
......@@ -344,6 +344,12 @@
'test-run-wasm-simd/RunWasm_ReductionTest16_compiled': [SKIP],
}], # '(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips'
##############################################################################
['arch == mipsel or arch == mips64el or arch == mips or arch == mips64', {
# TODO(mips-team): Implement I64Atomic operations on MIPS
'test-run-wasm-atomics64/*': [SKIP],
}], # 'arch == mipsel or arch == mips64el or arch == mips or arch == mips64'
##############################################################################
['mips_arch_variant == r6', {
# For MIPS[64] architecture release 6, fusion multiply-accumulate instructions
......
......@@ -1615,8 +1615,8 @@ TEST(LoadStoreExclusive) {
COMPARE(strexh(r0, r1, r2), "e1e20f91 strexh r0, r1, [r2]");
COMPARE(ldrex(r0, r1), "e1910f9f ldrex r0, [r1]");
COMPARE(strex(r0, r1, r2), "e1820f91 strex r0, r1, [r2]");
COMPARE(ldrexd(r0, r1, MemOperand(r2)), "e1b20f9f ldrexd r0, [r2]");
COMPARE(strexd(r0, r2, r3, MemOperand(r4)),
COMPARE(ldrexd(r0, r1, r2), "e1b20f9f ldrexd r0, [r2]");
COMPARE(strexd(r0, r2, r3, r4),
"e1a40f92 strexd r0, r2, [r4]");
VERIFY_RUN();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment