Commit 8fe01fea authored by Deepti Gandluri, committed by Commit Bot

[arm] Implement I64Atomic operations on Arm

 - Implement all the I64Atomic operations on ARM
 - Change assembler methods to use Registers instead of memory operands
 - Move atomics64 test up to be tested on all archs, disable tests on MIPS

Bug: v8:6532

Change-Id: I91bd42fa819f194be15c719266c36230f9c65db8
Reviewed-on: https://chromium-review.googlesource.com/1180211
Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Bill Budge <bbudge@chromium.org>
Reviewed-by: Ben Smith <binji@chromium.org>
Cr-Commit-Position: refs/heads/master@{#55416}
parent 4ac19c38
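
The I64Atomic operations on 32-bit ARM come down to a load-linked/store-conditional retry loop built on the exclusive-pair instructions ldrexd/strexd, which the assembler changes below expose with plain register operands. As an illustration of that pattern only (a standalone sketch, not the code generator added by this commit), here is a 64-bit atomic add for ARMv7 written with GCC inline assembly; the function and operand names are made up for the example, it assumes the toolchain assigns the 64-bit operands to register pairs that ldrexd/strexd accept, and the memory barriers a sequentially consistent WebAssembly atomic would also need are omitted.

#include <cstdint>

// Standalone sketch of the ldrexd/strexd retry loop (not V8 code).
// %Q / %R select the low / high registers of a 64-bit inline-asm operand.
static inline uint64_t atomic_add_u64(uint64_t* addr, uint64_t value) {
  uint64_t old_val, new_val;
  uint32_t status;
  __asm__ volatile(
      "1:\n"
      "  ldrexd %Q[old], %R[old], [%[ptr]]\n"         // load the pair exclusively
      "  adds   %Q[new], %Q[old], %Q[val]\n"          // low word add, sets carry
      "  adc    %R[new], %R[old], %R[val]\n"          // high word add with carry
      "  strexd %[st], %Q[new], %R[new], [%[ptr]]\n"  // try to publish the result
      "  teq    %[st], #0\n"
      "  bne    1b\n"                                 // exclusivity lost: retry
      : [old] "=&r"(old_val), [new] "=&r"(new_val), [st] "=&r"(status)
      : [ptr] "r"(addr), [val] "r"(value)
      : "cc", "memory");
  return old_val;  // an atomic RMW yields the value observed before the add
}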
@@ -2240,29 +2240,27 @@ void Assembler::strexh(Register src1, Register src2, Register dst,
        0xF9 * B4 | src2.code());
 }
 
-void Assembler::ldrexd(Register dst1, Register dst2, const MemOperand& src,
+void Assembler::ldrexd(Register dst1, Register dst2, Register src,
                        Condition cond) {
   // cond(31-28) | 00011011(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
-  DCHECK(src.rm() == no_reg);
   DCHECK(dst1 != lr);  // r14.
   // The pair of destination registers is restricted to being an even-numbered
   // register and the odd-numbered register that immediately follows it.
   DCHECK_EQ(0, dst1.code() % 2);
   DCHECK_EQ(dst1.code() + 1, dst2.code());
-  emit(cond | B24 | B23 | B21 | B20 | src.rn_.code() * B16 | dst1.code() * B12 |
+  emit(cond | B24 | B23 | B21 | B20 | src.code() * B16 | dst1.code() * B12 |
        0xF9F);
 }
 
-void Assembler::strexd(Register res, Register src1, Register src2,
-                       const MemOperand& dst, Condition cond) {
+void Assembler::strexd(Register res, Register src1, Register src2, Register dst,
+                       Condition cond) {
   // cond(31-28) | 00011010(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
-  DCHECK(dst.rm() == no_reg);
   DCHECK(src1 != lr);  // r14.
   // The pair of source registers is restricted to being an even-numbered
   // register and the odd-numbered register that immediately follows it.
   DCHECK_EQ(0, src1.code() % 2);
   DCHECK_EQ(src1.code() + 1, src2.code());
-  emit(cond | B24 | B23 | B21 | dst.rn_.code() * B16 | res.code() * B12 |
+  emit(cond | B24 | B23 | B21 | dst.code() * B16 | res.code() * B12 |
        0xF9 * B4 | src1.code());
 }
@@ -930,9 +930,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   void strexb(Register src1, Register src2, Register dst, Condition cond = al);
   void ldrexh(Register dst, Register src, Condition cond = al);
   void strexh(Register src1, Register src2, Register dst, Condition cond = al);
-  void ldrexd(Register dst1, Register dst2, const MemOperand& src,
-              Condition cond = al);
-  void strexd(Register res, Register src1, Register src2, const MemOperand& dst,
+  void ldrexd(Register dst1, Register dst2, Register src, Condition cond = al);
+  void strexd(Register res, Register src1, Register src2, Register dst,
               Condition cond = al);
 
   // Preload instructions
(Two diffs in this commit are collapsed and not shown.)
@@ -264,6 +264,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArmLdrsh:
     case kArmLdr:
     case kArmPeek:
+    case kArmWord32AtomicPairLoad:
       return kIsLoadOperation;
 
     case kArmVstrF32:
@@ -276,6 +277,35 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArmPush:
     case kArmPoke:
     case kArmDsbIsb:
+    case kArmWord32AtomicPairStore:
+    case kArmWord32AtomicPairAdd:
+    case kArmWord32AtomicPairSub:
+    case kArmWord32AtomicPairAnd:
+    case kArmWord32AtomicPairOr:
+    case kArmWord32AtomicPairXor:
+    case kArmWord32AtomicPairExchange:
+    case kArmWord32AtomicPairCompareExchange:
+    case kArmWord64AtomicNarrowAddUint8:
+    case kArmWord64AtomicNarrowAddUint16:
+    case kArmWord64AtomicNarrowAddUint32:
+    case kArmWord64AtomicNarrowSubUint8:
+    case kArmWord64AtomicNarrowSubUint16:
+    case kArmWord64AtomicNarrowSubUint32:
+    case kArmWord64AtomicNarrowAndUint8:
+    case kArmWord64AtomicNarrowAndUint16:
+    case kArmWord64AtomicNarrowAndUint32:
+    case kArmWord64AtomicNarrowOrUint8:
+    case kArmWord64AtomicNarrowOrUint16:
+    case kArmWord64AtomicNarrowOrUint32:
+    case kArmWord64AtomicNarrowXorUint8:
+    case kArmWord64AtomicNarrowXorUint16:
+    case kArmWord64AtomicNarrowXorUint32:
+    case kArmWord64AtomicNarrowExchangeUint8:
+    case kArmWord64AtomicNarrowExchangeUint16:
+    case kArmWord64AtomicNarrowExchangeUint32:
+    case kArmWord64AtomicNarrowCompareExchangeUint8:
+    case kArmWord64AtomicNarrowCompareExchangeUint16:
+    case kArmWord64AtomicNarrowCompareExchangeUint32:
       return kHasSideEffect;
 
 #define CASE(Name) case k##Name:
@@ -2389,7 +2389,7 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
 void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
 #endif  // V8_TARGET_ARCH_64_BIT
 
-#if !V8_TARGET_ARCH_IA32
+#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
 void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
   UNIMPLEMENTED();
 }
@@ -2453,7 +2453,7 @@ void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
 void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
   UNIMPLEMENTED();
 }
-#endif  // !V8_TARGET_ARCH_IA32
+#endif  // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
 
 #if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
     !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
@@ -243,6 +243,7 @@ v8_source_set("cctest_sources") {
     "wasm/test-run-wasm-64.cc",
     "wasm/test-run-wasm-asmjs.cc",
     "wasm/test-run-wasm-atomics.cc",
+    "wasm/test-run-wasm-atomics64.cc",
     "wasm/test-run-wasm-interpreter.cc",
     "wasm/test-run-wasm-js.cc",
     "wasm/test-run-wasm-module.cc",
@@ -299,7 +300,6 @@ v8_source_set("cctest_sources") {
       "test-sync-primitives-arm64.cc",
       "test-utils-arm64.cc",
       "test-utils-arm64.h",
-      "wasm/test-run-wasm-atomics64.cc",
     ]
   } else if (v8_current_cpu == "x86") {
     sources += [  ### gcmole(arch:ia32) ###
@@ -309,7 +309,6 @@ v8_source_set("cctest_sources") {
       "test-code-stubs.h",
      "test-disasm-ia32.cc",
      "test-log-stack-tracer.cc",
-      "wasm/test-run-wasm-atomics64.cc",
     ]
   } else if (v8_current_cpu == "mips") {
     sources += [  ### gcmole(arch:mips) ###
@@ -356,7 +355,6 @@ v8_source_set("cctest_sources") {
      "test-disasm-x64.cc",
      "test-log-stack-tracer.cc",
      "test-macro-assembler-x64.cc",
-      "wasm/test-run-wasm-atomics64.cc",
     ]
   } else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
     sources += [  ### gcmole(arch:ppc) ###
@@ -344,6 +344,12 @@
   'test-run-wasm-simd/RunWasm_ReductionTest16_compiled': [SKIP],
 }],  # '(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips'
 
+##############################################################################
+['arch == mipsel or arch == mips64el or arch == mips or arch == mips64', {
+  # TODO(mips-team): Implement I64Atomic operations on MIPS
+  'test-run-wasm-atomics64/*': [SKIP],
+}],  # 'arch == mipsel or arch == mips64el or arch == mips or arch == mips64'
+
 ##############################################################################
 ['mips_arch_variant == r6', {
   # For MIPS[64] architecture release 6, fusion multiply-accumulate instructions
@@ -1615,8 +1615,8 @@ TEST(LoadStoreExclusive) {
   COMPARE(strexh(r0, r1, r2), "e1e20f91 strexh r0, r1, [r2]");
   COMPARE(ldrex(r0, r1), "e1910f9f ldrex r0, [r1]");
   COMPARE(strex(r0, r1, r2), "e1820f91 strex r0, r1, [r2]");
-  COMPARE(ldrexd(r0, r1, MemOperand(r2)), "e1b20f9f ldrexd r0, [r2]");
-  COMPARE(strexd(r0, r2, r3, MemOperand(r4)),
-          "e1a40f92 strexd r0, r2, [r4]");
+  COMPARE(ldrexd(r0, r1, r2), "e1b20f9f ldrexd r0, [r2]");
+  COMPARE(strexd(r0, r2, r3, r4),
+          "e1a40f92 strexd r0, r2, [r4]");
 
   VERIFY_RUN();
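
As a quick cross-check (not part of the commit), the two expected encodings in the disassembler test above can be reproduced from the bit layout given in the assembler comments, with the AL condition (0xE) in bits 31-28. The constants below are redefined locally so the snippet stands alone.

#include <cstdint>

// Local bit-position constants mirroring the B<n> names used by the assembler.
constexpr uint32_t B4 = 1u << 4, B12 = 1u << 12, B16 = 1u << 16, B20 = 1u << 20,
                   B21 = 1u << 21, B23 = 1u << 23, B24 = 1u << 24;
constexpr uint32_t kCondAL = 0xEu << 28;  // "always" condition

// ldrexd r0, r1, [r2]: Rn = r2 in bits 19-16, Rt = r0 in bits 15-12, low bits 0xF9F.
static_assert((kCondAL | B24 | B23 | B21 | B20 | 2 * B16 | 0 * B12 | 0xF9F) ==
                  0xE1B20F9Fu,
              "ldrexd r0, r1, [r2]");

// strexd r0, r2, r3, [r4]: Rn = r4, Rd = r0, 0xF9 in bits 11-4, Rt = r2 in bits 3-0.
static_assert((kCondAL | B24 | B23 | B21 | 4 * B16 | 0 * B12 | 0xF9 * B4 | 2) ==
                  0xE1A40F92u,
              "strexd r0, r2, r3, [r4]");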