Commit d99baa26 authored by mbrandy, committed by Commit bot

PPC: Fix atomic load sequence.

R=binji@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com, bjaideep@ca.ibm.com
BUG=

Review URL: https://codereview.chromium.org/1889693003

Cr-Commit-Position: refs/heads/master@{#35503}
parent 139617b0
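
Background: Atomics.load on a shared typed array is a sequentially consistent load, and the usual Power mapping for that is a full barrier (sync/hwsync) before the load, then a compare of the loaded value with itself, a never-taken conditional branch, and isync; the value-dependent branch plus isync keeps later loads from being satisfied before the atomic load completes. The old stub emitted only a trailing lwsync and no leading barrier. In portable C++ the stub's i32 case corresponds roughly to the sketch below (illustrative, not V8 code; the function name is invented here). An annotated expansion of the new macro follows the diff.

    #include <atomic>
    #include <cstdint>

    // What the stub implements for the i32 case, expressed with C++11
    // atomics: a sequentially consistent load. On Power, compilers emit
    // essentially the sequence this patch adds:
    //   sync; lwa; cmp; bne- L; L: isync
    int32_t SeqCstLoadI32(const std::atomic<int32_t>* p) {
      return p->load(std::memory_order_seq_cst);
    }
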
@@ -5908,6 +5908,17 @@ void ReturnAllocatedHeapNumber(MacroAssembler* masm, DoubleRegister value,
 }  // anonymous namespace
 
+#define ASSEMBLE_ATOMIC_LOAD(instr, dst, base, index) \
+  do {                                                \
+    Label not_taken;                                  \
+    __ sync();                                        \
+    __ instr(dst, MemOperand(base, index));           \
+    __ bind(&not_taken);                              \
+    __ cmp(dst, dst);                                 \
+    __ bne(&not_taken);                               \
+    __ isync();                                       \
+  } while (0)
+
 void AtomicsLoadStub::Generate(MacroAssembler* masm) {
   Register object = r4;
   Register index = r3;  // Index is an untagged word32.
@@ -5918,29 +5929,25 @@ void AtomicsLoadStub::Generate(MacroAssembler* masm) {
   TypedArrayJumpTablePrologue(masm, object, r6, r7, &table);
 
   __ bind(&i8);
-  __ lbzx(r3, MemOperand(backing_store, index));
-  __ lwsync();
+  ASSEMBLE_ATOMIC_LOAD(lbzx, r3, backing_store, index);
   __ extsb(r3, r3);
   __ SmiTag(r3);
   __ blr();
 
   __ bind(&u8);
-  __ lbzx(r3, MemOperand(backing_store, index));
-  __ lwsync();
+  ASSEMBLE_ATOMIC_LOAD(lbzx, r3, backing_store, index);
   __ SmiTag(r3);
   __ blr();
 
   __ bind(&i16);
   __ ShiftLeftImm(index, index, Operand(1));
-  __ lhax(r3, MemOperand(backing_store, index));
-  __ lwsync();
+  ASSEMBLE_ATOMIC_LOAD(lhax, r3, backing_store, index);
   __ SmiTag(r3);
   __ blr();
 
   __ bind(&u16);
   __ ShiftLeftImm(index, index, Operand(1));
-  __ lhzx(r3, MemOperand(backing_store, index));
-  __ lwsync();
+  ASSEMBLE_ATOMIC_LOAD(lhzx, r3, backing_store, index);
   __ SmiTag(r3);
   __ blr();
@@ -5948,8 +5955,7 @@ void AtomicsLoadStub::Generate(MacroAssembler* masm) {
 
   __ bind(&i32);
   __ ShiftLeftImm(index, index, Operand(2));
-  __ lwax(r3, MemOperand(backing_store, index));
-  __ lwsync();
+  ASSEMBLE_ATOMIC_LOAD(lwax, r3, backing_store, index);
 #if V8_TARGET_ARCH_PPC64
   __ SmiTag(r3);
   __ blr();
@@ -5959,8 +5965,7 @@ void AtomicsLoadStub::Generate(MacroAssembler* masm) {
 
   __ bind(&u32);
   __ ShiftLeftImm(index, index, Operand(2));
-  __ lwzx(r3, MemOperand(backing_store, index));
-  __ lwsync();
+  ASSEMBLE_ATOMIC_LOAD(lwzx, r3, backing_store, index);
   ReturnUnsignedInteger32(masm, d0, r3, &use_heap_number);
 
   __ bind(&use_heap_number);
...
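
Note: expanded by hand for the i32 path, the new macro emits the following (an annotated sketch built from the diff above, using the same MacroAssembler calls; the comments are editorial, not part of the commit):

    // ASSEMBLE_ATOMIC_LOAD(lwax, r3, backing_store, index) expands to:
    Label not_taken;
    __ sync();                                      // full barrier before the load
    __ lwax(r3, MemOperand(backing_store, index));  // the atomic load itself
    __ bind(&not_taken);
    __ cmp(r3, r3);      // always equal...
    __ bne(&not_taken);  // ...so never taken, but the branch depends on the loaded value
    __ isync();          // control dependency + isync: later loads cannot start early

The bare trailing lwsync this replaces provided no barrier before the load, which is not enough for the sequentially consistent semantics Atomics.load requires.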