Commit 39e96a10 authored by andrew-cc-chen, committed by Commit Bot

s390: Implemented Atomic64 operations

Change-Id: I7591ccc55405a2fbd258bf28d53cd40a4bddf2c2
Reviewed-on: https://chromium-review.googlesource.com/1255102
Reviewed-by: Junliang Yan <jyan@ca.ibm.com>
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Commit-Queue: Junliang Yan <jyan@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#56344}
parent 3daa6a1d
......@@ -2427,7 +2427,8 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
}
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
!V8_TARGET_ARCH_S390
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
......@@ -2451,7 +2452,8 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 &&
// !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
......
......@@ -1036,6 +1036,17 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ LoadlW(result, result); \
} while (false)
// Emits a doubleword atomic read-modify-write using a single interlocked
// load-and-<op> instruction (the macro argument, e.g. laag/lang/laog/laxg).
// LAY materializes the effective address into r1 first because the
// load-and-op forms take only a base-register memory operand; `result`
// receives the value the memory location held before the operation.
#define ASSEMBLE_ATOMIC_BINOP_WORD64(load_and_op) \
do { \
Register value = i.InputRegister(2); \
Register result = i.OutputRegister(0); \
Register addr = r1; \
AddressingMode mode = kMode_None; \
MemOperand op = i.MemoryOperand(&mode); \
__ lay(addr, op); \
__ load_and_op(result, value, MemOperand(addr)); \
} while (false)
#define ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end) \
do { \
Label do_cs; \
......@@ -1051,6 +1062,22 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ bne(&do_cs, Label::kNear); \
} while (false)
// Compare-and-swap retry loop applying `bin_inst` to the sub-field
// [start, end] (rotated into place by `shift_amount`) of the aligned
// doubleword at addr+offset. The loop: load the whole 8 bytes into `prev`,
// rotate the operand `value` into the target bit range, apply bin_inst,
// splice the modified field back over the untouched bits of `prev`, then
// CmpAndSwap64; a concurrent writer makes the CS fail (and reload `prev`),
// so we branch back and retry until the swap succeeds.
#define ATOMIC64_BIN_OP(bin_inst, offset, shift_amount, start, end) \
do { \
Label do_cs; \
__ lg(prev, MemOperand(addr, offset)); \
__ bind(&do_cs); \
__ RotateInsertSelectBits(temp, value, Operand(start), Operand(end), \
Operand(static_cast<intptr_t>(shift_amount)), \
true); \
__ bin_inst(new_val, prev, temp); \
__ lgr(temp, prev); \
__ RotateInsertSelectBits(temp, new_val, Operand(start), Operand(end), \
Operand::Zero(), false); \
__ CmpAndSwap64(prev, temp, MemOperand(addr, offset)); \
__ bne(&do_cs, Label::kNear); \
} while (false)
#ifdef V8_TARGET_BIG_ENDIAN
#define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
{ \
......@@ -1070,6 +1097,15 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
extract_result(); \
}
// Big-endian mapping of 32-bit lane `index` (0 or 1) of an aligned
// doubleword onto the ATOMIC64_BIN_OP bit-field parameters. `offset`
// rebases addr (which points at the word) back to the containing
// doubleword; start/end select the 32-bit field, shift_amount rotates the
// operand into it. NOTE(review): shift_amount here is 32 - index*32
// (32 for index 0) while the LE variant uses index*32 — presumably both
// line up with RotateInsertSelectBits' rotate direction; verify on
// big-endian hardware.
#define ATOMIC_BIN_OP_WORD(bin_inst, index, extract_result) \
{ \
constexpr int offset = -(4 * index); \
constexpr int shift_amount = 32 - (index * 32); \
constexpr int start = 32 - shift_amount; \
constexpr int end = start + 31; \
ATOMIC64_BIN_OP(bin_inst, offset, shift_amount, start, end); \
extract_result(); \
}
#else
#define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
{ \
......@@ -1089,6 +1125,15 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
extract_result(); \
}
// Little-endian mapping of 32-bit lane `index` (0 or 1) of an aligned
// doubleword onto the ATOMIC64_BIN_OP bit-field parameters: lane 0 is the
// low word (register bits 32..63, no rotate), lane 1 the high word
// (rotate by 32). `offset` rebases addr back to the containing doubleword.
#define ATOMIC_BIN_OP_WORD(bin_inst, index, extract_result) \
{ \
constexpr int offset = -(4 * index); \
constexpr int shift_amount = index * 32; \
constexpr int start = 32 - shift_amount; \
constexpr int end = start + 31; \
ATOMIC64_BIN_OP(bin_inst, offset, shift_amount, start, end); \
extract_result(); \
}
#endif // V8_TARGET_BIG_ENDIAN
#define ASSEMBLE_ATOMIC_BINOP_HALFWORD(bin_inst, extract_result) \
......@@ -1147,6 +1192,311 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ bind(&done); \
} while (false)
// Atomic byte-sized binop implemented on the containing aligned
// doubleword. TMLL tests the low three address bits; the branch masks
// decode the TEST UNDER MASK condition code (CC3 = all tested bits one ->
// Condition(1), CC2 = mixed with leftmost one -> Condition(2), CC1 =
// mixed with leftmost zero -> Condition(4), fall-through = CC0 = all
// zero), narrowing the byte index within the doubleword in two steps.
// Each arm then runs the CS loop for that lane via ATOMIC_BIN_OP_BYTE.
#define ASSEMBLE_ATOMIC64_BINOP_BYTE(bin_inst, extract_result) \
do { \
Register value = i.InputRegister(2); \
Register result = i.OutputRegister(0); \
Register addr = i.TempRegister(0); \
Register prev = r0; \
Register new_val = r1; \
Register temp = kScratchReg; \
AddressingMode mode = kMode_None; \
MemOperand op = i.MemoryOperand(&mode); \
Label done, leftmost0, leftmost1, two, three, four, five, seven; \
__ lay(addr, op); \
__ tmll(addr, Operand(7)); \
__ b(Condition(1), &seven); \
__ b(Condition(2), &leftmost1); \
__ b(Condition(4), &leftmost0); \
/* ending with 0b000 */ \
ATOMIC_BIN_OP_BYTE(bin_inst, 0, extract_result); \
__ b(&done); \
/* ending in 0b001 to 0b011 */ \
__ bind(&leftmost0); \
__ tmll(addr, Operand(3)); \
__ b(Condition(1), &three); \
__ b(Condition(2), &two); \
ATOMIC_BIN_OP_BYTE(bin_inst, 1, extract_result); \
__ b(&done); \
/* ending in 0b010 */ \
__ bind(&two); \
ATOMIC_BIN_OP_BYTE(bin_inst, 2, extract_result); \
__ b(&done); \
/* ending in 0b011 */ \
__ bind(&three); \
ATOMIC_BIN_OP_BYTE(bin_inst, 3, extract_result); \
__ b(&done); \
/* ending in 0b100 to 0b110 */ \
__ bind(&leftmost1); \
__ tmll(addr, Operand(3)); \
__ b(Condition(8), &four); \
__ b(Condition(4), &five); \
ATOMIC_BIN_OP_BYTE(bin_inst, 6, extract_result); \
__ b(&done); \
/* ending in 0b100 */ \
__ bind(&four); \
ATOMIC_BIN_OP_BYTE(bin_inst, 4, extract_result); \
__ b(&done); \
/* ending in 0b101 */ \
__ bind(&five); \
ATOMIC_BIN_OP_BYTE(bin_inst, 5, extract_result); \
__ b(&done); \
/* ending in 0b111 */ \
__ bind(&seven); \
ATOMIC_BIN_OP_BYTE(bin_inst, 7, extract_result); \
__ bind(&done); \
} while (false)
// Atomic halfword binop on the containing aligned doubleword. TMLL tests
// address bits 2^1 and 2^2 (mask 6) to pick which of the four halfword
// lanes is addressed: CC3 (Condition(1)) -> lane 3, CC2 (Condition(2),
// leftmost tested bit set) -> lane 2, CC1 (Condition(4)) -> lane 1,
// fall-through (CC0) -> lane 0. Each arm runs the CS retry loop via
// ATOMIC_BIN_OP_HALFWORD for that lane.
#define ASSEMBLE_ATOMIC64_BINOP_HALFWORD(bin_inst, extract_result) \
do { \
Register value = i.InputRegister(2); \
Register result = i.OutputRegister(0); \
Register prev = i.TempRegister(0); \
Register new_val = r0; \
Register addr = r1; \
Register temp = kScratchReg; \
AddressingMode mode = kMode_None; \
MemOperand op = i.MemoryOperand(&mode); \
Label done, one, two, three; \
__ lay(addr, op); \
__ tmll(addr, Operand(6)); \
__ b(Condition(1), &three); \
__ b(Condition(2), &two); \
__ b(Condition(4), &one); \
/* ending in 0b00 */ \
ATOMIC_BIN_OP_HALFWORD(bin_inst, 0, extract_result); \
__ b(&done); \
/* ending in 0b01 */ \
__ bind(&one); \
ATOMIC_BIN_OP_HALFWORD(bin_inst, 1, extract_result); \
__ b(&done); \
/* ending in 0b10 */ \
__ bind(&two); \
ATOMIC_BIN_OP_HALFWORD(bin_inst, 2, extract_result); \
__ b(&done); \
/* ending in 0b11 */ \
__ bind(&three); \
ATOMIC_BIN_OP_HALFWORD(bin_inst, 3, extract_result); \
__ bind(&done); \
} while (false)
// Atomic 32-bit binop on the containing aligned doubleword. TMLL with the
// single-bit mask 4 tests address bit 2^2, which selects word lane 0 or 1.
// Per the z/Architecture TEST UNDER MASK definition, a single-bit mask
// yields CC0 (bit clear) or CC3 (bit set) — CC2 ("mixed") cannot occur —
// so the branch must test Condition(1) (CC3), not Condition(2): with
// Condition(2) the branch never fires and word accesses at offset 4
// within a doubleword would be applied to the wrong lane.
#define ASSEMBLE_ATOMIC64_BINOP_WORD(bin_inst, extract_result) \
do { \
Register value = i.InputRegister(2); \
Register result = i.OutputRegister(0); \
Register prev = i.TempRegister(0); \
Register new_val = r0; \
Register addr = r1; \
Register temp = kScratchReg; \
AddressingMode mode = kMode_None; \
MemOperand op = i.MemoryOperand(&mode); \
Label done, one; \
__ lay(addr, op); \
__ tmll(addr, Operand(4)); \
__ b(Condition(1), &one); \
/* ending in 0b000 */ \
ATOMIC_BIN_OP_WORD(bin_inst, 0, extract_result); \
__ b(&done); \
__ bind(&one); \
/* ending in 0b100 */ \
ATOMIC_BIN_OP_WORD(bin_inst, 1, extract_result); \
__ bind(&done); \
} while (false)
// Sub-doubleword compare-exchange on the field [start, end] of the
// aligned doubleword at addr+offset: build the expected and replacement
// doublewords by splicing old_val/new_val into a fresh copy of memory,
// issue one CmpAndSwap64, then rotate the (possibly updated) field out of
// temp0 into `output` as the returned previous value.
// NOTE(review): there is no retry loop — a concurrent write to a
// *different* lane of the same doubleword between the lg and the CS makes
// the CS fail even though the target lane matched old_val, so the
// exchange is spuriously dropped; confirm callers tolerate this or add a
// retry-on-unrelated-lane-change loop as other backends do.
#define ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, offset) \
{ \
__ lg(temp0, MemOperand(addr, offset)); \
__ lgr(temp1, temp0); \
__ RotateInsertSelectBits(temp0, old_val, Operand(start), \
Operand(end), Operand(shift_amount), false); \
__ RotateInsertSelectBits(temp1, new_val, Operand(start), \
Operand(end), Operand(shift_amount), false); \
__ CmpAndSwap64(temp0, temp1, MemOperand(addr, offset)); \
__ RotateInsertSelectBits(output, temp0, Operand(start+shift_amount), \
Operand(end+shift_amount), Operand(64-shift_amount), true); \
}
#ifdef V8_TARGET_BIG_ENDIAN
// Big-endian lane->bit-field mappings for ATOMIC64_COMP_EXCHANGE: byte,
// halfword and word lane `i` of an aligned doubleword. `start/end` select
// the lane's bits (bit 0 = most significant on BE), `shift_amount`
// rotates the operand into place, and the negative offset rebases the
// lane address back to the containing doubleword.
#define ATOMIC64_COMP_EXCHANGE_BYTE(i) \
{ \
constexpr int idx = (i); \
constexpr int start = 8 * idx; \
constexpr int end = start + 7; \
constexpr int shift_amount = (7 - idx) * 8; \
ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, -idx); \
}
#define ATOMIC64_COMP_EXCHANGE_HALFWORD(i) \
{ \
constexpr int idx = (i); \
constexpr int start = 16 * idx; \
constexpr int end = start + 15; \
constexpr int shift_amount = (3 - idx) * 16; \
ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, -idx * 2); \
}
#define ATOMIC64_COMP_EXCHANGE_WORD(i) \
{ \
constexpr int idx = (i); \
constexpr int start = 32 * idx; \
constexpr int end = start + 31; \
constexpr int shift_amount = (1 - idx) * 32; \
ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, -idx * 4); \
}
#else
// Little-endian lane->bit-field mappings for ATOMIC64_COMP_EXCHANGE:
// mirror images of the big-endian variants above — lane 0 lives in the
// low-order bits, so shift_amount grows with the index instead of
// shrinking. NOTE(review): the byte `start` formula 32 + 8*(3-idx) is
// only obviously derived for idx 0..3; for idx 4..7 it relies on the
// value wrapping down to 0..24 — verify against the BE variant.
#define ATOMIC64_COMP_EXCHANGE_BYTE(i) \
{ \
constexpr int idx = (i); \
constexpr int start = 32 + 8 * (3 - idx); \
constexpr int end = start + 7; \
constexpr int shift_amount = idx * 8; \
ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, -idx); \
}
#define ATOMIC64_COMP_EXCHANGE_HALFWORD(i) \
{ \
constexpr int idx = (i); \
constexpr int start = 32 + 16 * (1 - idx); \
constexpr int end = start + 15; \
constexpr int shift_amount = idx * 16; \
ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, -idx * 2); \
}
#define ATOMIC64_COMP_EXCHANGE_WORD(i) \
{ \
constexpr int idx = (i); \
constexpr int start = 32 * (1 - idx); \
constexpr int end = start + 31; \
constexpr int shift_amount = idx * 32; \
ATOMIC64_COMP_EXCHANGE(start, end, shift_amount, -idx * 4); \
}
#endif
// Byte compare-exchange: decode the low three address bits with TMLL
// (same CC-mask dispatch as ASSEMBLE_ATOMIC64_BINOP_BYTE) to find the
// byte lane inside the aligned doubleword, run ATOMIC64_COMP_EXCHANGE for
// that lane, then zero/sign-extend the returned previous value with
// `load_and_ext`.
#define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_BYTE(load_and_ext) \
do { \
Register old_val = i.InputRegister(0); \
Register new_val = i.InputRegister(1); \
Register output = i.OutputRegister(); \
Register addr = kScratchReg; \
Register temp0 = r0; \
Register temp1 = r1; \
size_t index = 2; \
AddressingMode mode = kMode_None; \
MemOperand op = i.MemoryOperand(&mode, &index); \
Label done, leftmost0, leftmost1, two, three, four, five, seven; \
__ lay(addr, op); \
__ tmll(addr, Operand(7)); \
__ b(Condition(1), &seven); \
__ b(Condition(2), &leftmost1); \
__ b(Condition(4), &leftmost0); \
/* ending with 0b000 */ \
ATOMIC64_COMP_EXCHANGE_BYTE(0); \
__ b(&done); \
/* ending in 0b001 to 0b011 */ \
__ bind(&leftmost0); \
__ tmll(addr, Operand(3)); \
__ b(Condition(1), &three); \
__ b(Condition(2), &two); \
ATOMIC64_COMP_EXCHANGE_BYTE(1); \
__ b(&done); \
/* ending in 0b010 */ \
__ bind(&two); \
ATOMIC64_COMP_EXCHANGE_BYTE(2); \
__ b(&done); \
/* ending in 0b011 */ \
__ bind(&three); \
ATOMIC64_COMP_EXCHANGE_BYTE(3); \
__ b(&done); \
/* ending in 0b100 to 0b110 */ \
__ bind(&leftmost1); \
__ tmll(addr, Operand(3)); \
__ b(Condition(8), &four); \
__ b(Condition(4), &five); \
ATOMIC64_COMP_EXCHANGE_BYTE(6); \
__ b(&done); \
/* ending in 0b100 */ \
__ bind(&four); \
ATOMIC64_COMP_EXCHANGE_BYTE(4); \
__ b(&done); \
/* ending in 0b101 */ \
__ bind(&five); \
ATOMIC64_COMP_EXCHANGE_BYTE(5); \
__ b(&done); \
/* ending in 0b111 */ \
__ bind(&seven); \
ATOMIC64_COMP_EXCHANGE_BYTE(7); \
__ bind(&done); \
__ load_and_ext(output, output); \
} while (false)
// Halfword compare-exchange: TMLL on address bits 2^1|2^2 (mask 6) picks
// one of the four halfword lanes of the aligned doubleword (CC3 -> lane 3,
// CC2 -> lane 2, CC1 -> lane 1, fall-through -> lane 0), then the lane's
// ATOMIC64_COMP_EXCHANGE runs and the previous value is extended with
// `load_and_ext`.
#define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_HALFWORD(load_and_ext) \
do { \
Register old_val = i.InputRegister(0); \
Register new_val = i.InputRegister(1); \
Register output = i.OutputRegister(); \
Register addr = kScratchReg; \
Register temp0 = r0; \
Register temp1 = r1; \
size_t index = 2; \
AddressingMode mode = kMode_None; \
MemOperand op = i.MemoryOperand(&mode, &index); \
Label done, one, two, three; \
__ lay(addr, op); \
__ tmll(addr, Operand(6)); \
__ b(Condition(1), &three); \
__ b(Condition(2), &two); \
__ b(Condition(4), &one); \
/* ending in 0b00 */ \
ATOMIC64_COMP_EXCHANGE_HALFWORD(0); \
__ b(&done); \
/* ending in 0b01 */ \
__ bind(&one); \
ATOMIC64_COMP_EXCHANGE_HALFWORD(1); \
__ b(&done); \
/* ending in 0b10 */ \
__ bind(&two); \
ATOMIC64_COMP_EXCHANGE_HALFWORD(2); \
__ b(&done); \
/* ending in 0b11 */ \
__ bind(&three); \
ATOMIC64_COMP_EXCHANGE_HALFWORD(3); \
__ bind(&done); \
__ load_and_ext(output, output); \
} while (false)
// Word compare-exchange: address bit 2^2 selects word lane 0 or 1 of the
// aligned doubleword. Per the z/Architecture TEST UNDER MASK definition a
// single-bit mask produces only CC0 (bit clear) or CC3 (bit set); CC2 is
// impossible, so the branch must test Condition(1) (CC3) — the original
// Condition(2) could never fire, leaving the offset-4 lane unreachable
// and corrupting word CAS at such addresses.
#define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD(load_and_ext) \
do { \
Register old_val = i.InputRegister(0); \
Register new_val = i.InputRegister(1); \
Register output = i.OutputRegister(); \
Register addr = kScratchReg; \
Register temp0 = r0; \
Register temp1 = r1; \
size_t index = 2; \
AddressingMode mode = kMode_None; \
MemOperand op = i.MemoryOperand(&mode, &index); \
Label done, one; \
__ lay(addr, op); \
__ tmll(addr, Operand(4)); \
__ b(Condition(1), &one); \
/* ending in 0b000 */ \
ATOMIC64_COMP_EXCHANGE_WORD(0); \
__ b(&done); \
__ bind(&one); \
/* ending in 0b100 */ \
ATOMIC64_COMP_EXCHANGE_WORD(1); \
__ bind(&done); \
__ load_and_ext(output, output); \
} while (false)
// Full doubleword compare-exchange: a single CmpAndSwap64 suffices. The
// expected old value is not read here — the instruction selector defines
// the output same-as-first-input (see VisitAtomicCompareExchange's
// DefineSameAsFirst), so `output` enters holding old_val and exits
// holding the value the memory location actually contained.
#define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64() \
do { \
Register new_val = i.InputRegister(1); \
Register output = i.OutputRegister(); \
Register addr = kScratchReg; \
size_t index = 2; \
AddressingMode mode = kMode_None; \
MemOperand op = i.MemoryOperand(&mode, &index); \
__ lay(addr, op); \
__ CmpAndSwap64(output, new_val, MemOperand(addr)); \
} while (false)
// Tears down the current stack frame before returning; StackFrame::MANUAL
// tells LeaveFrame not to pop a frame-type marker.
void CodeGenerator::AssembleDeconstructFrame() {
__ LeaveFrame(StackFrame::MANUAL);
}
......@@ -2748,6 +3098,260 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kWord32AtomicXorWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(lax);
break;
// 64-bit atomic loads: plain loads are atomic on s390 for naturally
// aligned accesses; sub-64-bit variants zero-extend into the output.
case kS390_Word64AtomicLoadUint8:
__ LoadlB(i.OutputRegister(), i.MemoryOperand());
break;
case kS390_Word64AtomicLoadUint16:
__ LoadLogicalHalfWordP(i.OutputRegister(), i.MemoryOperand());
break;
case kS390_Word64AtomicLoadUint32:
__ LoadlW(i.OutputRegister(), i.MemoryOperand());
break;
case kS390_Word64AtomicLoadUint64:
__ lg(i.OutputRegister(), i.MemoryOperand());
break;
// 64-bit atomic stores: input 0 is the value; the memory operand starts
// at instruction input 1.
case kS390_Word64AtomicStoreUint8:
__ StoreByte(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
break;
case kS390_Word64AtomicStoreUint16:
__ StoreHalfWord(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
break;
case kS390_Word64AtomicStoreUint32:
__ StoreW(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
break;
case kS390_Word64AtomicStoreUint64:
__ stg(i.InputRegister(0), i.MemoryOperand(nullptr, 1));
break;
// Expands the Uint8/Uint16/Uint32 switch cases for one 64-bit atomic
// binop. The lambda passed as extract_result rotates the pre-operation
// value of the addressed lane (still sitting in its position within
// `prev`, the old doubleword) down into the low bits of `result`;
// shift_amount is the lane's rotation captured from the enclosing
// ATOMIC_BIN_OP_* expansion.
#define ATOMIC64_BINOP_CASE(op, inst) \
case kS390_Word64Atomic##op##Uint8: \
ASSEMBLE_ATOMIC64_BINOP_BYTE(inst, [&]() { \
int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
__ RotateInsertSelectBits(result, prev, Operand(56), Operand(63), \
Operand(static_cast<intptr_t>(rotate_left)), \
true); \
}); \
break; \
case kS390_Word64Atomic##op##Uint16: \
ASSEMBLE_ATOMIC64_BINOP_HALFWORD(inst, [&]() { \
int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
__ RotateInsertSelectBits(result, prev, Operand(48), Operand(63), \
Operand(static_cast<intptr_t>(rotate_left)), \
true); \
}); \
break; \
case kS390_Word64Atomic##op##Uint32: \
ASSEMBLE_ATOMIC64_BINOP_WORD(inst, [&]() { \
int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
__ RotateInsertSelectBits(result, prev, Operand(32), Operand(63), \
Operand(static_cast<intptr_t>(rotate_left)), \
true); \
}); \
break;
ATOMIC64_BINOP_CASE(Add, AddP)
ATOMIC64_BINOP_CASE(Sub, SubP)
ATOMIC64_BINOP_CASE(And, AndP)
ATOMIC64_BINOP_CASE(Or, OrP)
ATOMIC64_BINOP_CASE(Xor, XorP)
#undef ATOMIC64_BINOP_CASE
// Full-doubleword binops use the single-instruction interlocked
// load-and-op forms. Add/And/Or/Xor use the raw laag/lang/laog/laxg
// mnemonics; Sub goes through the LoadAndSub64 macro-assembler helper —
// presumably because there is no "load and subtract" instruction and the
// helper negates the operand first (TODO confirm in macro-assembler).
case kS390_Word64AtomicAddUint64:
ASSEMBLE_ATOMIC_BINOP_WORD64(laag);
break;
case kS390_Word64AtomicSubUint64:
ASSEMBLE_ATOMIC_BINOP_WORD64(LoadAndSub64);
break;
case kS390_Word64AtomicAndUint64:
ASSEMBLE_ATOMIC_BINOP_WORD64(lang);
break;
case kS390_Word64AtomicOrUint64:
ASSEMBLE_ATOMIC_BINOP_WORD64(laog);
break;
case kS390_Word64AtomicXorUint64:
ASSEMBLE_ATOMIC_BINOP_WORD64(laxg);
break;
// Unconditional exchange of the field [start, end] within the aligned
// doubleword at r1+offset, via a CSG retry loop: load the doubleword into
// output, splice `value` into the target field of a copy (r0), and csg;
// on failure csg refreshes `output` with current memory, so the loop
// retries until no concurrent writer intervened. Finally shift the old
// field value down into the low bits of `output`.
#define ATOMIC64_EXCHANGE(start, end, shift_amount, offset) \
{ \
Label do_cs; \
__ lg(output, MemOperand(r1, offset)); \
__ bind(&do_cs); \
__ lgr(r0, output); \
__ RotateInsertSelectBits(r0, value, Operand(start), Operand(end), \
Operand(shift_amount), false); \
__ csg(output, r0, MemOperand(r1, offset)); \
__ bne(&do_cs, Label::kNear); \
__ srlg(output, output, Operand(shift_amount)); \
}
#ifdef V8_TARGET_BIG_ENDIAN
// Big-endian lane->bit-field mappings for ATOMIC64_EXCHANGE (byte,
// halfword, word lane `i` of the containing aligned doubleword); same
// scheme as the ATOMIC64_COMP_EXCHANGE_* mappings above.
#define ATOMIC64_EXCHANGE_BYTE(i) \
{ \
constexpr int idx = (i); \
constexpr int start = 8 * idx; \
constexpr int end = start + 7; \
constexpr int shift_amount = (7 - idx) * 8; \
ATOMIC64_EXCHANGE(start, end, shift_amount, -idx); \
}
#define ATOMIC64_EXCHANGE_HALFWORD(i) \
{ \
constexpr int idx = (i); \
constexpr int start = 16 * idx; \
constexpr int end = start + 15; \
constexpr int shift_amount = (3 - idx) * 16; \
ATOMIC64_EXCHANGE(start, end, shift_amount, -idx * 2); \
}
#define ATOMIC64_EXCHANGE_WORD(i) \
{ \
constexpr int idx = (i); \
constexpr int start = 32 * idx; \
constexpr int end = start + 31; \
constexpr int shift_amount = (1 - idx) * 32; \
ATOMIC64_EXCHANGE(start, end, shift_amount, -idx * 4); \
}
#else
// Little-endian lane->bit-field mappings for ATOMIC64_EXCHANGE; mirror of
// the big-endian variants (lane 0 in the low-order bits, shift_amount
// growing with the index).
#define ATOMIC64_EXCHANGE_BYTE(i) \
{ \
constexpr int idx = (i); \
constexpr int start = 32 + 8 * (3 - idx); \
constexpr int end = start + 7; \
constexpr int shift_amount = idx * 8; \
ATOMIC64_EXCHANGE(start, end, shift_amount, -idx); \
}
#define ATOMIC64_EXCHANGE_HALFWORD(i) \
{ \
constexpr int idx = (i); \
constexpr int start = 32 + 16 * (1 - idx); \
constexpr int end = start + 15; \
constexpr int shift_amount = idx * 16; \
ATOMIC64_EXCHANGE(start, end, shift_amount, -idx * 2); \
}
#define ATOMIC64_EXCHANGE_WORD(i) \
{ \
constexpr int idx = (i); \
constexpr int start = 32 * (1 - idx); \
constexpr int end = start + 31; \
constexpr int shift_amount = idx * 32; \
ATOMIC64_EXCHANGE(start, end, shift_amount, -idx * 4); \
}
#endif // V8_TARGET_BIG_ENDIAN
// Byte exchange: decode the low three address bits via TMLL (CC3 ->
// Condition(1), CC2 -> Condition(2), CC1 -> Condition(4), fall-through =
// CC0) to find the byte lane within the aligned doubleword, then run the
// CSG exchange loop for that lane.
case kS390_Word64AtomicExchangeUint8: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
Register output = i.OutputRegister();
Label done, leftmost0, leftmost1, two, three, four, five, seven;
__ la(r1, MemOperand(base, index));
__ tmll(r1, Operand(7));
__ b(Condition(1), &seven);
__ b(Condition(2), &leftmost1);
__ b(Condition(4), &leftmost0);
/* ending with 0b000 */
ATOMIC64_EXCHANGE_BYTE(0);
__ b(&done);
/* ending in 0b001 to 0b011 */
__ bind(&leftmost0);
__ tmll(r1, Operand(3));
__ b(Condition(1), &three);
__ b(Condition(2), &two);
ATOMIC64_EXCHANGE_BYTE(1);
__ b(&done);
/* ending in 0b010 */
__ bind(&two);
ATOMIC64_EXCHANGE_BYTE(2);
__ b(&done);
/* ending in 0b011 */
__ bind(&three);
ATOMIC64_EXCHANGE_BYTE(3);
__ b(&done);
/* ending in 0b100 to 0b110 */
__ bind(&leftmost1);
__ tmll(r1, Operand(3));
__ b(Condition(8), &four);
__ b(Condition(4), &five);
ATOMIC64_EXCHANGE_BYTE(6);
__ b(&done);
/* ending in 0b100 */
__ bind(&four);
ATOMIC64_EXCHANGE_BYTE(4);
__ b(&done);
/* ending in 0b101 */
__ bind(&five);
ATOMIC64_EXCHANGE_BYTE(5);
__ b(&done);
/* ending in 0b111 */
__ bind(&seven);
ATOMIC64_EXCHANGE_BYTE(7);
__ bind(&done);
break;
}
// Halfword exchange: TMLL on address bits 2^1|2^2 (mask 6) selects one of
// the four halfword lanes of the aligned doubleword, then the lane's CSG
// exchange loop runs.
case kS390_Word64AtomicExchangeUint16: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
Register output = i.OutputRegister();
Label done, one, two, three;
__ la(r1, MemOperand(base, index));
__ tmll(r1, Operand(6));
__ b(Condition(1), &three);
__ b(Condition(2), &two);
__ b(Condition(4), &one);
/* ending in 0b00 */
ATOMIC64_EXCHANGE_HALFWORD(0);
__ b(&done);
/* ending in 0b01 */
__ bind(&one);
ATOMIC64_EXCHANGE_HALFWORD(1);
__ b(&done);
/* ending in 0b10 */
__ bind(&two);
ATOMIC64_EXCHANGE_HALFWORD(2);
__ b(&done);
/* ending in 0b11 */
__ bind(&three);
ATOMIC64_EXCHANGE_HALFWORD(3);
__ bind(&done);
break;
}
// Word exchange: address bit 2^2 selects word lane 0 or 1 of the aligned
// doubleword. TMLL with the single-bit mask 4 yields only CC0 (bit clear)
// or CC3 (bit set) per the z/Architecture TEST UNDER MASK definition, so
// the branch must test Condition(1) (CC3) — the original Condition(2)
// (CC2, "mixed") can never fire with a one-bit mask, making the lane-1
// path unreachable and exchanging the wrong word for offset-4 addresses.
case kS390_Word64AtomicExchangeUint32: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
Register output = i.OutputRegister();
Label done, one;
__ la(r1, MemOperand(base, index));
__ tmll(r1, Operand(4));
__ b(Condition(1), &one);
/* ending in 0b0 */
ATOMIC64_EXCHANGE_WORD(0);
__ b(&done);
__ bind(&one);
/* ending in 0b1 */
ATOMIC64_EXCHANGE_WORD(1);
__ bind(&done);
break;
}
// Doubleword exchange via a CSG retry loop. The loop label must be bound
// BEFORE the csg: on failure csg reloads `output` with the current memory
// contents and sets CC!=0, so the bne must re-execute the csg with the
// refreshed comparand. The original order (bind after csg) made bne
// branch to itself with an unchanged condition code — an infinite loop
// whenever another writer raced the exchange.
case kS390_Word64AtomicExchangeUint64: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
Register output = i.OutputRegister();
Label do_cs;
__ la(r1, MemOperand(base, index));
__ lg(output, MemOperand(r1));
__ bind(&do_cs);
__ csg(output, value, MemOperand(r1));
__ bne(&do_cs, Label::kNear);
break;
}
// 64-bit compare-exchange dispatch; the macro argument is the
// macro-assembler load used to zero-extend the returned previous value
// to the operand width.
case kS390_Word64AtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC64_COMP_EXCHANGE_BYTE(LoadlB);
break;
case kS390_Word64AtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC64_COMP_EXCHANGE_HALFWORD(LoadLogicalHalfWordP);
break;
case kS390_Word64AtomicCompareExchangeUint32:
ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD(LoadlW);
break;
case kS390_Word64AtomicCompareExchangeUint64:
ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64();
break;
default:
UNREACHABLE();
break;
......
......@@ -11,157 +11,193 @@ namespace compiler {
// S390-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(S390_Abs32) \
V(S390_Abs64) \
V(S390_And32) \
V(S390_And64) \
V(S390_Or32) \
V(S390_Or64) \
V(S390_Xor32) \
V(S390_Xor64) \
V(S390_ShiftLeft32) \
V(S390_ShiftLeft64) \
V(S390_ShiftLeftPair) \
V(S390_ShiftRight32) \
V(S390_ShiftRight64) \
V(S390_ShiftRightPair) \
V(S390_ShiftRightArith32) \
V(S390_ShiftRightArith64) \
V(S390_ShiftRightArithPair) \
V(S390_RotRight32) \
V(S390_RotRight64) \
V(S390_Not32) \
V(S390_Not64) \
V(S390_RotLeftAndClear64) \
V(S390_RotLeftAndClearLeft64) \
V(S390_RotLeftAndClearRight64) \
V(S390_Lay) \
V(S390_Add32) \
V(S390_Add64) \
V(S390_AddPair) \
V(S390_AddFloat) \
V(S390_AddDouble) \
V(S390_Sub32) \
V(S390_Sub64) \
V(S390_SubFloat) \
V(S390_SubDouble) \
V(S390_SubPair) \
V(S390_MulPair) \
V(S390_Mul32) \
V(S390_Mul32WithOverflow) \
V(S390_Mul64) \
V(S390_MulHigh32) \
V(S390_MulHighU32) \
V(S390_MulFloat) \
V(S390_MulDouble) \
V(S390_Div32) \
V(S390_Div64) \
V(S390_DivU32) \
V(S390_DivU64) \
V(S390_DivFloat) \
V(S390_DivDouble) \
V(S390_Mod32) \
V(S390_Mod64) \
V(S390_ModU32) \
V(S390_ModU64) \
V(S390_ModDouble) \
V(S390_Neg32) \
V(S390_Neg64) \
V(S390_NegDouble) \
V(S390_NegFloat) \
V(S390_SqrtFloat) \
V(S390_FloorFloat) \
V(S390_CeilFloat) \
V(S390_TruncateFloat) \
V(S390_AbsFloat) \
V(S390_SqrtDouble) \
V(S390_FloorDouble) \
V(S390_CeilDouble) \
V(S390_TruncateDouble) \
V(S390_RoundDouble) \
V(S390_MaxFloat) \
V(S390_MaxDouble) \
V(S390_MinFloat) \
V(S390_MinDouble) \
V(S390_AbsDouble) \
V(S390_Cntlz32) \
V(S390_Cntlz64) \
V(S390_Popcnt32) \
V(S390_Popcnt64) \
V(S390_Cmp32) \
V(S390_Cmp64) \
V(S390_CmpFloat) \
V(S390_CmpDouble) \
V(S390_Tst32) \
V(S390_Tst64) \
V(S390_Push) \
V(S390_PushFrame) \
V(S390_StackClaim) \
V(S390_StoreToStackSlot) \
V(S390_SignExtendWord8ToInt32) \
V(S390_SignExtendWord16ToInt32) \
V(S390_SignExtendWord8ToInt64) \
V(S390_SignExtendWord16ToInt64) \
V(S390_SignExtendWord32ToInt64) \
V(S390_Uint32ToUint64) \
V(S390_Int64ToInt32) \
V(S390_Int64ToFloat32) \
V(S390_Int64ToDouble) \
V(S390_Uint64ToFloat32) \
V(S390_Uint64ToDouble) \
V(S390_Int32ToFloat32) \
V(S390_Int32ToDouble) \
V(S390_Uint32ToFloat32) \
V(S390_Uint32ToDouble) \
V(S390_Float32ToInt64) \
V(S390_Float32ToUint64) \
V(S390_Float32ToInt32) \
V(S390_Float32ToUint32) \
V(S390_Float32ToDouble) \
V(S390_Float64SilenceNaN) \
V(S390_DoubleToInt32) \
V(S390_DoubleToUint32) \
V(S390_DoubleToInt64) \
V(S390_DoubleToUint64) \
V(S390_DoubleToFloat32) \
V(S390_DoubleExtractLowWord32) \
V(S390_DoubleExtractHighWord32) \
V(S390_DoubleInsertLowWord32) \
V(S390_DoubleInsertHighWord32) \
V(S390_DoubleConstruct) \
V(S390_BitcastInt32ToFloat32) \
V(S390_BitcastFloat32ToInt32) \
V(S390_BitcastInt64ToDouble) \
V(S390_BitcastDoubleToInt64) \
V(S390_LoadWordS8) \
V(S390_LoadWordU8) \
V(S390_LoadWordS16) \
V(S390_LoadWordU16) \
V(S390_LoadWordS32) \
V(S390_LoadWordU32) \
V(S390_LoadAndTestWord32) \
V(S390_LoadAndTestWord64) \
V(S390_LoadAndTestFloat32) \
V(S390_LoadAndTestFloat64) \
V(S390_LoadReverse16RR) \
V(S390_LoadReverse32RR) \
V(S390_LoadReverse64RR) \
V(S390_LoadReverse16) \
V(S390_LoadReverse32) \
V(S390_LoadReverse64) \
V(S390_LoadWord64) \
V(S390_LoadFloat32) \
V(S390_LoadDouble) \
V(S390_StoreWord8) \
V(S390_StoreWord16) \
V(S390_StoreWord32) \
V(S390_StoreWord64) \
V(S390_StoreReverse16) \
V(S390_StoreReverse32) \
V(S390_StoreReverse64) \
V(S390_StoreFloat32) \
V(S390_StoreDouble)
#define TARGET_ARCH_OPCODE_LIST(V) \
V(S390_Abs32) \
V(S390_Abs64) \
V(S390_And32) \
V(S390_And64) \
V(S390_Or32) \
V(S390_Or64) \
V(S390_Xor32) \
V(S390_Xor64) \
V(S390_ShiftLeft32) \
V(S390_ShiftLeft64) \
V(S390_ShiftLeftPair) \
V(S390_ShiftRight32) \
V(S390_ShiftRight64) \
V(S390_ShiftRightPair) \
V(S390_ShiftRightArith32) \
V(S390_ShiftRightArith64) \
V(S390_ShiftRightArithPair) \
V(S390_RotRight32) \
V(S390_RotRight64) \
V(S390_Not32) \
V(S390_Not64) \
V(S390_RotLeftAndClear64) \
V(S390_RotLeftAndClearLeft64) \
V(S390_RotLeftAndClearRight64) \
V(S390_Lay) \
V(S390_Add32) \
V(S390_Add64) \
V(S390_AddPair) \
V(S390_AddFloat) \
V(S390_AddDouble) \
V(S390_Sub32) \
V(S390_Sub64) \
V(S390_SubFloat) \
V(S390_SubDouble) \
V(S390_SubPair) \
V(S390_MulPair) \
V(S390_Mul32) \
V(S390_Mul32WithOverflow) \
V(S390_Mul64) \
V(S390_MulHigh32) \
V(S390_MulHighU32) \
V(S390_MulFloat) \
V(S390_MulDouble) \
V(S390_Div32) \
V(S390_Div64) \
V(S390_DivU32) \
V(S390_DivU64) \
V(S390_DivFloat) \
V(S390_DivDouble) \
V(S390_Mod32) \
V(S390_Mod64) \
V(S390_ModU32) \
V(S390_ModU64) \
V(S390_ModDouble) \
V(S390_Neg32) \
V(S390_Neg64) \
V(S390_NegDouble) \
V(S390_NegFloat) \
V(S390_SqrtFloat) \
V(S390_FloorFloat) \
V(S390_CeilFloat) \
V(S390_TruncateFloat) \
V(S390_AbsFloat) \
V(S390_SqrtDouble) \
V(S390_FloorDouble) \
V(S390_CeilDouble) \
V(S390_TruncateDouble) \
V(S390_RoundDouble) \
V(S390_MaxFloat) \
V(S390_MaxDouble) \
V(S390_MinFloat) \
V(S390_MinDouble) \
V(S390_AbsDouble) \
V(S390_Cntlz32) \
V(S390_Cntlz64) \
V(S390_Popcnt32) \
V(S390_Popcnt64) \
V(S390_Cmp32) \
V(S390_Cmp64) \
V(S390_CmpFloat) \
V(S390_CmpDouble) \
V(S390_Tst32) \
V(S390_Tst64) \
V(S390_Push) \
V(S390_PushFrame) \
V(S390_StackClaim) \
V(S390_StoreToStackSlot) \
V(S390_SignExtendWord8ToInt32) \
V(S390_SignExtendWord16ToInt32) \
V(S390_SignExtendWord8ToInt64) \
V(S390_SignExtendWord16ToInt64) \
V(S390_SignExtendWord32ToInt64) \
V(S390_Uint32ToUint64) \
V(S390_Int64ToInt32) \
V(S390_Int64ToFloat32) \
V(S390_Int64ToDouble) \
V(S390_Uint64ToFloat32) \
V(S390_Uint64ToDouble) \
V(S390_Int32ToFloat32) \
V(S390_Int32ToDouble) \
V(S390_Uint32ToFloat32) \
V(S390_Uint32ToDouble) \
V(S390_Float32ToInt64) \
V(S390_Float32ToUint64) \
V(S390_Float32ToInt32) \
V(S390_Float32ToUint32) \
V(S390_Float32ToDouble) \
V(S390_Float64SilenceNaN) \
V(S390_DoubleToInt32) \
V(S390_DoubleToUint32) \
V(S390_DoubleToInt64) \
V(S390_DoubleToUint64) \
V(S390_DoubleToFloat32) \
V(S390_DoubleExtractLowWord32) \
V(S390_DoubleExtractHighWord32) \
V(S390_DoubleInsertLowWord32) \
V(S390_DoubleInsertHighWord32) \
V(S390_DoubleConstruct) \
V(S390_BitcastInt32ToFloat32) \
V(S390_BitcastFloat32ToInt32) \
V(S390_BitcastInt64ToDouble) \
V(S390_BitcastDoubleToInt64) \
V(S390_LoadWordS8) \
V(S390_LoadWordU8) \
V(S390_LoadWordS16) \
V(S390_LoadWordU16) \
V(S390_LoadWordS32) \
V(S390_LoadWordU32) \
V(S390_LoadAndTestWord32) \
V(S390_LoadAndTestWord64) \
V(S390_LoadAndTestFloat32) \
V(S390_LoadAndTestFloat64) \
V(S390_LoadReverse16RR) \
V(S390_LoadReverse32RR) \
V(S390_LoadReverse64RR) \
V(S390_LoadReverse16) \
V(S390_LoadReverse32) \
V(S390_LoadReverse64) \
V(S390_LoadWord64) \
V(S390_LoadFloat32) \
V(S390_LoadDouble) \
V(S390_StoreWord8) \
V(S390_StoreWord16) \
V(S390_StoreWord32) \
V(S390_StoreWord64) \
V(S390_StoreReverse16) \
V(S390_StoreReverse32) \
V(S390_StoreReverse64) \
V(S390_StoreFloat32) \
V(S390_StoreDouble) \
V(S390_Word64AtomicLoadUint8) \
V(S390_Word64AtomicLoadUint16) \
V(S390_Word64AtomicLoadUint32) \
V(S390_Word64AtomicLoadUint64) \
V(S390_Word64AtomicStoreUint8) \
V(S390_Word64AtomicStoreUint16) \
V(S390_Word64AtomicStoreUint32) \
V(S390_Word64AtomicStoreUint64) \
V(S390_Word64AtomicExchangeUint8) \
V(S390_Word64AtomicExchangeUint16) \
V(S390_Word64AtomicExchangeUint32) \
V(S390_Word64AtomicExchangeUint64) \
V(S390_Word64AtomicCompareExchangeUint8) \
V(S390_Word64AtomicCompareExchangeUint16) \
V(S390_Word64AtomicCompareExchangeUint32) \
V(S390_Word64AtomicCompareExchangeUint64) \
V(S390_Word64AtomicAddUint8) \
V(S390_Word64AtomicAddUint16) \
V(S390_Word64AtomicAddUint32) \
V(S390_Word64AtomicAddUint64) \
V(S390_Word64AtomicSubUint8) \
V(S390_Word64AtomicSubUint16) \
V(S390_Word64AtomicSubUint32) \
V(S390_Word64AtomicSubUint64) \
V(S390_Word64AtomicAndUint8) \
V(S390_Word64AtomicAndUint16) \
V(S390_Word64AtomicAndUint32) \
V(S390_Word64AtomicAndUint64) \
V(S390_Word64AtomicOrUint8) \
V(S390_Word64AtomicOrUint16) \
V(S390_Word64AtomicOrUint32) \
V(S390_Word64AtomicOrUint64) \
V(S390_Word64AtomicXorUint8) \
V(S390_Word64AtomicXorUint16) \
V(S390_Word64AtomicXorUint32) \
V(S390_Word64AtomicXorUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
......
......@@ -169,6 +169,46 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_StackClaim:
return kHasSideEffect;
// Scheduling flags for the new 64-bit atomics: loads are pure loads;
// stores and all read-modify-write forms must not be reordered, so they
// are marked side-effecting.
case kS390_Word64AtomicLoadUint8:
case kS390_Word64AtomicLoadUint16:
case kS390_Word64AtomicLoadUint32:
case kS390_Word64AtomicLoadUint64:
return kIsLoadOperation;
case kS390_Word64AtomicStoreUint8:
case kS390_Word64AtomicStoreUint16:
case kS390_Word64AtomicStoreUint32:
case kS390_Word64AtomicStoreUint64:
case kS390_Word64AtomicExchangeUint8:
case kS390_Word64AtomicExchangeUint16:
case kS390_Word64AtomicExchangeUint32:
case kS390_Word64AtomicExchangeUint64:
case kS390_Word64AtomicCompareExchangeUint8:
case kS390_Word64AtomicCompareExchangeUint16:
case kS390_Word64AtomicCompareExchangeUint32:
case kS390_Word64AtomicCompareExchangeUint64:
case kS390_Word64AtomicAddUint8:
case kS390_Word64AtomicAddUint16:
case kS390_Word64AtomicAddUint32:
case kS390_Word64AtomicAddUint64:
case kS390_Word64AtomicSubUint8:
case kS390_Word64AtomicSubUint16:
case kS390_Word64AtomicSubUint32:
case kS390_Word64AtomicSubUint64:
case kS390_Word64AtomicAndUint8:
case kS390_Word64AtomicAndUint16:
case kS390_Word64AtomicAndUint32:
case kS390_Word64AtomicAndUint64:
case kS390_Word64AtomicOrUint8:
case kS390_Word64AtomicOrUint16:
case kS390_Word64AtomicOrUint32:
case kS390_Word64AtomicOrUint64:
case kS390_Word64AtomicXorUint8:
case kS390_Word64AtomicXorUint16:
case kS390_Word64AtomicXorUint32:
case kS390_Word64AtomicXorUint64:
return kHasSideEffect;
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
......
......@@ -2255,11 +2255,26 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
inputs);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
S390OperandGenerator g(this);
// Shared selector helper for 32- and 64-bit atomic exchange: emits
// `opcode` in MRR addressing mode with base, index and value each in a
// unique register (the codegen sequences clobber r0/r1 and scratch, so
// inputs must not alias), defining the node's output as a plain register.
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
S390OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
AddressingMode addressing_mode = kMode_MRR;
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
inputs[input_count++] = g.UseUniqueRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
selector->Emit(code, 1, outputs, input_count, inputs);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
......@@ -2276,42 +2291,34 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
UNREACHABLE();
return;
}
AddressingMode addressing_mode = kMode_MRR;
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
inputs[input_count++] = g.UseUniqueRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, 1, outputs, input_count, inputs);
VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
S390OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
MachineType type = AtomicOpType(node->op());
// Lowers a 64-bit atomic exchange. Word64 atomics only come in
// zero-extending (unsigned) access widths, so the dispatch covers
// Uint8/16/32/64 and nothing else.
// (The visible span had deleted diff lines from the old 32-bit
// compare-exchange selector interleaved into the if-chain; this is the
// reconstructed added function.)
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
  ArchOpcode opcode = kArchNop;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Uint8()) {
    opcode = kS390_Word64AtomicExchangeUint8;
  } else if (type == MachineType::Uint16()) {
    opcode = kS390_Word64AtomicExchangeUint16;
  } else if (type == MachineType::Uint32()) {
    opcode = kS390_Word64AtomicExchangeUint32;
  } else if (type == MachineType::Uint64()) {
    opcode = kS390_Word64AtomicExchangeUint64;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicExchange(this, node, opcode);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
S390OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
InstructionOperand inputs[4];
size_t input_count = 0;
......@@ -2333,34 +2340,53 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
outputs[output_count++] = g.DefineSameAsFirst(node);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, output_count, outputs, input_count, inputs);
selector->Emit(code, output_count, outputs, input_count, inputs);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
S390OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
// Lowers a 32-bit atomic compare-exchange by picking the opcode that
// matches the access width and signedness, then delegating to the shared
// VisitAtomicCompareExchange helper.
// (The visible span had deleted diff lines — `opcode = int8_op;` etc. from
// the old binop selector — interleaved, producing duplicate assignments;
// this is the reconstructed added function.)
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode = kArchNop;
  if (type == MachineType::Int8()) {
    opcode = kWord32AtomicCompareExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kWord32AtomicCompareExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kWord32AtomicCompareExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kWord32AtomicCompareExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kWord32AtomicCompareExchangeWord32;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicCompareExchange(this, node, opcode);
}
// Lowers a 64-bit atomic compare-exchange. Word64 atomics exist only in
// unsigned widths; anything else is a compiler bug.
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode = kArchNop;
  // The MachineType equality tests are mutually exclusive, so widest-first
  // dispatch is equivalent.
  if (type == MachineType::Uint64()) {
    opcode = kS390_Word64AtomicCompareExchangeUint64;
  } else if (type == MachineType::Uint32()) {
    opcode = kS390_Word64AtomicCompareExchangeUint32;
  } else if (type == MachineType::Uint16()) {
    opcode = kS390_Word64AtomicCompareExchangeUint16;
  } else if (type == MachineType::Uint8()) {
    opcode = kS390_Word64AtomicCompareExchangeUint8;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicCompareExchange(this, node, opcode);
}
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
S390OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
InstructionOperand inputs[3];
size_t input_count = 0;
......@@ -2386,7 +2412,31 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
temps[temp_count++] = g.TempRegister();
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, output_count, outputs, input_count, inputs, temp_count, temps);
selector->Emit(code, output_count, outputs, input_count, inputs, temp_count,
temps);
}
// Lowers a 32-bit atomic read-modify-write (add/sub/and/or/xor). The caller
// supplies one opcode per supported width/signedness; this routine merely
// dispatches on the operation's MachineType.
void InstructionSelector::VisitWord32AtomicBinaryOperation(
    Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
    ArchOpcode uint16_op, ArchOpcode word32_op) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode = kArchNop;
  // Equality tests are disjoint, so widest-first ordering is equivalent.
  if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = word32_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Int16()) {
    opcode = int16_op;
  } else if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Int8()) {
    opcode = int8_op;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicBinop(this, node, opcode);
}
#define VISIT_ATOMIC_BINOP(op) \
......@@ -2403,6 +2453,101 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
// Lowers a 64-bit atomic read-modify-write. Mirrors the Word32 variant but
// only unsigned widths exist for Word64 atomics.
void InstructionSelector::VisitWord64AtomicBinaryOperation(
    Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode word32_op,
    ArchOpcode word64_op) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode = kArchNop;
  // Equality tests are disjoint, so widest-first ordering is equivalent.
  if (type == MachineType::Uint64()) {
    opcode = word64_op;
  } else if (type == MachineType::Uint32()) {
    opcode = word32_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicBinop(this, node, opcode);
}
// Stamps out InstructionSelector::VisitWord64Atomic<Op> for each 64-bit
// read-modify-write, forwarding the four width-specific opcodes to
// VisitWord64AtomicBinaryOperation.
#define VISIT_ATOMIC64_BINOP(op)                                            \
  void InstructionSelector::VisitWord64Atomic##op(Node* node) {             \
    VisitWord64AtomicBinaryOperation(                                       \
        node, kS390_Word64Atomic##op##Uint8, kS390_Word64Atomic##op##Uint16, \
        kS390_Word64Atomic##op##Uint32, kS390_Word64Atomic##op##Uint64);    \
  }
VISIT_ATOMIC64_BINOP(Add)
VISIT_ATOMIC64_BINOP(Sub)
VISIT_ATOMIC64_BINOP(And)
VISIT_ATOMIC64_BINOP(Or)
VISIT_ATOMIC64_BINOP(Xor)
#undef VISIT_ATOMIC64_BINOP
// Lowers a 64-bit atomic load: picks the zero-extending load opcode for the
// memory representation and emits it with MRR (base + index) addressing,
// defining the node's result register.
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  S390OperandGenerator g(this);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord64:
      opcode = kS390_Word64AtomicLoadUint64;
      break;
    case MachineRepresentation::kWord32:
      opcode = kS390_Word64AtomicLoadUint32;
      break;
    case MachineRepresentation::kWord16:
      opcode = kS390_Word64AtomicLoadUint16;
      break;
    case MachineRepresentation::kWord8:
      opcode = kS390_Word64AtomicLoadUint8;
      break;
    default:
      UNREACHABLE();
      return;
  }
  Node* const base = node->InputAt(0);
  Node* const index = node->InputAt(1);
  Emit(opcode | AddressingModeField::encode(kMode_MRR),
       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
// Lowers a 64-bit atomic store: selects the store opcode for the memory
// representation and emits it with no outputs. Operand order is
// (value, base, index) with MRR addressing.
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  S390OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kS390_Word64AtomicStoreUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kS390_Word64AtomicStoreUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kS390_Word64AtomicStoreUint32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kS390_Word64AtomicStoreUint64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  // NOTE(review): inputs[] has room for 4 operands but only 3 are filled —
  // presumably mirrors the Word32 store path; confirm the spare slot is
  // intentional.
  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(value);
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, input_count,
       inputs);
}
// SIMD (I32x4) lowering is not yet implemented for s390.
void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
......
......@@ -2879,6 +2879,12 @@ void TurboAssembler::LoadAndSub32(Register dst, Register src,
laa(dst, dst, opnd);
}
// Atomic 64-bit fetch-and-subtract: subtracts src from the doubleword at
// opnd and leaves the *previous* memory value in dst. Implemented as a
// load-and-add of the two's-complement negation, mirroring LoadAndSub32.
void TurboAssembler::LoadAndSub64(Register dst, Register src,
                                  const MemOperand& opnd) {
  // dst = -src; then LAAG atomically does dst = [opnd], [opnd] += dst.
  lcgr(dst, src);
  laag(dst, dst, opnd);
}
//----------------------------------------------------------------------------
// Subtract Logical Instructions
//----------------------------------------------------------------------------
......@@ -3369,6 +3375,12 @@ void TurboAssembler::CmpAndSwap(Register old_val, Register new_val,
}
}
// Atomic 64-bit compare-and-swap via CSG: if the doubleword at opnd equals
// old_val, new_val is stored there; otherwise old_val receives the current
// memory contents. The condition code records which case occurred.
void TurboAssembler::CmpAndSwap64(Register old_val, Register new_val,
                                  const MemOperand& opnd) {
  // CSG (RSY format) only encodes a signed 20-bit displacement.
  DCHECK(is_int20(opnd.offset()));
  csg(old_val, new_val, opnd);
}
//-----------------------------------------------------------------------------
// Compare Logical Helpers
//-----------------------------------------------------------------------------
......
......@@ -325,6 +325,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SubP(Register dst, const MemOperand& opnd);
void SubP_ExtendSrc(Register dst, const MemOperand& opnd);
void LoadAndSub32(Register dst, Register src, const MemOperand& opnd);
void LoadAndSub64(Register dst, Register src, const MemOperand& opnd);
// Subtract Logical (Register - Mem)
void SubLogical(Register dst, const MemOperand& opnd);
......@@ -392,6 +393,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Cmp32(Register dst, const MemOperand& opnd);
void CmpP(Register dst, const MemOperand& opnd);
void CmpAndSwap(Register old_val, Register new_val, const MemOperand& opnd);
void CmpAndSwap64(Register old_val, Register new_val, const MemOperand& opnd);
// Compare Logical
void CmpLogical32(Register src1, Register src2);
......
......@@ -1369,6 +1369,7 @@ void Simulator::EvalTableInit() {
EvalTable[SRLG] = &Simulator::Evaluate_SRLG;
EvalTable[SLLG] = &Simulator::Evaluate_SLLG;
EvalTable[CSY] = &Simulator::Evaluate_CSY;
EvalTable[CSG] = &Simulator::Evaluate_CSG;
EvalTable[RLLG] = &Simulator::Evaluate_RLLG;
EvalTable[RLL] = &Simulator::Evaluate_RLL;
EvalTable[STMG] = &Simulator::Evaluate_STMG;
......@@ -8778,9 +8779,26 @@ EVALUATE(CSY) {
}
// Simulates COMPARE AND SWAP (CSG, 64-bit): atomically compares R1 with the
// doubleword at B2+D2; on equality stores R3 there, otherwise loads the
// current memory value into R1. The condition code records the outcome.
// (The visible span had the deleted UNIMPLEMENTED stub interleaved before
// the new body; this is the reconstructed added implementation.)
EVALUATE(CSG) {
  DCHECK_OPCODE(CSG);
  DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
  int32_t offset = d2;
  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
  intptr_t target_addr = static_cast<intptr_t>(b2_val) + offset;
  int64_t r1_val = get_register(r1);
  int64_t r3_val = get_register(r3);
  // NOTE(review): z/Architecture requires the CSG second operand to be
  // doubleword-aligned; `& 0x3` only checks word alignment — confirm whether
  // `& 0x7` was intended.
  DCHECK_EQ(target_addr & 0x3, 0);
  bool is_success = __atomic_compare_exchange_n(
      reinterpret_cast<int64_t*>(target_addr), &r1_val, r3_val, true,
      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  if (!is_success) {
    // Compare failed: expose the observed memory value in R1.
    set_register(r1, r1_val);
    condition_reg_ = 0x4;
  } else {
    condition_reg_ = 0x8;
  }
  return length;
}
EVALUATE(RLLG) {
......@@ -9153,28 +9171,38 @@ EVALUATE(STOCG) {
return 0;
}
// Decodes an RSY-a load-and-update instruction and applies the GCC atomic
// builtin |op| to the 64-bit operand at B2+D2: R1 receives the old memory
// value and memory becomes (memory op R3). Used by LANG/LAOG/LAXG/LAAG.
// (Comments must stay outside the macro: a // inside a backslash-continued
// line would swallow the continuation.)
#define ATOMIC_LOAD_AND_UPDATE_WORD64(op)                              \
  DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);                            \
  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);                   \
  intptr_t addr = static_cast<intptr_t>(b2_val) + d2;                  \
  int64_t r3_val = get_register(r3);                                   \
  DCHECK_EQ(addr & 0x3, 0);                                            \
  int64_t r1_val =                                                     \
      op(reinterpret_cast<int64_t*>(addr), r3_val, __ATOMIC_SEQ_CST);  \
  set_register(r1, r1_val);
// Simulates LOAD AND AND (LANG, 64-bit): R1 <- old memory value,
// memory <- memory & R3. (Reconstructed: the deleted UNIMPLEMENTED stub was
// interleaved into the visible span.)
EVALUATE(LANG) {
  DCHECK_OPCODE(LANG);
  ATOMIC_LOAD_AND_UPDATE_WORD64(__atomic_fetch_and);
  return length;
}
// Simulates LOAD AND OR (LAOG, 64-bit): R1 <- old memory value,
// memory <- memory | R3. (Reconstructed: the deleted UNIMPLEMENTED stub was
// interleaved into the visible span.)
EVALUATE(LAOG) {
  DCHECK_OPCODE(LAOG);
  ATOMIC_LOAD_AND_UPDATE_WORD64(__atomic_fetch_or);
  return length;
}
// Simulates LOAD AND EXCLUSIVE OR (LAXG, 64-bit): R1 <- old memory value,
// memory <- memory ^ R3. (Reconstructed: the deleted UNIMPLEMENTED stub was
// interleaved into the visible span.)
EVALUATE(LAXG) {
  DCHECK_OPCODE(LAXG);
  ATOMIC_LOAD_AND_UPDATE_WORD64(__atomic_fetch_xor);
  return length;
}
// Simulates LOAD AND ADD (LAAG, 64-bit): R1 <- old memory value,
// memory <- memory + R3. (Reconstructed: the deleted UNIMPLEMENTED stub was
// interleaved into the visible span.)
EVALUATE(LAAG) {
  DCHECK_OPCODE(LAAG);
  ATOMIC_LOAD_AND_UPDATE_WORD64(__atomic_fetch_add);
  return length;
}
EVALUATE(LAALG) {
......@@ -9183,6 +9211,8 @@ EVALUATE(LAALG) {
return 0;
}
#undef ATOMIC_LOAD_AND_UPDATE_WORD64
EVALUATE(LOC) {
UNIMPLEMENTED();
USE(instr);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment