Commit 3c83ffb9 authored by Vasili Skurydzin, committed by Commit Bot

PPC/s390: Implement 32-bit atomic operations

Implement atomic compare-exchange and atomic binary operations for
PPC and s390

Change-Id: I8f89a0ebb912082c4c1e6b9a3daf64f28c114010
Reviewed-on: https://chromium-review.googlesource.com/1013861
Reviewed-by: Junliang Yan <jyan@ca.ibm.com>
Commit-Queue: Junliang Yan <jyan@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#53165}
parent 17875b01
@@ -659,6 +659,60 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
__ bne(&exchange, cr0); \
} while (0)
#define ASSEMBLE_ATOMIC_BINOP(bin_inst, load_inst, store_inst) \
do { \
MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
Label binop; \
__ bind(&binop); \
__ load_inst(i.OutputRegister(), operand); \
__ bin_inst(i.InputRegister(2), i.OutputRegister(), i.InputRegister(2)); \
__ store_inst(i.InputRegister(2), operand); \
__ bne(&binop, cr0); \
} while (false)
#define ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(bin_inst, load_inst, \
store_inst, ext_instr) \
do { \
MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
Label binop; \
__ bind(&binop); \
__ load_inst(i.OutputRegister(), operand); \
__ ext_instr(i.OutputRegister(), i.OutputRegister()); \
__ bin_inst(i.InputRegister(2), i.OutputRegister(), i.InputRegister(2)); \
__ store_inst(i.InputRegister(2), operand); \
__ bne(&binop, cr0); \
} while (false)
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp_inst, load_inst, store_inst) \
do { \
MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
Label loop; \
Label exit; \
__ bind(&loop); \
__ load_inst(i.OutputRegister(), operand); \
__ cmp_inst(i.OutputRegister(), i.InputRegister(2)); \
__ bne(&exit, cr0); \
__ store_inst(i.InputRegister(3), operand); \
__ bne(&loop, cr0); \
__ bind(&exit); \
} while (false)
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp_inst, load_inst, \
store_inst, ext_instr) \
do { \
MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
Label loop; \
Label exit; \
__ bind(&loop); \
__ load_inst(i.OutputRegister(), operand); \
__ ext_instr(i.OutputRegister(), i.OutputRegister()); \
__ cmp_inst(i.OutputRegister(), i.InputRegister(2)); \
__ bne(&exit, cr0); \
__ store_inst(i.InputRegister(3), operand); \
__ bne(&loop, cr0); \
__ bind(&exit); \
} while (false)
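The macros above all expand to the same load-reserved/store-conditional retry loop: lwarx/lharx/lbarx reserves the location, the operation or comparison runs on the loaded value, stwcx./sthcx./stbcx. tries to commit, and the bne on cr0 loops back if the reservation was lost. A minimal portable sketch of the fetch-and-op semantics this produces, using a C++11 CAS loop in place of the reservation pair (illustrative only, not V8 code):

```cpp
#include <atomic>
#include <cstdint>
#include <cstdio>

// Model of the retry loop ASSEMBLE_ATOMIC_BINOP emits: read the current
// value, apply the operation, and try to publish the result, retrying if
// another thread touched the word in between. It returns the value observed
// before the update, which is what the kWord32Atomic* instructions leave in
// the output register.
static uint32_t AtomicFetchAdd(std::atomic<uint32_t>* cell, uint32_t operand) {
  uint32_t old_value = cell->load(std::memory_order_relaxed);  // "lwarx"
  uint32_t new_value;
  do {
    new_value = old_value + operand;  // the bin_inst step
    // compare_exchange_weak stands in for stwcx.: on failure it reloads
    // old_value and we go around again, like the bne back to the label.
  } while (!cell->compare_exchange_weak(old_value, new_value));
  return old_value;
}

int main() {
  std::atomic<uint32_t> cell{40};
  uint32_t before = AtomicFetchAdd(&cell, 2);
  std::printf("before=%u after=%u\n", before, cell.load());  // before=40 after=42
}
```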
void CodeGenerator::AssembleDeconstructFrame() {
__ LeaveFrame(StackFrame::MANUAL);
}
@@ -1959,6 +2013,46 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kWord32AtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lwarx, stwcx);
break;
case kWord32AtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp, lbarx, stbcx, extsb);
break;
case kWord32AtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, lbarx, stbcx);
break;
case kWord32AtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp, lharx, sthcx, extsh);
break;
case kWord32AtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, lharx, sthcx);
break;
case kWord32AtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmpw, lwarx, stwcx);
break;
#define ATOMIC_BINOP_CASE(op, inst) \
case kWord32Atomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lbarx, stbcx, extsb); \
break; \
case kWord32Atomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(inst, lbarx, stbcx); \
break; \
case kWord32Atomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lharx, sthcx, extsh); \
break; \
case kWord32Atomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(inst, lharx, sthcx); \
break; \
case kWord32Atomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(inst, lwarx, stwcx); \
break;
ATOMIC_BINOP_CASE(Add, add)
ATOMIC_BINOP_CASE(Sub, sub)
ATOMIC_BINOP_CASE(And, and_)
ATOMIC_BINOP_CASE(Or, orx)
ATOMIC_BINOP_CASE(Xor, xor_)
#undef ATOMIC_BINOP_CASE
default:
UNREACHABLE();
break;
...
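In the signed sub-word cases above, the loaded value passes through extsb/extsh before the compare (or the binop) because the 32-bit register holding the expected value or operand is already sign-extended, while lbarx/lharx zero-extend what they load. A standalone sketch of the mismatch this avoids (plain C++, not V8 code):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  int8_t in_memory = -1;                              // byte at the address: 0xFF
  uint32_t loaded = uint8_t(in_memory);               // lbarx-style zero extension: 0x000000FF
  uint32_t expected = uint32_t(int32_t(in_memory));   // expected old value, sign-extended: 0xFFFFFFFF

  // Without sign extension the compare fails even though the byte matches.
  std::printf("raw compare:    %s\n", loaded == expected ? "equal" : "not equal");

  // extsb-equivalent step: sign-extend the loaded byte, then compare.
  uint32_t extended = uint32_t(int32_t(int8_t(loaded)));
  std::printf("after sign-ext: %s\n", extended == expected ? "equal" : "not equal");
  return 0;
}
```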
@@ -1995,18 +1995,100 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
PPCOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
MachineType type = AtomicOpRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
opcode = kWord32AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
opcode = kWord32AtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
opcode = kWord32AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
return;
}
AddressingMode addressing_mode = kMode_MRR;
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
InstructionOperand inputs[4];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
inputs[input_count++] = g.UseUniqueRegister(index);
inputs[input_count++] = g.UseUniqueRegister(old_value);
inputs[input_count++] = g.UseUniqueRegister(new_value);
InstructionOperand outputs[1];
size_t output_count = 0;
outputs[output_count++] = g.DefineAsRegister(node);
Emit(code, output_count, outputs, input_count, inputs);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
PPCOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
MachineType type = AtomicOpRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = int8_op;
} else if (type == MachineType::Uint8()) {
opcode = uint8_op;
} else if (type == MachineType::Int16()) {
opcode = int16_op;
} else if (type == MachineType::Uint16()) {
opcode = uint16_op;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
opcode = word32_op;
} else {
UNREACHABLE();
return;
}
AddressingMode addressing_mode = kMode_MRR;
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
inputs[input_count++] = g.UseUniqueRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
size_t output_count = 0;
outputs[output_count++] = g.DefineAsRegister(node);
Emit(code, output_count, outputs, input_count, inputs);
}
#define VISIT_ATOMIC_BINOP(op) \
void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
VisitWord32AtomicBinaryOperation( \
node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
kWord32Atomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
...
@@ -2268,24 +2268,124 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, 1, outputs, input_count, inputs);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
S390OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
MachineType type = AtomicOpRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
opcode = kWord32AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
opcode = kWord32AtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
opcode = kWord32AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
opcode = kWord32AtomicCompareExchangeWord32;
} else {
UNREACHABLE();
return;
}
InstructionOperand inputs[4];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(old_value);
inputs[input_count++] = g.UseUniqueRegister(new_value);
inputs[input_count++] = g.UseUniqueRegister(base);
AddressingMode addressing_mode;
if (g.CanBeImmediate(index, OperandMode::kInt20Imm)) {
inputs[input_count++] = g.UseImmediate(index);
addressing_mode = kMode_MRI;
} else {
inputs[input_count++] = g.UseUniqueRegister(index);
addressing_mode = kMode_MRR;
}
InstructionOperand outputs[1];
size_t output_count = 0;
outputs[output_count++] = g.DefineSameAsFirst(node);
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, output_count, outputs, input_count, inputs);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
S390OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
MachineType type = AtomicOpRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = int8_op;
} else if (type == MachineType::Uint8()) {
opcode = uint8_op;
} else if (type == MachineType::Int16()) {
opcode = int16_op;
} else if (type == MachineType::Uint16()) {
opcode = uint16_op;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
opcode = word32_op;
} else {
UNREACHABLE();
return;
}
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
AddressingMode addressing_mode;
if (g.CanBeImmediate(index, OperandMode::kInt20Imm)) {
inputs[input_count++] = g.UseImmediate(index);
addressing_mode = kMode_MRI;
} else {
inputs[input_count++] = g.UseUniqueRegister(index);
addressing_mode = kMode_MRR;
}
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
size_t output_count = 0;
outputs[output_count++] = g.DefineAsRegister(node);
InstructionOperand temps[1];
size_t temp_count = 0;
temps[temp_count++] = g.TempRegister();
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, output_count, outputs, input_count, inputs, temp_count, temps);
}
#define VISIT_ATOMIC_BINOP(op) \
void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
VisitWord32AtomicBinaryOperation( \
node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
kWord32Atomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
...
@@ -3348,6 +3348,16 @@ void TurboAssembler::CmpP(Register dst, const MemOperand& opnd) {
#endif
}
// Use cs or csy based on the offset.
void TurboAssembler::CmpAndSwap(Register old_val, Register new_val,
const MemOperand& opnd) {
if (is_uint12(opnd.offset())) {
cs(old_val, new_val, opnd);
} else {
csy(old_val, new_val, opnd);
}
}
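CmpAndSwap picks the short-displacement cs when the offset fits in an unsigned 12-bit field and the long-displacement csy otherwise. A portable sketch of the compare-and-swap contract both forms provide, modeled here with std::atomic under the assumption of equivalent semantics (not V8 code): the old value is compared against memory, the new value is stored on a match, and on a mismatch the first operand is reloaded with the current contents.

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>

int main() {
  std::atomic<uint32_t> word{42};

  uint32_t old_val = 42;
  bool swapped = word.compare_exchange_strong(old_val, 7);
  assert(swapped && word.load() == 7);  // match: new value stored

  old_val = 42;
  swapped = word.compare_exchange_strong(old_val, 99);
  // Mismatch: memory is untouched and old_val now holds the current contents,
  // which is what CS leaves in the first register operand.
  assert(!swapped && old_val == 7 && word.load() == 7);
  return 0;
}
```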
//-----------------------------------------------------------------------------
// Compare Logical Helpers
//-----------------------------------------------------------------------------
@@ -4046,6 +4056,14 @@ void TurboAssembler::StoreW(Register src, const MemOperand& mem,
}
}
void TurboAssembler::LoadHalfWordP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
lghr(dst, src);
#else
lhr(dst, src);
#endif
}
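The new register-to-register LoadHalfWordP overload sign-extends the low halfword of the source register: lghr into 64 bits on s390x, lhr into 32 bits otherwise. A quick standalone model of that behavior (not V8 code):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  uint64_t src = 0x00000000DEAD8001u;                   // low halfword 0x8001 == -32767
  int64_t dst64 = static_cast<int16_t>(src & 0xFFFF);   // lghr-style sign extension
  int32_t dst32 = static_cast<int16_t>(src & 0xFFFF);   // lhr-style sign extension
  assert(dst64 == -32767 && dst32 == -32767);
  return 0;
}
```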
// Loads a 16-bit halfword value from memory and sign extends it to a
// pointer-sized register.
void TurboAssembler::LoadHalfWordP(Register dst, const MemOperand& mem,
...
@@ -399,6 +399,7 @@ class TurboAssembler : public Assembler {
void CmpP(Register dst, const Operand& opnd);
void Cmp32(Register dst, const MemOperand& opnd);
void CmpP(Register dst, const MemOperand& opnd);
void CmpAndSwap(Register old_val, Register new_val, const MemOperand& opnd);
// Compare Logical
void CmpLogical32(Register src1, Register src2);
@@ -767,6 +768,8 @@ class TurboAssembler : public Assembler {
void StoreW(Register src, const MemOperand& mem, Register scratch = no_reg);
void LoadHalfWordP(Register dst, Register src);
void LoadHalfWordP(Register dst, const MemOperand& mem,
                   Register scratch = no_reg);
...
@@ -7139,9 +7139,13 @@ EVALUATE(LLGCR) {
}
EVALUATE(LLGHR) {
DCHECK_OPCODE(LLGHR);
DECODE_RRE_INSTRUCTION(r1, r2);
uint64_t r2_val = get_low_register<uint64_t>(r2);
r2_val <<= 48;
r2_val >>= 48;
set_register(r1, r2_val);
return length;
}
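The simulator zero-extends the halfword with a pair of unsigned 48-bit shifts. A standalone check (not V8 code) that this is the same as masking with 0xFFFF:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  uint64_t r2_val = 0xFFFFFFFFFFFF8001u;
  uint64_t shifted = (r2_val << 48) >> 48;  // unsigned shifts: logical (zero) extension
  assert(shifted == (r2_val & 0xFFFFu));    // == 0x8001
  return 0;
}
```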
EVALUATE(MLGR) {
...