Commit 8301530d authored by Deepti Gandluri, committed by Commit Bot

Add I64Atomic {Exchg, CmpExchg} operations for ia32

Bug: v8:6532
Change-Id: Ib486a1c0d80a14b778dde5ef6655e11d326b4c73
Reviewed-on: https://chromium-review.googlesource.com/1157068
Reviewed-by: Bill Budge <bbudge@chromium.org>
Reviewed-by: Ben Smith <binji@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54852}
parent 8dc41a58
@@ -3656,6 +3656,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xchg(i.InputRegister(0), i.MemoryOperand(1));
break;
}
// For the narrow Word64 operations below, i.OutputRegister(1) contains
// the high-order 32 bits of the 64-bit result. Since the exchanged data
// fits in a single register, i.OutputRegister(1) must be cleared for the
// correct return value to be propagated back.
case kIA32Word64AtomicNarrowExchangeUint8: {
__ xchg_b(i.OutputRegister(0), i.MemoryOperand(1));
__ movzx_b(i.OutputRegister(0), i.OutputRegister(0));
__ xor_(i.OutputRegister(1), i.OutputRegister(1));
break;
}
case kIA32Word64AtomicNarrowExchangeUint16: {
__ xchg_w(i.OutputRegister(0), i.MemoryOperand(1));
__ movzx_w(i.OutputRegister(0), i.OutputRegister(0));
__ xor_(i.OutputRegister(1), i.OutputRegister(1));
break;
}
case kIA32Word64AtomicNarrowExchangeUint32: {
__ xchg(i.OutputRegister(0), i.MemoryOperand(1));
__ xor_(i.OutputRegister(1), i.OutputRegister(1));
break;
}
case kIA32Word32AtomicPairExchange: {
__ mov(i.OutputRegister(0), i.MemoryOperand(2));
__ mov(i.OutputRegister(1), i.NextMemoryOperand(2));
__ lock();
__ cmpxchg8b(i.MemoryOperand(2));
break;
}
case kWord32AtomicCompareExchangeInt8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
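The narrow exchange cases above implement a 64-bit wasm exchange against a memory cell that is only 8, 16, or 32 bits wide: the old value always fits in the low output register, so the high register only has to be zeroed. A minimal semantic sketch of the 32-bit case (illustration only, not V8 code; the function name is hypothetical):

#include <atomic>
#include <cstdint>

// What kIA32Word64AtomicNarrowExchangeUint32 computes: the previous cell
// contents, zero-extended to 64 bits -- which is why the xor_ above clears
// the high output register.
uint64_t NarrowExchangeU32(std::atomic<uint32_t>* cell, uint64_t value) {
  uint32_t old_low = cell->exchange(static_cast<uint32_t>(value));
  return static_cast<uint64_t>(old_low);  // high 32 bits are always zero
}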
@@ -3685,6 +3713,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
break;
}
case kIA32Word64AtomicNarrowCompareExchangeUint8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
__ movzx_b(i.OutputRegister(0), i.OutputRegister(0));
__ xor_(i.OutputRegister(1), i.OutputRegister(1));
break;
}
case kIA32Word64AtomicNarrowCompareExchangeUint16: {
__ lock();
__ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
__ movzx_w(i.OutputRegister(0), i.OutputRegister(0));
__ xor_(i.OutputRegister(1), i.OutputRegister(1));
break;
}
case kIA32Word64AtomicNarrowCompareExchangeUint32: {
__ lock();
__ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
__ xor_(i.OutputRegister(1), i.OutputRegister(1));
break;
}
case kIA32Word32AtomicPairCompareExchange: {
__ lock();
__ cmpxchg8b(i.MemoryOperand(4));
break;
}
#define ATOMIC_BINOP_CASE(op, inst) \
case kWord32Atomic##op##Int8: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
...
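ia32 has no 64-bit xchg, so the kIA32Word32AtomicPairExchange case above goes through cmpxchg8b on the register pair. A portable sketch of the semantics being implemented, spelled out here with the conventional compare-and-swap retry loop (illustration only, not the emitted code):

#include <atomic>
#include <cstdint>

uint64_t PairExchange(std::atomic<uint64_t>* mem, uint64_t new_value) {
  uint64_t old_value = mem->load();
  // Retry until the value we read is still the value we replace;
  // compare_exchange_weak reloads old_value on failure.
  while (!mem->compare_exchange_weak(old_value, new_value)) {
  }
  return old_value;  // handed back as a low/high 32-bit register pair
}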
@@ -368,6 +368,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kLFence:
return kHasSideEffect;
case kIA32Word32AtomicPairAdd:
case kIA32Word32AtomicPairSub:
case kIA32Word32AtomicPairAnd:
case kIA32Word32AtomicPairOr:
case kIA32Word32AtomicPairXor:
case kIA32Word32AtomicPairExchange:
case kIA32Word32AtomicPairCompareExchange:
case kIA32Word64AtomicNarrowAddUint8:
case kIA32Word64AtomicNarrowAddUint16:
case kIA32Word64AtomicNarrowAddUint32:
@@ -383,11 +390,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Word64AtomicNarrowXorUint8:
case kIA32Word64AtomicNarrowXorUint16:
case kIA32Word64AtomicNarrowXorUint32:
case kIA32Word64AtomicNarrowExchangeUint8:
case kIA32Word64AtomicNarrowExchangeUint16:
case kIA32Word64AtomicNarrowExchangeUint32:
case kIA32Word64AtomicNarrowCompareExchangeUint8:
case kIA32Word64AtomicNarrowCompareExchangeUint16:
case kIA32Word64AtomicNarrowCompareExchangeUint32:
return kHasSideEffect;
#define CASE(Name) case k##Name:
...
@@ -1823,6 +1823,37 @@ void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairXor);
}
void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairExchange);
}
void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
IA32OperandGenerator g(this);
Node* index = node->InputAt(1);
AddressingMode addressing_mode;
InstructionOperand index_operand;
if (g.CanBeImmediate(index)) {
index_operand = g.UseImmediate(index);
addressing_mode = kMode_MRI;
} else {
index_operand = g.UseUniqueRegister(index);
addressing_mode = kMode_MR1;
}
InstructionOperand inputs[] = {
// High, Low values of old value
g.UseFixed(node->InputAt(2), eax), g.UseFixed(node->InputAt(3), edx),
// High, Low values of new value
g.UseFixed(node->InputAt(4), ebx), g.UseFixed(node->InputAt(5), ecx),
// InputAt(0) => base
g.UseUniqueRegister(node->InputAt(0)), index_operand};
InstructionOperand outputs[] = {
g.DefineAsFixed(NodeProperties::FindProjection(node, 0), eax),
g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
InstructionCode code = kIA32Word32AtomicPairCompareExchange |
AddressingModeField::encode(addressing_mode);
Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
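The fixed-register constraints above mirror the hardware contract of lock cmpxchg8b: the instruction implicitly compares edx:eax against the 64-bit memory operand, stores ecx:ebx on a match, and leaves the observed value in edx:eax either way. A hedged illustration in GCC-style inline assembly (ia32 only; not V8 code):

#include <cstdint>

uint64_t CompareExchange8B(uint64_t* mem, uint64_t expected,
                           uint64_t desired) {
  uint32_t old_lo = static_cast<uint32_t>(expected);
  uint32_t old_hi = static_cast<uint32_t>(expected >> 32);
  asm volatile("lock cmpxchg8b %0"
               : "+m"(*mem), "+a"(old_lo), "+d"(old_hi)  // edx:eax in/out
               : "b"(static_cast<uint32_t>(desired)),    // ebx: new low word
                 "c"(static_cast<uint32_t>(desired >> 32))  // ecx: new high
               : "cc", "memory");
  return static_cast<uint64_t>(old_hi) << 32 | old_lo;  // observed old value
}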
void InstructionSelector::VisitWord64AtomicNarrowBinop(Node* node,
ArchOpcode uint8_op,
ArchOpcode uint16_op,
@@ -1856,6 +1887,95 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
DCHECK(type != MachineType::Uint64());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Uint32()) {
opcode = kIA32Word64AtomicNarrowExchangeUint32;
} else if (type == MachineType::Uint16()) {
opcode = kIA32Word64AtomicNarrowExchangeUint16;
} else if (type == MachineType::Uint8()) {
opcode = kIA32Word64AtomicNarrowExchangeUint8;
} else {
UNREACHABLE();
return;
}
IA32OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
if (type.representation() == MachineRepresentation::kWord8) {
inputs[input_count++] = g.UseFixed(value, edx);
} else {
inputs[input_count++] = g.UseUniqueRegister(value);
}
inputs[input_count++] = g.UseUniqueRegister(base);
if (g.CanBeImmediate(index)) {
inputs[input_count++] = g.UseImmediate(index);
addressing_mode = kMode_MRI;
} else {
inputs[input_count++] = g.UseUniqueRegister(index);
addressing_mode = kMode_MR1;
}
InstructionOperand outputs[2];
if (type.representation() == MachineRepresentation::kWord8) {
// Using DefineSameAsFirst requires the register to be unallocated.
outputs[0] = g.DefineAsFixed(NodeProperties::FindProjection(node, 0), edx);
} else {
outputs[0] = g.DefineSameAsFirst(NodeProperties::FindProjection(node, 0));
}
outputs[1] = g.DefineAsRegister(NodeProperties::FindProjection(node, 1));
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
DCHECK(type != MachineType::Uint64());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Uint32()) {
opcode = kIA32Word64AtomicNarrowCompareExchangeUint32;
} else if (type == MachineType::Uint16()) {
opcode = kIA32Word64AtomicNarrowCompareExchangeUint16;
} else if (type == MachineType::Uint8()) {
opcode = kIA32Word64AtomicNarrowCompareExchangeUint8;
} else {
UNREACHABLE();
return;
}
IA32OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
AddressingMode addressing_mode;
InstructionOperand inputs[4];
size_t input_count = 0;
inputs[input_count++] = g.UseFixed(old_value, eax);
if (type == MachineType::Int8() || type == MachineType::Uint8()) {
inputs[input_count++] = g.UseByteRegister(new_value);
} else {
inputs[input_count++] = g.UseUniqueRegister(new_value);
}
inputs[input_count++] = g.UseUniqueRegister(base);
if (g.CanBeImmediate(index)) {
inputs[input_count++] = g.UseImmediate(index);
addressing_mode = kMode_MRI;
} else {
inputs[input_count++] = g.UseUniqueRegister(index);
addressing_mode = kMode_MR1;
}
InstructionOperand outputs[] = {
g.DefineAsFixed(NodeProperties::FindProjection(node, 0), eax),
g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
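In the selection above, old_value is pinned to eax because cmpxchg implicitly compares against, and reports through, the accumulator, and an 8-bit new_value must live in a byte-addressable register (hence UseByteRegister). Semantically, the narrow compare-exchange returns the old cell contents zero-extended to 64 bits. A small sketch under those assumptions (not V8 code):

#include <atomic>
#include <cstdint>

uint64_t NarrowCompareExchangeU8(std::atomic<uint8_t>* cell,
                                 uint64_t expected, uint64_t desired) {
  uint8_t old_byte = static_cast<uint8_t>(expected);
  // On failure, compare_exchange_strong writes the observed byte back into
  // old_byte, much as cmpxchg leaves it in al.
  cell->compare_exchange_strong(old_byte, static_cast<uint8_t>(desired));
  return static_cast<uint64_t>(old_byte);  // high word of the pair is zero
}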
#define SIMD_INT_TYPES(V) \
V(I32x4) \
V(I16x8) \
...
@@ -1737,6 +1737,8 @@ void InstructionSelector::VisitNode(Node* node) {
ATOMIC_CASE(And)
ATOMIC_CASE(Or)
ATOMIC_CASE(Xor)
ATOMIC_CASE(Exchange)
ATOMIC_CASE(CompareExchange)
#undef ATOMIC_CASE
#define ATOMIC_CASE(name) \
case IrOpcode::kWord64AtomicNarrow##name: { \
@@ -1750,6 +1752,8 @@ void InstructionSelector::VisitNode(Node* node) {
ATOMIC_CASE(And)
ATOMIC_CASE(Or)
ATOMIC_CASE(Xor)
ATOMIC_CASE(Exchange)
ATOMIC_CASE(CompareExchange)
#undef ATOMIC_CASE
case IrOpcode::kSpeculationFence:
return VisitSpeculationFence(node);
@@ -2399,6 +2403,14 @@ void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitWord64AtomicNarrowAdd(Node* node) {
UNIMPLEMENTED();
}
@@ -2418,6 +2430,14 @@ void InstructionSelector::VisitWord64AtomicNarrowOr(Node* node) {
void InstructionSelector::VisitWord64AtomicNarrowXor(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
UNIMPLEMENTED();
}
#endif  // !V8_TARGET_ARCH_IA32
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
...
@@ -126,9 +126,7 @@ void Int64Lowering::LowerWord64AtomicBinop(Node* node, const Operator* op) {
ReplaceNodeWithProjections(node);
}
void Int64Lowering::LowerWord64AtomicNarrowOp(Node* node, const Operator* op) {
DefaultLowering(node, true);
NodeProperties::ChangeOp(node, op);
ReplaceNodeWithProjections(node);
@@ -884,23 +882,42 @@ void Int64Lowering::LowerNode(Node* node) {
node->NullAllInputs();
break;
}
#define ATOMIC_CASE(name) \
case IrOpcode::kWord64Atomic##name: { \
MachineType type = AtomicOpType(node->op()); \
if (type == MachineType::Uint64()) { \
LowerWord64AtomicBinop(node, machine()->Word32AtomicPair##name()); \
} else { \
LowerWord64AtomicNarrowOp(node, machine()->Word64AtomicNarrow##name(type)); \
} \
break; \
}
ATOMIC_CASE(Add)
ATOMIC_CASE(Sub)
ATOMIC_CASE(And)
ATOMIC_CASE(Or)
ATOMIC_CASE(Xor)
ATOMIC_CASE(Exchange)
#undef ATOMIC_CASE
case IrOpcode::kWord64AtomicCompareExchange: {
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint64()) {
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
node->ReplaceInput(2, GetReplacementLow(old_value));
node->ReplaceInput(3, GetReplacementHigh(old_value));
node->InsertInput(zone(), 4, GetReplacementLow(new_value));
node->InsertInput(zone(), 5, GetReplacementHigh(new_value));
NodeProperties::ChangeOp(node,
machine()->Word32AtomicPairCompareExchange());
ReplaceNodeWithProjections(node);
} else {
LowerWord64AtomicNarrowOp(
node, machine()->Word64AtomicNarrowCompareExchange(type));
}
break;
}
default: { DefaultLowering(node); }
}
...
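The kWord64AtomicCompareExchange lowering above rewires the node's inputs from [base, index, old64, new64] to [base, index, old_low, old_high, new_low, new_high] and redirects its uses to two 32-bit projections. A sketch of the value contract that rewrite relies on (illustration only; the helpers stand in for GetReplacementLow/High and the projection pair):

#include <cstdint>

struct Halves {
  uint32_t low;
  uint32_t high;
};

// GetReplacementLow / GetReplacementHigh of a 64-bit value.
Halves Split(uint64_t v) {
  return {static_cast<uint32_t>(v), static_cast<uint32_t>(v >> 32)};
}

// Reassembling the 64-bit result from the two word projections.
uint64_t Combine(Halves h) {
  return static_cast<uint64_t>(h.high) << 32 | h.low;
}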
@@ -51,7 +51,7 @@ class V8_EXPORT_PRIVATE Int64Lowering {
void LowerComparison(Node* node, const Operator* signed_op,
const Operator* unsigned_op);
void LowerWord64AtomicBinop(Node* node, const Operator* op);
void LowerWord64AtomicNarrowOp(Node* node, const Operator* op);
void ReplaceNode(Node* old, Node* new_low, Node* new_high);
bool HasReplacementLow(Node* node);
...
@@ -419,7 +419,8 @@ MachineType AtomicOpType(Operator const* op) {
V(Sub) \
V(And) \
V(Or) \
V(Xor) \
V(Exchange)
#define SIMD_LANE_OP_LIST(V) \
V(F32x4, 4) \
@@ -712,11 +713,34 @@ struct MachineOperatorGlobalCache {
ATOMIC64_NARROW_OP(Word64AtomicNarrowSub, type) \
ATOMIC64_NARROW_OP(Word64AtomicNarrowAnd, type) \
ATOMIC64_NARROW_OP(Word64AtomicNarrowOr, type) \
ATOMIC64_NARROW_OP(Word64AtomicNarrowXor, type) \
ATOMIC64_NARROW_OP(Word64AtomicNarrowExchange, type)
ATOMIC_U32_TYPE_LIST(ATOMIC_OP_LIST)
#undef ATOMIC_OP_LIST
#undef ATOMIC64_NARROW_OP
struct Word32AtomicPairCompareExchangeOperator : public Operator {
Word32AtomicPairCompareExchangeOperator()
: Operator(IrOpcode::kWord32AtomicPairCompareExchange,
Operator::kNoDeopt | Operator::kNoThrow,
"Word32AtomicPairCompareExchange", 6, 1, 1, 2, 1, 0) {}
};
Word32AtomicPairCompareExchangeOperator kWord32AtomicPairCompareExchange;
#define ATOMIC_COMPARE_EXCHANGE(Type) \
struct Word64AtomicNarrowCompareExchange##Type##Operator \
: public Operator1<MachineType> { \
Word64AtomicNarrowCompareExchange##Type##Operator() \
: Operator1<MachineType>(IrOpcode::kWord64AtomicNarrowCompareExchange, \
Operator::kNoDeopt | Operator::kNoThrow, \
"Word64AtomicNarrowCompareExchange", 4, 1, 1, \
2, 1, 0, MachineType::Type()) {} \
}; \
Word64AtomicNarrowCompareExchange##Type##Operator \
kWord64AtomicNarrowCompareExchange##Type;
ATOMIC_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
#undef ATOMIC_COMPARE_EXCHANGE
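Reading the Operator arity arguments above as value inputs, effect inputs, control inputs, value outputs, effect outputs, control outputs: the pair compare-exchange takes 6 value inputs (base, index, and the four 32-bit halves of the old and new values) and produces 2 value outputs (the low and high word projections), while the narrow variants take 4 value inputs because their old and new values are single 32-bit words.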
// The {BitcastWordToTagged} operator must not be marked as pure (especially
// not idempotent), because otherwise the splitting logic in the Scheduler
// might decide to split these operators, thus potentially creating live
@@ -1187,6 +1211,14 @@ const Operator* MachineOperatorBuilder::Word32AtomicPairXor() {
return &cache_.kWord32AtomicPairXor;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairExchange() {
return &cache_.kWord32AtomicPairExchange;
}
const Operator* MachineOperatorBuilder::Word32AtomicPairCompareExchange() {
return &cache_.kWord32AtomicPairCompareExchange;
}
const Operator* MachineOperatorBuilder::Word64AtomicNarrowAdd(
MachineType type) {
#define ADD(kType) \
@@ -1241,6 +1273,28 @@ const Operator* MachineOperatorBuilder::Word64AtomicNarrowXor(
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::Word64AtomicNarrowExchange(
MachineType type) {
#define EXCHANGE(kType) \
if (type == MachineType::kType()) { \
return &cache_.kWord64AtomicNarrowExchange##kType; \
}
ATOMIC_U32_TYPE_LIST(EXCHANGE)
#undef EXCHANGE
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::Word64AtomicNarrowCompareExchange(
MachineType type) {
#define CMP_EXCHANGE(kType) \
if (type == MachineType::kType()) { \
return &cache_.kWord64AtomicNarrowCompareExchange##kType; \
}
ATOMIC_U32_TYPE_LIST(CMP_EXCHANGE)
#undef CMP_EXCHANGE
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::TaggedPoisonOnSpeculation() {
return &cache_.kTaggedPoisonOnSpeculation;
}
...
@@ -661,6 +661,10 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Word64AtomicNarrowOr(MachineType type);
// atomic-narrow-xor [base + index], value
const Operator* Word64AtomicNarrowXor(MachineType type);
// atomic-narrow-exchange [base + index], value
const Operator* Word64AtomicNarrowExchange(MachineType type);
// atomic-narrow-compare-exchange [base + index], old_value, new_value
const Operator* Word64AtomicNarrowCompareExchange(MachineType type);
// atomic-pair-add [base + index], value_high, value_low
const Operator* Word32AtomicPairAdd();
// atomic-pair-sub [base + index], value_high, value_low
@@ -671,6 +675,11 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Word32AtomicPairOr();
// atomic-pair-xor [base + index], value_high, value_low
const Operator* Word32AtomicPairXor();
// atomic-pair-exchange [base + index], value_high, value_low
const Operator* Word32AtomicPairExchange();
// atomic-pair-compare-exchange [base + index], old_value_high, old_value_low,
// new_value_high, new_value_low
const Operator* Word32AtomicPairCompareExchange();
const OptionalOperator SpeculationFence();
...
@@ -576,105 +576,109 @@
V(Word64AtomicNarrowSub) \
V(Word64AtomicNarrowAnd) \
V(Word64AtomicNarrowOr) \
V(Word64AtomicNarrowXor) \
V(Word64AtomicNarrowExchange) \
V(Word64AtomicNarrowCompareExchange)

#define MACHINE_OP_LIST(V) \
MACHINE_UNOP_32_LIST(V) \
MACHINE_BINOP_32_LIST(V) \
MACHINE_BINOP_64_LIST(V) \
MACHINE_COMPARE_BINOP_LIST(V) \
MACHINE_FLOAT32_BINOP_LIST(V) \
MACHINE_FLOAT32_UNOP_LIST(V) \
MACHINE_FLOAT64_BINOP_LIST(V) \
MACHINE_FLOAT64_UNOP_LIST(V) \
MACHINE_WORD64_ATOMIC_OP_LIST(V) \
V(DebugAbort) \
V(DebugBreak) \
V(Comment) \
V(Load) \
V(PoisonedLoad) \
V(Store) \
V(StackSlot) \
V(Word32Popcnt) \
V(Word64Popcnt) \
V(Word64Clz) \
V(Word64Ctz) \
V(Word64ReverseBits) \
V(Word64ReverseBytes) \
V(Int64AbsWithOverflow) \
V(BitcastTaggedToWord) \
V(BitcastWordToTagged) \
V(BitcastWordToTaggedSigned) \
V(TruncateFloat64ToWord32) \
V(ChangeFloat32ToFloat64) \
V(ChangeFloat64ToInt32) \
V(ChangeFloat64ToUint32) \
V(ChangeFloat64ToUint64) \
V(Float64SilenceNaN) \
V(TruncateFloat64ToUint32) \
V(TruncateFloat32ToInt32) \
V(TruncateFloat32ToUint32) \
V(TryTruncateFloat32ToInt64) \
V(TryTruncateFloat64ToInt64) \
V(TryTruncateFloat32ToUint64) \
V(TryTruncateFloat64ToUint64) \
V(ChangeInt32ToFloat64) \
V(ChangeInt32ToInt64) \
V(ChangeUint32ToFloat64) \
V(ChangeUint32ToUint64) \
V(TruncateFloat64ToFloat32) \
V(TruncateInt64ToInt32) \
V(RoundFloat64ToInt32) \
V(RoundInt32ToFloat32) \
V(RoundInt64ToFloat32) \
V(RoundInt64ToFloat64) \
V(RoundUint32ToFloat32) \
V(RoundUint64ToFloat32) \
V(RoundUint64ToFloat64) \
V(BitcastFloat32ToInt32) \
V(BitcastFloat64ToInt64) \
V(BitcastInt32ToFloat32) \
V(BitcastInt64ToFloat64) \
V(Float64ExtractLowWord32) \
V(Float64ExtractHighWord32) \
V(Float64InsertLowWord32) \
V(Float64InsertHighWord32) \
V(TaggedPoisonOnSpeculation) \
V(Word32PoisonOnSpeculation) \
V(Word64PoisonOnSpeculation) \
V(LoadStackPointer) \
V(LoadFramePointer) \
V(LoadParentFramePointer) \
V(UnalignedLoad) \
V(UnalignedStore) \
V(Int32PairAdd) \
V(Int32PairSub) \
V(Int32PairMul) \
V(Word32PairShl) \
V(Word32PairShr) \
V(Word32PairSar) \
V(ProtectedLoad) \
V(ProtectedStore) \
V(Word32AtomicLoad) \
V(Word32AtomicStore) \
V(Word32AtomicExchange) \
V(Word32AtomicCompareExchange) \
V(Word32AtomicAdd) \
V(Word32AtomicSub) \
V(Word32AtomicAnd) \
V(Word32AtomicOr) \
V(Word32AtomicXor) \
V(Word32AtomicPairAdd) \
V(Word32AtomicPairSub) \
V(Word32AtomicPairAnd) \
V(Word32AtomicPairOr) \
V(Word32AtomicPairXor) \
V(Word32AtomicPairExchange) \
V(Word32AtomicPairCompareExchange) \
V(SpeculationFence) \
V(SignExtendWord8ToInt32) \
V(SignExtendWord16ToInt32) \
V(SignExtendWord8ToInt64) \
V(SignExtendWord16ToInt64) \
V(SignExtendWord32ToInt64) \
V(UnsafePointerAdd)

#define MACHINE_SIMD_OP_LIST(V) \
...
@@ -1743,11 +1743,15 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord32AtomicPairAnd:
case IrOpcode::kWord32AtomicPairOr:
case IrOpcode::kWord32AtomicPairXor:
case IrOpcode::kWord32AtomicPairExchange:
case IrOpcode::kWord32AtomicPairCompareExchange:
case IrOpcode::kWord64AtomicNarrowAdd:
case IrOpcode::kWord64AtomicNarrowSub:
case IrOpcode::kWord64AtomicNarrowAnd:
case IrOpcode::kWord64AtomicNarrowOr:
case IrOpcode::kWord64AtomicNarrowXor:
case IrOpcode::kWord64AtomicNarrowExchange:
case IrOpcode::kWord64AtomicNarrowCompareExchange:
case IrOpcode::kSpeculationFence:
case IrOpcode::kSignExtendWord8ToInt32:
case IrOpcode::kSignExtendWord16ToInt32:
...
@@ -47,11 +47,9 @@ WASM_EXEC_TEST(I64AtomicOr) {
WASM_EXEC_TEST(I64AtomicXor) {
RunU64BinOp(execution_mode, kExprI64AtomicXor, Xor);
}
WASM_EXEC_TEST(I64AtomicExchange) {
RunU64BinOp(execution_mode, kExprI64AtomicExchange, Exchange);
}
void RunU32BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
Uint32BinOp expected_op) {
@@ -90,11 +88,9 @@ WASM_EXEC_TEST(I64AtomicOr32U) {
WASM_EXEC_TEST(I64AtomicXor32U) {
RunU32BinOp(execution_mode, kExprI64AtomicXor32U, Xor);
}
WASM_EXEC_TEST(I64AtomicExchange32U) {
RunU32BinOp(execution_mode, kExprI64AtomicExchange32U, Exchange);
}
void RunU16BinOp(WasmExecutionMode mode, WasmOpcode wasm_op,
Uint16BinOp expected_op) {
@@ -133,11 +129,9 @@ WASM_EXEC_TEST(I64AtomicOr16U) {
WASM_EXEC_TEST(I64AtomicXor16U) {
RunU16BinOp(execution_mode, kExprI64AtomicXor16U, Xor);
}
WASM_EXEC_TEST(I64AtomicExchange16U) {
RunU16BinOp(execution_mode, kExprI64AtomicExchange16U, Exchange);
}
void RunU8BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
Uint8BinOp expected_op) {
@@ -175,7 +169,6 @@ WASM_EXEC_TEST(I64AtomicOr8U) {
WASM_EXEC_TEST(I64AtomicXor8U) {
RunU8BinOp(execution_mode, kExprI64AtomicXor8U, Xor);
}
WASM_EXEC_TEST(I64AtomicExchange8U) {
RunU8BinOp(execution_mode, kExprI64AtomicExchange8U, Exchange);
}
@@ -265,6 +258,7 @@ WASM_EXEC_TEST(I32AtomicCompareExchange8U) {
}
}
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_EXEC_TEST(I64AtomicLoad) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint64_t> r(execution_mode);
...