Commit 9a0f2546 authored by Deepti Gandluri, committed by Commit Bot

[compiler] Remove AtomicNarrow machine operators, macroize tests

The AtomicNarrow operations are currently used for wider 64-bit
operations that only operate on 32 bits of data or less
(e.g. I64AtomicAdd8U). Remove them, because int64-lowering can handle
these cases by replacing the high-order half of the node with a zero
constant. Explicitly zeroing the high word in code-gen is not required
because:

 - The spec requires only the data exchange to be atomic; for narrow
   ops this uses only the low word.
 - The return values are not in memory, so they are not visible to
   other workers/threads.
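
As an illustration (not part of this change), the invariant the
lowering relies on can be modeled in plain C++: a narrow op atomically
exchanges at most 32 bits and zero-extends the old value, so the high
word of the 64-bit result is zero by construction. The helper below is
a hypothetical standalone sketch, not V8 code:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical model of I64AtomicAdd8U: only one byte is exchanged
    // atomically; the old byte is zero-extended to 64 bits, so the high
    // 32-bit word of the result is always zero.
    uint64_t ModelI64AtomicAdd8U(std::atomic<uint8_t>* cell, uint64_t value) {
      uint8_t old = cell->fetch_add(static_cast<uint8_t>(value));
      return static_cast<uint64_t>(old);  // high word is 0 by construction
    }

    int main() {
      std::atomic<uint8_t> cell{200};
      uint64_t result = ModelI64AtomicAdd8U(&cell, 100);  // cell wraps to 44
      std::printf("low=%u high=%u\n", static_cast<uint32_t>(result),
                  static_cast<uint32_t>(result >> 32));  // low=200 high=0
      return 0;
    }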

Bug: v8:6532

Change-Id: I90a795ab6c21c70cb096f59a137de653c9c6a178
Reviewed-on: https://chromium-review.googlesource.com/1194428
Reviewed-by: Ben Titzer <titzer@chromium.org>
Reviewed-by: Ben Smith <binji@chromium.org>
Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
Cr-Commit-Position: refs/heads/master@{#55499}
parent 0697fe84
@@ -453,11 +453,6 @@ void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
     __ dmb(ISH);                                       \
   } while (0)
 
-#define ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op)           \
-  if (arch_opcode == kArmWord64AtomicNarrow##op) {     \
-    __ mov(i.OutputRegister(1), Operand(0));           \
-  }
-
 #define ASSEMBLE_IEEE754_BINOP(name)                   \
   do {                                                 \
     /* TODO(bmeurer): We should really get rid of this special instruction, */ \
@@ -2684,23 +2679,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ sxtb(i.OutputRegister(0), i.OutputRegister(0));
       break;
     case kWord32AtomicExchangeUint8:
-    case kArmWord64AtomicNarrowExchangeUint8:
       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
-      ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(ExchangeUint8);
       break;
     case kWord32AtomicExchangeInt16:
       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
       __ sxth(i.OutputRegister(0), i.OutputRegister(0));
       break;
     case kWord32AtomicExchangeUint16:
-    case kArmWord64AtomicNarrowExchangeUint16:
       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
-      ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(ExchangeUint16);
       break;
     case kWord32AtomicExchangeWord32:
-    case kArmWord64AtomicNarrowExchangeUint32:
       ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrex, strex);
-      ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(ExchangeUint32);
       break;
     case kWord32AtomicCompareExchangeInt8:
       __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
@@ -2710,12 +2699,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ sxtb(i.OutputRegister(0), i.OutputRegister(0));
       break;
     case kWord32AtomicCompareExchangeUint8:
-    case kArmWord64AtomicNarrowCompareExchangeUint8:
       __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
       __ uxtb(i.TempRegister(2), i.InputRegister(2));
       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
                                                i.TempRegister(2));
-      ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(CompareExchangeUint8);
       break;
     case kWord32AtomicCompareExchangeInt16:
       __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
@@ -2725,19 +2712,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ sxth(i.OutputRegister(0), i.OutputRegister(0));
       break;
     case kWord32AtomicCompareExchangeUint16:
-    case kArmWord64AtomicNarrowCompareExchangeUint16:
       __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
       __ uxth(i.TempRegister(2), i.InputRegister(2));
       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
                                                i.TempRegister(2));
-      ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(CompareExchangeUint16);
       break;
     case kWord32AtomicCompareExchangeWord32:
-    case kArmWord64AtomicNarrowCompareExchangeUint32:
       __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
       ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex,
                                                i.InputRegister(2));
-      ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(CompareExchangeUint32);
       break;
 #define ATOMIC_BINOP_CASE(op, inst)                      \
   case kWord32Atomic##op##Int8:                          \
@@ -2745,23 +2728,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     __ sxtb(i.OutputRegister(0), i.OutputRegister(0));   \
     break;                                               \
   case kWord32Atomic##op##Uint8:                         \
-  case kArmWord64AtomicNarrow##op##Uint8:                \
     ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst);         \
-    ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op##Uint8);         \
     break;                                               \
   case kWord32Atomic##op##Int16:                         \
     ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst);         \
     __ sxth(i.OutputRegister(0), i.OutputRegister(0));   \
     break;                                               \
   case kWord32Atomic##op##Uint16:                        \
-  case kArmWord64AtomicNarrow##op##Uint16:               \
     ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst);         \
-    ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op##Uint16);        \
     break;                                               \
   case kWord32Atomic##op##Word32:                        \
-  case kArmWord64AtomicNarrow##op##Uint32:               \
     ASSEMBLE_ATOMIC_BINOP(ldrex, strex, inst);           \
-    ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op##Uint32);        \
     break;
   ATOMIC_BINOP_CASE(Add, add)
   ATOMIC_BINOP_CASE(Sub, sub)
@@ -2836,7 +2813,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
   }
 #undef ATOMIC_LOGIC_BINOP_CASE
-#undef ATOMIC_NARROW_OP_CLEAR_HIGH_WORD
 #undef ASSEMBLE_ATOMIC_LOAD_INTEGER
 #undef ASSEMBLE_ATOMIC_STORE_INTEGER
 #undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
...
@@ -285,27 +285,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArmWord32AtomicPairXor:
     case kArmWord32AtomicPairExchange:
     case kArmWord32AtomicPairCompareExchange:
-    case kArmWord64AtomicNarrowAddUint8:
-    case kArmWord64AtomicNarrowAddUint16:
-    case kArmWord64AtomicNarrowAddUint32:
-    case kArmWord64AtomicNarrowSubUint8:
-    case kArmWord64AtomicNarrowSubUint16:
-    case kArmWord64AtomicNarrowSubUint32:
-    case kArmWord64AtomicNarrowAndUint8:
-    case kArmWord64AtomicNarrowAndUint16:
-    case kArmWord64AtomicNarrowAndUint32:
-    case kArmWord64AtomicNarrowOrUint8:
-    case kArmWord64AtomicNarrowOrUint16:
-    case kArmWord64AtomicNarrowOrUint32:
-    case kArmWord64AtomicNarrowXorUint8:
-    case kArmWord64AtomicNarrowXorUint16:
-    case kArmWord64AtomicNarrowXorUint32:
-    case kArmWord64AtomicNarrowExchangeUint8:
-    case kArmWord64AtomicNarrowExchangeUint16:
-    case kArmWord64AtomicNarrowExchangeUint32:
-    case kArmWord64AtomicNarrowCompareExchangeUint8:
-    case kArmWord64AtomicNarrowCompareExchangeUint16:
-    case kArmWord64AtomicNarrowCompareExchangeUint32:
       return kHasSideEffect;
 
 #define CASE(Name) case k##Name:
...
@@ -424,25 +424,6 @@ void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
                  arraysize(temps), temps);
 }
 
-void VisitNarrowAtomicBinOp(InstructionSelector* selector, Node* node,
-                            ArchOpcode opcode) {
-  ArmOperandGenerator g(selector);
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
-  Node* value = node->InputAt(2);
-  AddressingMode addressing_mode = kMode_Offset_RR;
-  InstructionOperand inputs[3] = {g.UseRegister(base), g.UseRegister(index),
-                                  g.UseUniqueRegister(value)};
-  InstructionOperand outputs[] = {
-      g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r4),
-      g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r5)};
-  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
-                                g.TempRegister()};
-  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
-  selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
-                 arraysize(temps), temps);
-}
-
 }  // namespace
 
 void InstructionSelector::VisitStackSlot(Node* node) {
@@ -2314,39 +2295,6 @@ void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
   VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairXor);
 }
 
-void InstructionSelector::VisitWord64AtomicNarrowBinop(Node* node,
-                                                       ArchOpcode uint8_op,
-                                                       ArchOpcode uint16_op,
-                                                       ArchOpcode uint32_op) {
-  MachineType type = AtomicOpType(node->op());
-  DCHECK(type != MachineType::Uint64());
-  ArchOpcode opcode = kArchNop;
-  if (type == MachineType::Uint32()) {
-    opcode = uint32_op;
-  } else if (type == MachineType::Uint16()) {
-    opcode = uint16_op;
-  } else if (type == MachineType::Uint8()) {
-    opcode = uint8_op;
-  } else {
-    UNREACHABLE();
-    return;
-  }
-  VisitNarrowAtomicBinOp(this, node, opcode);
-}
-
-#define VISIT_ATOMIC_BINOP(op)                                             \
-  void InstructionSelector::VisitWord64AtomicNarrow##op(Node* node) {      \
-    VisitWord64AtomicNarrowBinop(node, kArmWord64AtomicNarrow##op##Uint8,  \
-                                 kArmWord64AtomicNarrow##op##Uint16,       \
-                                 kArmWord64AtomicNarrow##op##Uint32);      \
-  }
-VISIT_ATOMIC_BINOP(Add)
-VISIT_ATOMIC_BINOP(Sub)
-VISIT_ATOMIC_BINOP(And)
-VISIT_ATOMIC_BINOP(Or)
-VISIT_ATOMIC_BINOP(Xor)
-#undef VISIT_ATOMIC_BINOP
-
 void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
   ArmOperandGenerator g(this);
   Node* base = node->InputAt(0);
@@ -2367,35 +2315,6 @@ void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
                  arraysize(temps), temps);
 }
 
-void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
-  ArmOperandGenerator g(this);
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
-  Node* value = node->InputAt(2);
-  ArchOpcode opcode = kArchNop;
-  MachineType type = AtomicOpType(node->op());
-  if (type == MachineType::Uint8()) {
-    opcode = kArmWord64AtomicNarrowExchangeUint8;
-  } else if (type == MachineType::Uint16()) {
-    opcode = kArmWord64AtomicNarrowExchangeUint16;
-  } else if (type == MachineType::Uint32()) {
-    opcode = kArmWord64AtomicNarrowExchangeUint32;
-  } else {
-    UNREACHABLE();
-    return;
-  }
-  AddressingMode addressing_mode = kMode_Offset_RR;
-  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
-                                 g.UseUniqueRegister(value)};
-  InstructionOperand outputs[] = {
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 0)),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
-  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
-  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
-  Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
-       arraysize(temps), temps);
-}
-
 void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
   ArmOperandGenerator g(this);
   AddressingMode addressing_mode = kMode_Offset_RR;
@@ -2413,38 +2332,6 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
                  arraysize(temps), temps);
 }
 
-void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
-  ArmOperandGenerator g(this);
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
-  Node* old_value = node->InputAt(2);
-  Node* new_value = node->InputAt(3);
-  ArchOpcode opcode = kArchNop;
-  MachineType type = AtomicOpType(node->op());
-  if (type == MachineType::Uint8()) {
-    opcode = kArmWord64AtomicNarrowCompareExchangeUint8;
-  } else if (type == MachineType::Uint16()) {
-    opcode = kArmWord64AtomicNarrowCompareExchangeUint16;
-  } else if (type == MachineType::Uint32()) {
-    opcode = kArmWord64AtomicNarrowCompareExchangeUint32;
-  } else {
-    UNREACHABLE();
-    return;
-  }
-  AddressingMode addressing_mode = kMode_Offset_RR;
-  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
-                                 g.UseUniqueRegister(old_value),
-                                 g.UseUniqueRegister(new_value)};
-  InstructionOperand outputs[] = {
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 0)),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
-  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
-                                g.TempRegister()};
-  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
-  Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
-       arraysize(temps), temps);
-}
-
 #define SIMD_TYPE_LIST(V) \
   V(F32x4)                \
   V(I32x4)                \
...
@@ -3674,27 +3674,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ xchg(i.InputRegister(0), i.MemoryOperand(1));
       break;
     }
-    // For the narrow Word64 operations below, i.OutputRegister(1) contains
-    // the high-order 32 bits for the 64bit operation. As the data exchange
-    // fits in one register, the i.OutputRegister(1) needs to be cleared for
-    // the correct return value to be propagated back.
-    case kIA32Word64AtomicNarrowExchangeUint8: {
-      __ xchg_b(i.OutputRegister(0), i.MemoryOperand(1));
-      __ movzx_b(i.OutputRegister(0), i.OutputRegister(0));
-      __ xor_(i.OutputRegister(1), i.OutputRegister(1));
-      break;
-    }
-    case kIA32Word64AtomicNarrowExchangeUint16: {
-      __ xchg_w(i.OutputRegister(0), i.MemoryOperand(1));
-      __ movzx_w(i.OutputRegister(0), i.OutputRegister(0));
-      __ xor_(i.OutputRegister(1), i.OutputRegister(1));
-      break;
-    }
-    case kIA32Word64AtomicNarrowExchangeUint32: {
-      __ xchg(i.OutputRegister(0), i.MemoryOperand(1));
-      __ xor_(i.OutputRegister(1), i.OutputRegister(1));
-      break;
-    }
     case kIA32Word32AtomicPairExchange: {
       __ mov(i.OutputRegister(0), i.MemoryOperand(2));
       __ mov(i.OutputRegister(1), i.NextMemoryOperand(2));
@@ -3731,26 +3710,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
       break;
     }
-    case kIA32Word64AtomicNarrowCompareExchangeUint8: {
-      __ lock();
-      __ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
-      __ movzx_b(i.OutputRegister(0), i.OutputRegister(0));
-      __ xor_(i.OutputRegister(1), i.OutputRegister(1));
-      break;
-    }
-    case kIA32Word64AtomicNarrowCompareExchangeUint16: {
-      __ lock();
-      __ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
-      __ movzx_w(i.OutputRegister(0), i.OutputRegister(0));
-      __ xor_(i.OutputRegister(1), i.OutputRegister(1));
-      break;
-    }
-    case kIA32Word64AtomicNarrowCompareExchangeUint32: {
-      __ lock();
-      __ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
-      __ xor_(i.OutputRegister(1), i.OutputRegister(1));
-      break;
-    }
     case kIA32Word32AtomicPairCompareExchange: {
       __ lock();
       __ cmpxchg8b(i.MemoryOperand(4));
@@ -3762,12 +3721,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     __ movsx_b(eax, eax);                                 \
     break;                                                \
   }                                                       \
-  case kIA32Word64AtomicNarrow##op##Uint8: {              \
-    ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b);        \
-    __ movzx_b(i.OutputRegister(0), i.OutputRegister(0)); \
-    __ xor_(i.OutputRegister(1), i.OutputRegister(1));    \
-    break;                                                \
-  }                                                       \
   case kWord32Atomic##op##Uint8: {                        \
     ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b);        \
     __ movzx_b(eax, eax);                                 \
@@ -3778,22 +3731,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     __ movsx_w(eax, eax);                                 \
     break;                                                \
   }                                                       \
-  case kIA32Word64AtomicNarrow##op##Uint16: {             \
-    ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w);        \
-    __ movzx_w(i.OutputRegister(0), i.OutputRegister(0)); \
-    __ xor_(i.OutputRegister(1), i.OutputRegister(1));    \
-    break;                                                \
-  }                                                       \
   case kWord32Atomic##op##Uint16: {                       \
     ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w);        \
     __ movzx_w(eax, eax);                                 \
     break;                                                \
   }                                                       \
-  case kIA32Word64AtomicNarrow##op##Uint32: {             \
-    ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg);            \
-    __ xor_(i.OutputRegister(1), i.OutputRegister(1));    \
-    break;                                                \
-  }                                                       \
   case kWord32Atomic##op##Word32: {                       \
     ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg);            \
     break;                                                \
...
@@ -380,27 +380,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kIA32Word32AtomicPairXor:
     case kIA32Word32AtomicPairExchange:
    case kIA32Word32AtomicPairCompareExchange:
-    case kIA32Word64AtomicNarrowAddUint8:
-    case kIA32Word64AtomicNarrowAddUint16:
-    case kIA32Word64AtomicNarrowAddUint32:
-    case kIA32Word64AtomicNarrowSubUint8:
-    case kIA32Word64AtomicNarrowSubUint16:
-    case kIA32Word64AtomicNarrowSubUint32:
-    case kIA32Word64AtomicNarrowAndUint8:
-    case kIA32Word64AtomicNarrowAndUint16:
-    case kIA32Word64AtomicNarrowAndUint32:
-    case kIA32Word64AtomicNarrowOrUint8:
-    case kIA32Word64AtomicNarrowOrUint16:
-    case kIA32Word64AtomicNarrowOrUint32:
-    case kIA32Word64AtomicNarrowXorUint8:
-    case kIA32Word64AtomicNarrowXorUint16:
-    case kIA32Word64AtomicNarrowXorUint32:
-    case kIA32Word64AtomicNarrowExchangeUint8:
-    case kIA32Word64AtomicNarrowExchangeUint16:
-    case kIA32Word64AtomicNarrowExchangeUint32:
-    case kIA32Word64AtomicNarrowCompareExchangeUint8:
-    case kIA32Word64AtomicNarrowCompareExchangeUint16:
-    case kIA32Word64AtomicNarrowCompareExchangeUint32:
       return kHasSideEffect;
 
 #define CASE(Name) case k##Name:
...
@@ -1358,30 +1358,6 @@ void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
   selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
 }
 
-void VisitNarrowAtomicBinOp(InstructionSelector* selector, Node* node,
-                            ArchOpcode opcode, MachineType type) {
-  IA32OperandGenerator g(selector);
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
-  Node* value = node->InputAt(2);
-  // Wasm lives in 32-bit address space, so we do not need to worry about
-  // base/index lowering. This will need to be fixed for Wasm64.
-  AddressingMode addressing_mode;
-  InstructionOperand inputs[] = {
-      g.UseUniqueRegister(value), g.UseUniqueRegister(base),
-      g.GetEffectiveIndexOperand(index, &addressing_mode)};
-  InstructionOperand outputs[] = {
-      g.DefineAsFixed(NodeProperties::FindProjection(node, 0), eax),
-      g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
-  InstructionOperand temp[] = {(type == MachineType::Uint8())
-                                   ? g.UseByteRegister(node)
-                                   : g.TempRegister()};
-  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
-  selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
-                 arraysize(temp), temp);
-}
-
 }  // namespace
 
 // Shared routine for word comparison with zero.
@@ -1844,111 +1820,6 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
   Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
 }
 
-void InstructionSelector::VisitWord64AtomicNarrowBinop(Node* node,
-                                                       ArchOpcode uint8_op,
-                                                       ArchOpcode uint16_op,
-                                                       ArchOpcode uint32_op) {
-  MachineType type = AtomicOpType(node->op());
-  DCHECK(type != MachineType::Uint64());
-  ArchOpcode opcode = kArchNop;
-  if (type == MachineType::Uint32()) {
-    opcode = uint32_op;
-  } else if (type == MachineType::Uint16()) {
-    opcode = uint16_op;
-  } else if (type == MachineType::Uint8()) {
-    opcode = uint8_op;
-  } else {
-    UNREACHABLE();
-    return;
-  }
-  VisitNarrowAtomicBinOp(this, node, opcode, type);
-}
-
-#define VISIT_ATOMIC_BINOP(op)                                              \
-  void InstructionSelector::VisitWord64AtomicNarrow##op(Node* node) {       \
-    VisitWord64AtomicNarrowBinop(node, kIA32Word64AtomicNarrow##op##Uint8,  \
-                                 kIA32Word64AtomicNarrow##op##Uint16,       \
-                                 kIA32Word64AtomicNarrow##op##Uint32);      \
-  }
-VISIT_ATOMIC_BINOP(Add)
-VISIT_ATOMIC_BINOP(Sub)
-VISIT_ATOMIC_BINOP(And)
-VISIT_ATOMIC_BINOP(Or)
-VISIT_ATOMIC_BINOP(Xor)
-#undef VISIT_ATOMIC_BINOP
-
-void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
-  MachineType type = AtomicOpType(node->op());
-  DCHECK(type != MachineType::Uint64());
-  ArchOpcode opcode = kArchNop;
-  if (type == MachineType::Uint32()) {
-    opcode = kIA32Word64AtomicNarrowExchangeUint32;
-  } else if (type == MachineType::Uint16()) {
-    opcode = kIA32Word64AtomicNarrowExchangeUint16;
-  } else if (type == MachineType::Uint8()) {
-    opcode = kIA32Word64AtomicNarrowExchangeUint8;
-  } else {
-    UNREACHABLE();
-    return;
-  }
-  IA32OperandGenerator g(this);
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
-  Node* value = node->InputAt(2);
-  AddressingMode addressing_mode;
-  InstructionOperand value_operand =
-      (type.representation() == MachineRepresentation::kWord8)
-          ? g.UseFixed(value, edx)
-          : g.UseUniqueRegister(value);
-  InstructionOperand inputs[] = {
-      value_operand, g.UseUniqueRegister(base),
-      g.GetEffectiveIndexOperand(index, &addressing_mode)};
-  InstructionOperand outputs[2];
-  if (type.representation() == MachineRepresentation::kWord8) {
-    // Using DefineSameAsFirst requires the register to be unallocated.
-    outputs[0] = g.DefineAsFixed(NodeProperties::FindProjection(node, 0), edx);
-  } else {
-    outputs[0] = g.DefineSameAsFirst(NodeProperties::FindProjection(node, 0));
-  }
-  outputs[1] = g.DefineAsRegister(NodeProperties::FindProjection(node, 1));
-  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
-  Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
-}
-
-void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
-  MachineType type = AtomicOpType(node->op());
-  DCHECK(type != MachineType::Uint64());
-  ArchOpcode opcode = kArchNop;
-  if (type == MachineType::Uint32()) {
-    opcode = kIA32Word64AtomicNarrowCompareExchangeUint32;
-  } else if (type == MachineType::Uint16()) {
-    opcode = kIA32Word64AtomicNarrowCompareExchangeUint16;
-  } else if (type == MachineType::Uint8()) {
-    opcode = kIA32Word64AtomicNarrowCompareExchangeUint8;
-  } else {
-    UNREACHABLE();
-    return;
-  }
-  IA32OperandGenerator g(this);
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
-  Node* old_value = node->InputAt(2);
-  Node* new_value = node->InputAt(3);
-  AddressingMode addressing_mode;
-  InstructionOperand new_value_operand =
-      (type.representation() == MachineRepresentation::kWord8)
-          ? g.UseByteRegister(new_value)
-          : g.UseUniqueRegister(new_value);
-  InstructionOperand inputs[] = {
-      g.UseFixed(old_value, eax), new_value_operand, g.UseUniqueRegister(base),
-      g.GetEffectiveIndexOperand(index, &addressing_mode)};
-  InstructionOperand outputs[] = {
-      g.DefineAsFixed(NodeProperties::FindProjection(node, 0), eax),
-      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
-  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
-  Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
-}
-
 #define SIMD_INT_TYPES(V) \
   V(I32x4)                \
   V(I16x8)                \
...
@@ -1746,21 +1746,6 @@ void InstructionSelector::VisitNode(Node* node) {
       ATOMIC_CASE(Xor)
       ATOMIC_CASE(Exchange)
       ATOMIC_CASE(CompareExchange)
-#undef ATOMIC_CASE
-#define ATOMIC_CASE(name)                                \
-  case IrOpcode::kWord64AtomicNarrow##name: {            \
-    MachineType type = AtomicOpType(node->op());         \
-    MarkAsRepresentation(type.representation(), node);   \
-    MarkPairProjectionsAsWord32(node);                   \
-    return VisitWord64AtomicNarrow##name(node);          \
-  }
-      ATOMIC_CASE(Add)
-      ATOMIC_CASE(Sub)
-      ATOMIC_CASE(And)
-      ATOMIC_CASE(Or)
-      ATOMIC_CASE(Xor)
-      ATOMIC_CASE(Exchange)
-      ATOMIC_CASE(CompareExchange)
 #undef ATOMIC_CASE
     case IrOpcode::kSpeculationFence:
       return VisitSpeculationFence(node);
@@ -2425,34 +2410,6 @@ void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
 void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
   UNIMPLEMENTED();
 }
-
-void InstructionSelector::VisitWord64AtomicNarrowAdd(Node* node) {
-  UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitWord64AtomicNarrowSub(Node* node) {
-  UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitWord64AtomicNarrowAnd(Node* node) {
-  UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitWord64AtomicNarrowOr(Node* node) {
-  UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitWord64AtomicNarrowXor(Node* node) {
-  UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
-  UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
-  UNIMPLEMENTED();
-}
 
 #endif  // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
 
 #if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
...
@@ -127,9 +127,10 @@ void Int64Lowering::LowerWord64AtomicBinop(Node* node, const Operator* op) {
 }
 
 void Int64Lowering::LowerWord64AtomicNarrowOp(Node* node, const Operator* op) {
-  DefaultLowering(node, true);
+  Node* value = node->InputAt(2);
+  node->ReplaceInput(2, GetReplacementLow(value));
   NodeProperties::ChangeOp(node, op);
-  ReplaceNodeWithProjections(node);
+  ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
 }
 
 // static
@@ -915,8 +916,7 @@ void Int64Lowering::LowerNode(Node* node) {
       if (type == MachineType::Uint64()) {                                    \
         LowerWord64AtomicBinop(node, machine()->Word32AtomicPair##name());    \
       } else {                                                                \
-        LowerWord64AtomicNarrowOp(node,                                       \
-                                  machine()->Word64AtomicNarrow##name(type)); \
+        LowerWord64AtomicNarrowOp(node, machine()->Word32Atomic##name(type)); \
       }                                                                       \
       break;                                                                  \
     }
@@ -940,8 +940,8 @@ void Int64Lowering::LowerNode(Node* node) {
                                machine()->Word32AtomicPairCompareExchange());
         ReplaceNodeWithProjections(node);
       } else {
-        LowerWord64AtomicNarrowOp(
-            node, machine()->Word64AtomicNarrowCompareExchange(type));
+        LowerWord64AtomicNarrowOp(node,
+                                  machine()->Word32AtomicCompareExchange(type));
       }
       break;
     }
...
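
(Aside, not part of the diff: the effect of the lowering above is that the
64-bit result becomes a {low, high} pair where the low word is the lowered
Word32 atomic node itself and the high word is a fresh Int32Constant(0),
mirroring ReplaceNode(node, node, Int32Constant(0)). A toy model of that
pairing, with illustrative names only:)

    #include <cstdint>
    #include <cstdio>
    #include <utility>

    // Stand-in for the lowered Word32Atomic op: produces the low word only.
    uint32_t RunLoweredWord32AtomicOp() { return 0x2C; }

    // Models ReplaceNode(node, node, Int32Constant(0)): consumers of the
    // old 64-bit value read {op result, constant 0}; no clearing code runs.
    std::pair<uint32_t, uint32_t> LoweredNarrowResult() {
      return {RunLoweredWord32AtomicOp(), 0u};  // {low, high}
    }

    int main() {
      auto [low, high] = LoweredNarrowResult();
      uint64_t result = (static_cast<uint64_t>(high) << 32) | low;
      std::printf("result=%llu\n",
                  static_cast<unsigned long long>(result));  // prints 44
      return 0;
    }
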
@@ -718,25 +718,6 @@ struct MachineOperatorGlobalCache {
 #undef ATOMIC_PAIR_OP
 #undef ATOMIC_PAIR_BINOP_LIST
 
-#define ATOMIC64_NARROW_OP(op, type)                                          \
-  struct op##type##Operator : public Operator1<MachineType> {                 \
-    op##type##Operator()                                                      \
-        : Operator1<MachineType>(                                             \
-              IrOpcode::k##op, Operator::kNoDeopt | Operator::kNoThrow, "#op", \
-              3, 1, 1, 2, 1, 0, MachineType::type()) {}                       \
-  };                                                                          \
-  op##type##Operator k##op##type;
-#define ATOMIC_OP_LIST(type)                            \
-  ATOMIC64_NARROW_OP(Word64AtomicNarrowAdd, type)       \
-  ATOMIC64_NARROW_OP(Word64AtomicNarrowSub, type)       \
-  ATOMIC64_NARROW_OP(Word64AtomicNarrowAnd, type)       \
-  ATOMIC64_NARROW_OP(Word64AtomicNarrowOr, type)        \
-  ATOMIC64_NARROW_OP(Word64AtomicNarrowXor, type)       \
-  ATOMIC64_NARROW_OP(Word64AtomicNarrowExchange, type)
-ATOMIC_U32_TYPE_LIST(ATOMIC_OP_LIST)
-#undef ATOMIC_OP_LIST
-#undef ATOMIC64_NARROW_OP
-
 struct Word32AtomicPairCompareExchangeOperator : public Operator {
   Word32AtomicPairCompareExchangeOperator()
       : Operator(IrOpcode::kWord32AtomicPairCompareExchange,
@@ -745,20 +726,6 @@ struct MachineOperatorGlobalCache {
   };
   Word32AtomicPairCompareExchangeOperator kWord32AtomicPairCompareExchange;
 
-#define ATOMIC_COMPARE_EXCHANGE(Type)                                         \
-  struct Word64AtomicNarrowCompareExchange##Type##Operator                    \
-      : public Operator1<MachineType> {                                       \
-    Word64AtomicNarrowCompareExchange##Type##Operator()                       \
-        : Operator1<MachineType>(IrOpcode::kWord64AtomicNarrowCompareExchange, \
-                                 Operator::kNoDeopt | Operator::kNoThrow,     \
-                                 "Word64AtomicNarrowCompareExchange", 4, 1, 1, \
-                                 2, 1, 0, MachineType::Type()) {}             \
-  };                                                                          \
-  Word64AtomicNarrowCompareExchange##Type##Operator                           \
-      kWord64AtomicNarrowCompareExchange##Type;
-ATOMIC_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
-#undef ATOMIC_COMPARE_EXCHANGE
-
 // The {BitcastWordToTagged} operator must not be marked as pure (especially
 // not idempotent), because otherwise the splitting logic in the Scheduler
 // might decide to split these operators, thus potentially creating live
@@ -1245,82 +1212,6 @@ const Operator* MachineOperatorBuilder::Word32AtomicPairCompareExchange() {
   return &cache_.kWord32AtomicPairCompareExchange;
 }
 
-const Operator* MachineOperatorBuilder::Word64AtomicNarrowAdd(
-    MachineType type) {
-#define ADD(kType)                                 \
-  if (type == MachineType::kType()) {              \
-    return &cache_.kWord64AtomicNarrowAdd##kType;  \
-  }
-  ATOMIC_U32_TYPE_LIST(ADD)
-#undef ADD
-  UNREACHABLE();
-}
-
-const Operator* MachineOperatorBuilder::Word64AtomicNarrowSub(
-    MachineType type) {
-#define SUB(kType)                                 \
-  if (type == MachineType::kType()) {              \
-    return &cache_.kWord64AtomicNarrowSub##kType;  \
-  }
-  ATOMIC_U32_TYPE_LIST(SUB)
-#undef SUB
-  UNREACHABLE();
-}
-
-const Operator* MachineOperatorBuilder::Word64AtomicNarrowAnd(
-    MachineType type) {
-#define AND(kType)                                 \
-  if (type == MachineType::kType()) {              \
-    return &cache_.kWord64AtomicNarrowAnd##kType;  \
-  }
-  ATOMIC_U32_TYPE_LIST(AND)
-#undef AND
-  UNREACHABLE();
-}
-
-const Operator* MachineOperatorBuilder::Word64AtomicNarrowOr(MachineType type) {
-#define OR(kType)                                  \
-  if (type == MachineType::kType()) {              \
-    return &cache_.kWord64AtomicNarrowOr##kType;   \
-  }
-  ATOMIC_U32_TYPE_LIST(OR)
-#undef OR
-  UNREACHABLE();
-}
-
-const Operator* MachineOperatorBuilder::Word64AtomicNarrowXor(
-    MachineType type) {
-#define XOR(kType)                                 \
-  if (type == MachineType::kType()) {              \
-    return &cache_.kWord64AtomicNarrowXor##kType;  \
-  }
-  ATOMIC_U32_TYPE_LIST(XOR)
-#undef XOR
-  UNREACHABLE();
-}
-
-const Operator* MachineOperatorBuilder::Word64AtomicNarrowExchange(
-    MachineType type) {
-#define EXCHANGE(kType)                                 \
-  if (type == MachineType::kType()) {                   \
-    return &cache_.kWord64AtomicNarrowExchange##kType;  \
-  }
-  ATOMIC_U32_TYPE_LIST(EXCHANGE)
-#undef EXCHANGE
-  UNREACHABLE();
-}
-
-const Operator* MachineOperatorBuilder::Word64AtomicNarrowCompareExchange(
-    MachineType type) {
-#define CMP_EXCHANGE(kType)                                    \
-  if (type == MachineType::kType()) {                          \
-    return &cache_.kWord64AtomicNarrowCompareExchange##kType;  \
-  }
-  ATOMIC_U32_TYPE_LIST(CMP_EXCHANGE)
-#undef CMP_EXCHANGE
-  UNREACHABLE();
-}
-
 const Operator* MachineOperatorBuilder::TaggedPoisonOnSpeculation() {
   return &cache_.kTaggedPoisonOnSpeculation;
 }
...
@@ -648,20 +648,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* Word64AtomicOr(MachineType type);
   // atomic-xor [base + index], value
   const Operator* Word64AtomicXor(MachineType rep);
-  // atomic-narrow-add [base + index], value
-  const Operator* Word64AtomicNarrowAdd(MachineType type);
-  // atomic-narow-sub [base + index], value
-  const Operator* Word64AtomicNarrowSub(MachineType type);
-  // atomic-narrow-and [base + index], value
-  const Operator* Word64AtomicNarrowAnd(MachineType type);
-  // atomic-narrow-or [base + index], value
-  const Operator* Word64AtomicNarrowOr(MachineType type);
-  // atomic-narrow-xor [base + index], value
-  const Operator* Word64AtomicNarrowXor(MachineType type);
-  // atomic-narrow-exchange [base + index], value
-  const Operator* Word64AtomicNarrowExchange(MachineType type);
-  // atomic-narrow-compare-exchange [base + index], old_value, new_value
-  const Operator* Word64AtomicNarrowCompareExchange(MachineType type);
   // atomic-pair-load [base + index]
   const Operator* Word32AtomicPairLoad();
   // atomic-pair-sub [base + index], value_high, value-low
...
@@ -572,14 +572,7 @@
   V(Word64AtomicOr)                   \
   V(Word64AtomicXor)                  \
   V(Word64AtomicExchange)             \
-  V(Word64AtomicCompareExchange)      \
-  V(Word64AtomicNarrowAdd)            \
-  V(Word64AtomicNarrowSub)            \
-  V(Word64AtomicNarrowAnd)            \
-  V(Word64AtomicNarrowOr)             \
-  V(Word64AtomicNarrowXor)            \
-  V(Word64AtomicNarrowExchange)       \
-  V(Word64AtomicNarrowCompareExchange)
+  V(Word64AtomicCompareExchange)
 
 #define MACHINE_OP_LIST(V) \
   MACHINE_UNOP_32_LIST(V)  \
...
@@ -1751,13 +1751,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
     case IrOpcode::kWord32AtomicPairXor:
     case IrOpcode::kWord32AtomicPairExchange:
     case IrOpcode::kWord32AtomicPairCompareExchange:
-    case IrOpcode::kWord64AtomicNarrowAdd:
-    case IrOpcode::kWord64AtomicNarrowSub:
-    case IrOpcode::kWord64AtomicNarrowAnd:
-    case IrOpcode::kWord64AtomicNarrowOr:
-    case IrOpcode::kWord64AtomicNarrowXor:
-    case IrOpcode::kWord64AtomicNarrowExchange:
-    case IrOpcode::kWord64AtomicNarrowCompareExchange:
     case IrOpcode::kSpeculationFence:
     case IrOpcode::kSignExtendWord8ToInt32:
     case IrOpcode::kSignExtendWord16ToInt32:
...
@@ -32,24 +32,12 @@ void RunU32BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
   }
 }
 
-WASM_EXEC_TEST(I32AtomicAdd) {
-  RunU32BinOp(execution_tier, kExprI32AtomicAdd, Add);
-}
-WASM_EXEC_TEST(I32AtomicSub) {
-  RunU32BinOp(execution_tier, kExprI32AtomicSub, Sub);
-}
-WASM_EXEC_TEST(I32AtomicAnd) {
-  RunU32BinOp(execution_tier, kExprI32AtomicAnd, And);
-}
-WASM_EXEC_TEST(I32AtomicOr) {
-  RunU32BinOp(execution_tier, kExprI32AtomicOr, Or);
-}
-WASM_EXEC_TEST(I32AtomicXor) {
-  RunU32BinOp(execution_tier, kExprI32AtomicXor, Xor);
-}
-WASM_EXEC_TEST(I32AtomicExchange) {
-  RunU32BinOp(execution_tier, kExprI32AtomicExchange, Exchange);
-}
+#define TEST_OPERATION(Name)                                 \
+  WASM_EXEC_TEST(I32Atomic##Name) {                          \
+    RunU32BinOp(execution_tier, kExprI32Atomic##Name, Name); \
+  }
+OPERATION_LIST(TEST_OPERATION)
+#undef TEST_OPERATION
 
 void RunU16BinOp(ExecutionTier tier, WasmOpcode wasm_op,
                  Uint16BinOp expected_op) {
@@ -73,24 +61,12 @@ void RunU16BinOp(ExecutionTier tier, WasmOpcode wasm_op,
   }
 }
 
-WASM_EXEC_TEST(I32AtomicAdd16U) {
-  RunU16BinOp(execution_tier, kExprI32AtomicAdd16U, Add);
-}
-WASM_EXEC_TEST(I32AtomicSub16U) {
-  RunU16BinOp(execution_tier, kExprI32AtomicSub16U, Sub);
-}
-WASM_EXEC_TEST(I32AtomicAnd16U) {
-  RunU16BinOp(execution_tier, kExprI32AtomicAnd16U, And);
-}
-WASM_EXEC_TEST(I32AtomicOr16U) {
-  RunU16BinOp(execution_tier, kExprI32AtomicOr16U, Or);
-}
-WASM_EXEC_TEST(I32AtomicXor16U) {
-  RunU16BinOp(execution_tier, kExprI32AtomicXor16U, Xor);
-}
-WASM_EXEC_TEST(I32AtomicExchange16U) {
-  RunU16BinOp(execution_tier, kExprI32AtomicExchange16U, Exchange);
-}
+#define TEST_OPERATION(Name)                                      \
+  WASM_EXEC_TEST(I32Atomic##Name##16U) {                          \
+    RunU16BinOp(execution_tier, kExprI32Atomic##Name##16U, Name); \
+  }
+OPERATION_LIST(TEST_OPERATION)
+#undef TEST_OPERATION
 
 void RunU8BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
                 Uint8BinOp expected_op) {
@@ -113,24 +89,12 @@ void RunU8BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
   }
 }
 
-WASM_EXEC_TEST(I32AtomicAdd8U) {
-  RunU8BinOp(execution_tier, kExprI32AtomicAdd8U, Add);
-}
-WASM_EXEC_TEST(I32AtomicSub8U) {
-  RunU8BinOp(execution_tier, kExprI32AtomicSub8U, Sub);
-}
-WASM_EXEC_TEST(I32AtomicAnd8U) {
-  RunU8BinOp(execution_tier, kExprI32AtomicAnd8U, And);
-}
-WASM_EXEC_TEST(I32AtomicOr8U) {
-  RunU8BinOp(execution_tier, kExprI32AtomicOr8U, Or);
-}
-WASM_EXEC_TEST(I32AtomicXor8U) {
-  RunU8BinOp(execution_tier, kExprI32AtomicXor8U, Xor);
-}
-WASM_EXEC_TEST(I32AtomicExchange8U) {
-  RunU8BinOp(execution_tier, kExprI32AtomicExchange8U, Exchange);
-}
+#define TEST_OPERATION(Name)                                    \
+  WASM_EXEC_TEST(I32Atomic##Name##8U) {                         \
+    RunU8BinOp(execution_tier, kExprI32Atomic##Name##8U, Name); \
+  }
+OPERATION_LIST(TEST_OPERATION)
+#undef TEST_OPERATION
 
 WASM_EXEC_TEST(I32AtomicCompareExchange) {
   EXPERIMENTAL_FLAG_SCOPE(threads);
...
@@ -32,24 +32,12 @@ void RunU64BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
   }
 }
 
-WASM_EXEC_TEST(I64AtomicAdd) {
-  RunU64BinOp(execution_tier, kExprI64AtomicAdd, Add);
-}
-WASM_EXEC_TEST(I64AtomicSub) {
-  RunU64BinOp(execution_tier, kExprI64AtomicSub, Sub);
-}
-WASM_EXEC_TEST(I64AtomicAnd) {
-  RunU64BinOp(execution_tier, kExprI64AtomicAnd, And);
-}
-WASM_EXEC_TEST(I64AtomicOr) {
-  RunU64BinOp(execution_tier, kExprI64AtomicOr, Or);
-}
-WASM_EXEC_TEST(I64AtomicXor) {
-  RunU64BinOp(execution_tier, kExprI64AtomicXor, Xor);
-}
-WASM_EXEC_TEST(I64AtomicExchange) {
-  RunU64BinOp(execution_tier, kExprI64AtomicExchange, Exchange);
-}
+#define TEST_OPERATION(Name)                                 \
+  WASM_EXEC_TEST(I64Atomic##Name) {                          \
+    RunU64BinOp(execution_tier, kExprI64Atomic##Name, Name); \
+  }
+OPERATION_LIST(TEST_OPERATION)
+#undef TEST_OPERATION
 
 void RunU32BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
                  Uint32BinOp expected_op) {
@@ -73,24 +61,12 @@ void RunU32BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
   }
 }
 
-WASM_EXEC_TEST(I64AtomicAdd32U) {
-  RunU32BinOp(execution_tier, kExprI64AtomicAdd32U, Add);
-}
-WASM_EXEC_TEST(I64AtomicSub32U) {
-  RunU32BinOp(execution_tier, kExprI64AtomicSub32U, Sub);
-}
-WASM_EXEC_TEST(I64AtomicAnd32U) {
-  RunU32BinOp(execution_tier, kExprI64AtomicAnd32U, And);
-}
-WASM_EXEC_TEST(I64AtomicOr32U) {
-  RunU32BinOp(execution_tier, kExprI64AtomicOr32U, Or);
-}
-WASM_EXEC_TEST(I64AtomicXor32U) {
-  RunU32BinOp(execution_tier, kExprI64AtomicXor32U, Xor);
-}
-WASM_EXEC_TEST(I64AtomicExchange32U) {
-  RunU32BinOp(execution_tier, kExprI64AtomicExchange32U, Exchange);
-}
+#define TEST_OPERATION(Name)                                      \
+  WASM_EXEC_TEST(I64Atomic##Name##32U) {                          \
+    RunU32BinOp(execution_tier, kExprI64Atomic##Name##32U, Name); \
+  }
+OPERATION_LIST(TEST_OPERATION)
+#undef TEST_OPERATION
 
 void RunU16BinOp(ExecutionTier tier, WasmOpcode wasm_op,
                  Uint16BinOp expected_op) {
@@ -114,24 +90,12 @@ void RunU16BinOp(ExecutionTier tier, WasmOpcode wasm_op,
   }
 }
 
-WASM_EXEC_TEST(I64AtomicAdd16U) {
-  RunU16BinOp(execution_tier, kExprI64AtomicAdd16U, Add);
-}
-WASM_EXEC_TEST(I64AtomicSub16U) {
-  RunU16BinOp(execution_tier, kExprI64AtomicSub16U, Sub);
-}
-WASM_EXEC_TEST(I64AtomicAnd16U) {
-  RunU16BinOp(execution_tier, kExprI64AtomicAnd16U, And);
-}
-WASM_EXEC_TEST(I64AtomicOr16U) {
-  RunU16BinOp(execution_tier, kExprI64AtomicOr16U, Or);
-}
-WASM_EXEC_TEST(I64AtomicXor16U) {
-  RunU16BinOp(execution_tier, kExprI64AtomicXor16U, Xor);
-}
-WASM_EXEC_TEST(I64AtomicExchange16U) {
-  RunU16BinOp(execution_tier, kExprI64AtomicExchange16U, Exchange);
-}
+#define TEST_OPERATION(Name)                                      \
+  WASM_EXEC_TEST(I64Atomic##Name##16U) {                          \
+    RunU16BinOp(execution_tier, kExprI64Atomic##Name##16U, Name); \
+  }
+OPERATION_LIST(TEST_OPERATION)
+#undef TEST_OPERATION
 
 void RunU8BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
                 Uint8BinOp expected_op) {
@@ -154,24 +118,12 @@ void RunU8BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
   }
 }
 
-WASM_EXEC_TEST(I64AtomicAdd8U) {
-  RunU8BinOp(execution_tier, kExprI64AtomicAdd8U, Add);
-}
-WASM_EXEC_TEST(I64AtomicSub8U) {
-  RunU8BinOp(execution_tier, kExprI64AtomicSub8U, Sub);
-}
-WASM_EXEC_TEST(I64AtomicAnd8U) {
-  RunU8BinOp(execution_tier, kExprI64AtomicAnd8U, And);
-}
-WASM_EXEC_TEST(I64AtomicOr8U) {
-  RunU8BinOp(execution_tier, kExprI64AtomicOr8U, Or);
-}
-WASM_EXEC_TEST(I64AtomicXor8U) {
-  RunU8BinOp(execution_tier, kExprI64AtomicXor8U, Xor);
-}
-WASM_EXEC_TEST(I64AtomicExchange8U) {
-  RunU8BinOp(execution_tier, kExprI64AtomicExchange8U, Exchange);
-}
+#define TEST_OPERATION(Name)                                    \
+  WASM_EXEC_TEST(I64Atomic##Name##8U) {                         \
+    RunU8BinOp(execution_tier, kExprI64Atomic##Name##8U, Name); \
+  }
+OPERATION_LIST(TEST_OPERATION)
+#undef TEST_OPERATION
 
 WASM_EXEC_TEST(I64AtomicCompareExchange) {
   EXPERIMENTAL_FLAG_SCOPE(threads);
...
@@ -13,6 +13,14 @@ namespace v8 {
 namespace internal {
 namespace wasm {
 
+#define OPERATION_LIST(V) \
+  V(Add)                  \
+  V(Sub)                  \
+  V(And)                  \
+  V(Or)                   \
+  V(Xor)                  \
+  V(Exchange)
+
 typedef uint64_t (*Uint64BinOp)(uint64_t, uint64_t);
 typedef uint32_t (*Uint32BinOp)(uint32_t, uint32_t);
 typedef uint16_t (*Uint16BinOp)(uint16_t, uint16_t);
...
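
For reference, with OPERATION_LIST defined as above, each macroized test
block expands to one test per operation, identical to the hand-written
tests this change removes; e.g. in the U32 case, TEST_OPERATION(Add)
expands to:

    WASM_EXEC_TEST(I32AtomicAdd) {
      RunU32BinOp(execution_tier, kExprI32AtomicAdd, Add);
    }

and likewise for Sub, And, Or, Xor, and Exchange.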