Commit 6f23c89e authored by Deepti Gandluri's avatar Deepti Gandluri Committed by Commit Bot

Add I64Atomic binary operations for ia32

Bug: v8:6532

Change-Id: Ie983fa561654f86597b8f45c5ce11f993846bfe6
Reviewed-on: https://chromium-review.googlesource.com/1145893
Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Ben Titzer <titzer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54796}
parent 9ba2a0a2
......@@ -168,6 +168,22 @@ class IA32OperandConverter : public InstructionOperandConverter {
// Convenience overload: builds the memory operand whose components start
// at instruction input |first_input| (default 0), delegating to the
// pointer-taking MemoryOperand.
Operand MemoryOperand(size_t first_input = 0) {
  return MemoryOperand(&first_input);
}
// Builds the memory operand for the word that follows the one addressed by
// MemoryOperand(offset), i.e. the high half of a 64-bit memory access.
// Only the MR1 and MRI addressing modes are expected here.
Operand NextMemoryOperand(size_t offset = 0) {
  AddressingMode mode = AddressingModeField::decode(instr_->opcode());
  Register base = InputRegister(NextOffset(&offset));
  // The "next" word lives 4 bytes past the first one.
  const int32_t kNextWordDisplacement = 4;
  switch (mode) {
    case kMode_MR1: {
      Register index = InputRegister(NextOffset(&offset));
      ScaleFactor scale = ScaleFor(kMode_MR1, kMode_MR1);
      return Operand(base, index, scale, kNextWordDisplacement);
    }
    case kMode_MRI: {
      Constant displacement = ToConstant(instr_->InputAt(NextOffset(&offset)));
      return Operand(base, displacement.ToInt32() + kNextWordDisplacement,
                     displacement.rmode());
    }
    default:
      UNREACHABLE();
  }
}
};
......@@ -411,6 +427,23 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ j(not_equal, &binop); \
} while (false)
// Emits a compare-exchange loop that atomically applies a 64-bit binary
// operation to a memory location:
//   1. Load the current 64-bit value into the output register pair
//      (edx:eax, see the instruction selector's DefineAsFixed).
//   2. Save the value operand (input registers 0/1, fixed to ebx:ecx by
//      the selector) on the stack, since the ALU ops below clobber it.
//   3. Compute <value op current> into the input register pair; cmpxchg8b
//      implicitly takes the desired new value from ecx:ebx and the
//      expected old value from edx:eax.
//   4. lock cmpxchg8b; if another thread raced, edx:eax is reloaded and
//      the loop retries.
// NOTE(review): the trailing ';' after 'while (false)' defeats the
// do-while(false) idiom (the invocation site currently relies on the
// macro supplying the statement terminator); kept as-is here so the
// existing use sites remain valid.
#define ASSEMBLE_I64ATOMIC_BINOP(instr1, instr2)          \
  do {                                                    \
    Label binop;                                          \
    __ bind(&binop);                                      \
    __ mov(i.OutputRegister(0), i.MemoryOperand(2));      \
    __ mov(i.OutputRegister(1), i.NextMemoryOperand(2));  \
    __ push(i.InputRegister(0));                          \
    __ push(i.InputRegister(1));                          \
    __ instr1(i.InputRegister(0), i.OutputRegister(0));   \
    __ instr2(i.InputRegister(1), i.OutputRegister(1));   \
    __ lock();                                            \
    __ cmpxchg8b(i.MemoryOperand(2));                     \
    __ pop(i.InputRegister(1));                           \
    __ pop(i.InputRegister(0));                           \
    __ j(not_equal, &binop);                              \
  } while (false);
#define ASSEMBLE_MOVX(mov_instr) \
do { \
if (instr->addressing_mode() != kMode_None) { \
......@@ -3652,30 +3685,47 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
break;
}
#define ATOMIC_BINOP_CASE(op, inst) \
case kWord32Atomic##op##Int8: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
__ movsx_b(eax, eax); \
break; \
} \
case kWord32Atomic##op##Uint8: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
__ movzx_b(eax, eax); \
break; \
} \
case kWord32Atomic##op##Int16: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
__ movsx_w(eax, eax); \
break; \
} \
case kWord32Atomic##op##Uint16: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
__ movzx_w(eax, eax); \
break; \
} \
case kWord32Atomic##op##Word32: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
break; \
// Expands the switch cases for one 32-bit atomic binop. Covers the plain
// Word32 variants (result in eax, sign- or zero-extended to 32 bit) and
// the Word64AtomicNarrow variants, which perform the same 8/16/32-bit
// operation but produce an i64 result: the low word is the zero-extended
// old value and the high word is explicitly zeroed.
#define ATOMIC_BINOP_CASE(op, inst)                           \
  case kWord32Atomic##op##Int8: {                             \
    ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b);            \
    __ movsx_b(eax, eax);                                     \
    break;                                                    \
  }                                                           \
  case kIA32Word64AtomicNarrow##op##Uint8: {                  \
    ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b);            \
    __ movzx_b(i.OutputRegister(0), i.OutputRegister(0));     \
    __ xor_(i.OutputRegister(1), i.OutputRegister(1));        \
    break;                                                    \
  }                                                           \
  case kWord32Atomic##op##Uint8: {                            \
    ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b);            \
    __ movzx_b(eax, eax);                                     \
    break;                                                    \
  }                                                           \
  case kWord32Atomic##op##Int16: {                            \
    ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w);            \
    __ movsx_w(eax, eax);                                     \
    break;                                                    \
  }                                                           \
  case kIA32Word64AtomicNarrow##op##Uint16: {                 \
    ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w);            \
    __ movzx_w(i.OutputRegister(0), i.OutputRegister(0));     \
    __ xor_(i.OutputRegister(1), i.OutputRegister(1));        \
    break;                                                    \
  }                                                           \
  case kWord32Atomic##op##Uint16: {                           \
    ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w);            \
    __ movzx_w(eax, eax);                                     \
    break;                                                    \
  }                                                           \
  case kIA32Word64AtomicNarrow##op##Uint32: {                 \
    ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg);                \
    __ xor_(i.OutputRegister(1), i.OutputRegister(1));        \
    break;                                                    \
  }                                                           \
  case kWord32Atomic##op##Word32: {                           \
    ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg);                \
    break;                                                    \
  }
ATOMIC_BINOP_CASE(Add, add)
ATOMIC_BINOP_CASE(Sub, sub)
......@@ -3683,6 +3733,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, or_)
ATOMIC_BINOP_CASE(Xor, xor_)
#undef ATOMIC_BINOP_CASE
// Dispatches the 64-bit (register-pair) atomic binops to the cmpxchg8b
// loop in ASSEMBLE_I64ATOMIC_BINOP. |instr1| operates on the low word,
// |instr2| on the high word (e.g. add/adc so the carry propagates across
// the pair). The invocation now carries its own ';' like every other
// ASSEMBLE_* macro use in this file, so it no longer depends on
// ASSEMBLE_I64ATOMIC_BINOP's (idiom-breaking) trailing semicolon.
#define ATOMIC_BINOP_CASE(op, instr1, instr2)  \
  case kIA32Word32AtomicPair##op: {            \
    ASSEMBLE_I64ATOMIC_BINOP(instr1, instr2);  \
    break;                                     \
  }
      ATOMIC_BINOP_CASE(Add, add, adc)
      ATOMIC_BINOP_CASE(And, and_, and_)
      ATOMIC_BINOP_CASE(Or, or_, or_)
      ATOMIC_BINOP_CASE(Xor, xor_, xor_)
#undef ATOMIC_BINOP_CASE
// 64-bit atomic subtract. There is no sub/sbb variant of the
// ASSEMBLE_I64ATOMIC_BINOP loop: cmpxchg8b takes the desired new value
// from the input register pair, so the subtraction is performed as an
// addition of the negated input.
case kIA32Word32AtomicPairSub: {
  Label binop;
  __ bind(&binop);
  // Move memory operand into edx:eax
  __ mov(i.OutputRegister(0), i.MemoryOperand(2));
  __ mov(i.OutputRegister(1), i.NextMemoryOperand(2));
  // Save input registers temporarily on the stack.
  __ push(i.InputRegister(0));
  __ push(i.InputRegister(1));
  // Negate input in place. 64-bit two's-complement negation: neg on the
  // low word sets the carry flag iff the low word was non-zero, adc
  // folds that borrow into the high word, then the high word is negated.
  __ neg(i.InputRegister(0));
  __ adc(i.InputRegister(1), 0);
  __ neg(i.InputRegister(1));
  // Add memory operand, negated input.
  __ add(i.InputRegister(0), i.OutputRegister(0));
  __ adc(i.InputRegister(1), i.OutputRegister(1));
  // lock cmpxchg8b compares edx:eax with memory; on mismatch it reloads
  // edx:eax and the loop retries.
  __ lock();
  __ cmpxchg8b(i.MemoryOperand(2));
  // Restore input registers
  __ pop(i.InputRegister(1));
  __ pop(i.InputRegister(0));
  __ j(not_equal, &binop);
  break;
}
case kWord32AtomicLoadInt8:
case kWord32AtomicLoadUint8:
case kWord32AtomicLoadInt16:
......@@ -4452,6 +4536,7 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
#undef ASSEMBLE_IEEE754_UNOP
#undef ASSEMBLE_BINOP
#undef ASSEMBLE_ATOMIC_BINOP
#undef ASSEMBLE_I64ATOMIC_BINOP
#undef ASSEMBLE_MOVX
#undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
......
This diff is collapsed.
......@@ -368,6 +368,28 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kLFence:
return kHasSideEffect;
case kIA32Word64AtomicNarrowAddUint8:
case kIA32Word64AtomicNarrowAddUint16:
case kIA32Word64AtomicNarrowAddUint32:
case kIA32Word64AtomicNarrowSubUint8:
case kIA32Word64AtomicNarrowSubUint16:
case kIA32Word64AtomicNarrowSubUint32:
case kIA32Word64AtomicNarrowAndUint8:
case kIA32Word64AtomicNarrowAndUint16:
case kIA32Word64AtomicNarrowAndUint32:
case kIA32Word64AtomicNarrowOrUint8:
case kIA32Word64AtomicNarrowOrUint16:
case kIA32Word64AtomicNarrowOrUint32:
case kIA32Word64AtomicNarrowXorUint8:
case kIA32Word64AtomicNarrowXorUint16:
case kIA32Word64AtomicNarrowXorUint32:
case kIA32Word32AtomicPairAdd:
case kIA32Word32AtomicPairSub:
case kIA32Word32AtomicPairAnd:
case kIA32Word32AtomicPairOr:
case kIA32Word32AtomicPairXor:
return kHasSideEffect;
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
......
......@@ -1319,6 +1319,100 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
selector->Emit(code, 1, outputs, input_count, inputs);
}
// Emits a 32-bit (or narrower) atomic read-modify-write instruction.
// The result is fixed to eax (required by the cmpxchg-based codegen),
// and 8-bit operations get a byte-addressable temp register.
void VisitAtomicBinOp(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode, MachineType type) {
  IA32OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  // Fold a constant index into the displacement when possible.
  AddressingMode addressing_mode = kMode_MRI;
  InstructionOperand index_operand;
  if (g.CanBeImmediate(index)) {
    index_operand = g.UseImmediate(index);
  } else {
    addressing_mode = kMode_MR1;
    index_operand = g.UseUniqueRegister(index);
  }
  InstructionOperand inputs[] = {g.UseUniqueRegister(value),
                                 g.UseUniqueRegister(base), index_operand};
  InstructionOperand outputs[] = {g.DefineAsFixed(node, eax)};
  // Byte-wide cmpxchg needs a register with a low-byte form.
  const bool is_byte_op =
      type == MachineType::Int8() || type == MachineType::Uint8();
  InstructionOperand temp[] = {is_byte_op ? g.UseByteRegister(node)
                                          : g.TempRegister()};
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
                 arraysize(temp), temp);
}
// Instruction selection for the 64-bit register-pair atomic binops
// (kIA32Word32AtomicPair*). The value is consumed as two 32-bit words
// fixed to ebx (low) and ecx (high) because cmpxchg8b takes the desired
// new value from ecx:ebx; the old memory value is produced in eax (low,
// projection 0) and edx (high, projection 1).
void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
                          ArchOpcode opcode) {
  IA32OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  // For Word64 operations, the value input is split into a high node
  // and a low node in the int64-lowering phase.
  Node* value_high = node->InputAt(3);
  // Wasm lives in 32-bit address space, so we do not need to worry about
  // base/index lowering. This will need to be fixed for Wasm64.
  AddressingMode addressing_mode;
  InstructionOperand index_operand;
  if (g.CanBeImmediate(index)) {
    index_operand = g.UseImmediate(index);
    addressing_mode = kMode_MRI;
  } else {
    index_operand = g.UseUniqueRegister(index);
    addressing_mode = kMode_MR1;
  }
  InstructionOperand inputs[] = {g.UseFixed(value, ebx),
                                 g.UseFixed(value_high, ecx),
                                 g.UseUniqueRegister(base), index_operand};
  InstructionOperand outputs[] = {
      g.DefineAsFixed(NodeProperties::FindProjection(node, 0), eax),
      g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
// Instruction selection for the narrowing (8/16/32-bit) variants of the
// 64-bit atomic binops. The operation itself is a 32-bit cmpxchg loop,
// so the old value lands in eax (projection 0); the high word
// (projection 1, fixed to edx) is zeroed by the code generator. 8-bit
// operations need a byte-addressable temp register.
void VisitNarrowAtomicBinOp(InstructionSelector* selector, Node* node,
                            ArchOpcode opcode, MachineType type) {
  IA32OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  // Wasm lives in 32-bit address space, so we do not need to worry about
  // base/index lowering. This will need to be fixed for Wasm64.
  AddressingMode addressing_mode;
  InstructionOperand index_operand;
  if (g.CanBeImmediate(index)) {
    index_operand = g.UseImmediate(index);
    addressing_mode = kMode_MRI;
  } else {
    index_operand = g.UseUniqueRegister(index);
    addressing_mode = kMode_MR1;
  }
  InstructionOperand inputs[] = {g.UseUniqueRegister(value),
                                 g.UseUniqueRegister(base), index_operand};
  InstructionOperand outputs[] = {
      g.DefineAsFixed(NodeProperties::FindProjection(node, 0), eax),
      g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
  InstructionOperand temp[1];
  if (type == MachineType::Uint8()) {
    temp[0] = g.UseByteRegister(node);
  } else {
    temp[0] = g.TempRegister();
  }
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
                 arraysize(temp), temp);
}
} // namespace
// Shared routine for word comparison with zero.
......@@ -1676,11 +1770,6 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
IA32OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
......@@ -1697,28 +1786,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
UNREACHABLE();
return;
}
InstructionOperand outputs[1];
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(value);
inputs[input_count++] = g.UseUniqueRegister(base);
if (g.CanBeImmediate(index)) {
inputs[input_count++] = g.UseImmediate(index);
addressing_mode = kMode_MRI;
} else {
inputs[input_count++] = g.UseUniqueRegister(index);
addressing_mode = kMode_MR1;
}
outputs[0] = g.DefineAsFixed(node, eax);
InstructionOperand temp[1];
if (type == MachineType::Int8() || type == MachineType::Uint8()) {
temp[0] = g.UseByteRegister(node);
} else {
temp[0] = g.TempRegister();
}
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, 1, outputs, input_count, inputs, 1, temp);
VisitAtomicBinOp(this, node, opcode, type);
}
#define VISIT_ATOMIC_BINOP(op) \
......@@ -1735,6 +1803,59 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
// ia32 instruction selection for the register-pair (64-bit) atomic
// binops; operand constraints are shared in VisitPairAtomicBinOp.
void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
  VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairAdd);
}

void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
  VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairSub);
}

void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
  VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairAnd);
}

void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
  VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairOr);
}

void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
  VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairXor);
}
// Shared selector for the narrowing 64-bit atomic binops: picks the arch
// opcode matching the (8/16/32-bit) memory type and defers operand
// assignment to VisitNarrowAtomicBinOp. Uint64 operations never reach
// here — they are lowered to the register-pair opcodes instead.
void InstructionSelector::VisitWord64AtomicNarrowBinop(Node* node,
                                                       ArchOpcode uint8_op,
                                                       ArchOpcode uint16_op,
                                                       ArchOpcode uint32_op) {
  MachineType type = AtomicOpType(node->op());
  // Use DCHECK_NE for a diagnostic that prints both operands on failure.
  DCHECK_NE(MachineType::Uint64(), type);
  ArchOpcode opcode = kArchNop;
  if (type == MachineType::Uint32()) {
    opcode = uint32_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else {
    UNREACHABLE();
    return;
  }
  VisitNarrowAtomicBinOp(this, node, opcode, type);
}
// Instantiates VisitWord64AtomicNarrow{Add,Sub,And,Or,Xor} by forwarding
// to the shared VisitWord64AtomicNarrowBinop with the matching trio of
// ia32 opcodes.
#define VISIT_ATOMIC_BINOP(op)                                             \
  void InstructionSelector::VisitWord64AtomicNarrow##op(Node* node) {      \
    VisitWord64AtomicNarrowBinop(node, kIA32Word64AtomicNarrow##op##Uint8, \
                                 kIA32Word64AtomicNarrow##op##Uint16,      \
                                 kIA32Word64AtomicNarrow##op##Uint32);     \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
#define SIMD_INT_TYPES(V) \
V(I32x4) \
V(I16x8) \
......
......@@ -1725,6 +1725,31 @@ void InstructionSelector::VisitNode(Node* node) {
ATOMIC_CASE(Exchange, Word64)
ATOMIC_CASE(CompareExchange, Word32)
ATOMIC_CASE(CompareExchange, Word64)
#undef ATOMIC_CASE
#define ATOMIC_CASE(name) \
case IrOpcode::kWord32AtomicPair##name: { \
MarkAsWord32(node); \
MarkPairProjectionsAsWord32(node); \
return VisitWord32AtomicPair##name(node); \
}
ATOMIC_CASE(Add)
ATOMIC_CASE(Sub)
ATOMIC_CASE(And)
ATOMIC_CASE(Or)
ATOMIC_CASE(Xor)
#undef ATOMIC_CASE
#define ATOMIC_CASE(name) \
case IrOpcode::kWord64AtomicNarrow##name: { \
MachineType type = AtomicOpType(node->op()); \
MarkAsRepresentation(type.representation(), node); \
MarkPairProjectionsAsWord32(node); \
return VisitWord64AtomicNarrow##name(node); \
}
ATOMIC_CASE(Add)
ATOMIC_CASE(Sub)
ATOMIC_CASE(And)
ATOMIC_CASE(Or)
ATOMIC_CASE(Xor)
#undef ATOMIC_CASE
case IrOpcode::kSpeculationFence:
return VisitSpeculationFence(node);
......@@ -2353,6 +2378,48 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT
// Default placeholders for targets without an implementation: the pair
// and narrowing 64-bit atomic visitors are currently only implemented
// for ia32 (see the ia32 instruction selector).
#if !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord64AtomicNarrowAdd(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord64AtomicNarrowSub(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord64AtomicNarrowAnd(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord64AtomicNarrowOr(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord64AtomicNarrowXor(Node* node) {
  UNIMPLEMENTED();
}
#endif  // !V8_TARGET_ARCH_IA32
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
......
......@@ -731,6 +731,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
ArchOpcode uint16_op,
ArchOpcode uint32_op,
ArchOpcode uint64_op);
void VisitWord64AtomicNarrowBinop(Node* node, ArchOpcode uint8_op,
ArchOpcode uint16_op, ArchOpcode uint32_op);
// ===========================================================================
......
......@@ -117,6 +117,23 @@ int GetReturnCountAfterLowering(Signature<MachineRepresentation>* signature) {
} // namespace
// Lowers a full-width Word64Atomic* binop to its Word32AtomicPair*
// equivalent: the single 64-bit value input (input 2) is replaced by its
// already-lowered low/high word pair, the operator is swapped, and the
// node's uses are rewired to projections of the two 32-bit result halves.
void Int64Lowering::LowerWord64AtomicBinop(Node* node, const Operator* op) {
  DCHECK_EQ(5, node->InputCount());
  Node* value = node->InputAt(2);
  node->ReplaceInput(2, GetReplacementLow(value));
  node->InsertInput(zone(), 3, GetReplacementHigh(value));
  NodeProperties::ChangeOp(node, op);
  ReplaceNodeWithProjections(node);
}
// Lowers a narrow (8/16/32-bit) Word64Atomic* binop: inputs keep only
// their low words (DefaultLowering with low_word_only = true), and the
// 64-bit result becomes two projections — the high half is produced as
// zero by the backend (see the ia32 code generator).
void Int64Lowering::LowerWord64AtomicNarrowBinop(Node* node,
                                                 const Operator* op) {
  DCHECK_EQ(5, node->InputCount());
  DefaultLowering(node, true);
  NodeProperties::ChangeOp(node, op);
  ReplaceNodeWithProjections(node);
}
// static
int Int64Lowering::GetParameterCountAfterLowering(
Signature<MachineRepresentation>* signature) {
......@@ -867,6 +884,23 @@ void Int64Lowering::LowerNode(Node* node) {
node->NullAllInputs();
break;
}
#define ATOMIC_CASE(name) \
case IrOpcode::kWord64Atomic##name: { \
MachineType type = AtomicOpType(node->op()); \
if (type == MachineType::Uint64()) { \
LowerWord64AtomicBinop(node, machine()->Word32AtomicPair##name()); \
} else { \
LowerWord64AtomicNarrowBinop(node, \
machine()->Word64AtomicNarrow##name(type)); \
} \
break; \
}
ATOMIC_CASE(Add)
ATOMIC_CASE(Sub)
ATOMIC_CASE(And)
ATOMIC_CASE(Or)
ATOMIC_CASE(Xor)
#undef ATOMIC_CASE
default: { DefaultLowering(node); }
}
......
......@@ -50,6 +50,8 @@ class V8_EXPORT_PRIVATE Int64Lowering {
bool DefaultLowering(Node* node, bool low_word_only = false);
void LowerComparison(Node* node, const Operator* signed_op,
const Operator* unsigned_op);
void LowerWord64AtomicBinop(Node* node, const Operator* op);
void LowerWord64AtomicNarrowBinop(Node* node, const Operator* op);
void ReplaceNode(Node* old, Node* new_low, Node* new_high);
bool HasReplacementLow(Node* node);
......
......@@ -390,18 +390,19 @@ MachineType AtomicOpType(Operator const* op) {
V(kTaggedPointer) \
V(kTagged)
#define ATOMIC_U32_TYPE_LIST(V) \
V(Uint8) \
V(Uint16) \
V(Uint32)
#define ATOMIC_TYPE_LIST(V) \
ATOMIC_U32_TYPE_LIST(V) \
V(Int8) \
V(Uint8) \
V(Int16) \
V(Uint16) \
V(Int32) \
V(Uint32)
V(Int32)
#define ATOMIC64_TYPE_LIST(V) \
V(Uint8) \
V(Uint16) \
V(Uint32) \
#define ATOMIC_U64_TYPE_LIST(V) \
ATOMIC_U32_TYPE_LIST(V) \
V(Uint64)
#define ATOMIC_REPRESENTATION_LIST(V) \
......@@ -413,6 +414,13 @@ MachineType AtomicOpType(Operator const* op) {
ATOMIC_REPRESENTATION_LIST(V) \
V(kWord64)
#define ATOMIC_PAIR_BINOP_LIST(V) \
V(Add) \
V(Sub) \
V(And) \
V(Or) \
V(Xor)
#define SIMD_LANE_OP_LIST(V) \
V(F32x4, 4) \
V(I32x4, 4) \
......@@ -592,7 +600,7 @@ struct MachineOperatorGlobalCache {
"Word64AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
Word64AtomicLoad##Type##Operator kWord64AtomicLoad##Type;
ATOMIC64_TYPE_LIST(ATOMIC_LOAD)
ATOMIC_U64_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
#define ATOMIC_STORE(Type) \
......@@ -647,7 +655,7 @@ struct MachineOperatorGlobalCache {
ATOMIC_OP(Word64AtomicOr, type) \
ATOMIC_OP(Word64AtomicXor, type) \
ATOMIC_OP(Word64AtomicExchange, type)
ATOMIC64_TYPE_LIST(ATOMIC64_OP_LIST)
ATOMIC_U64_TYPE_LIST(ATOMIC64_OP_LIST)
#undef ATOMIC64_OP_LIST
#undef ATOMIC_OP
......@@ -676,9 +684,39 @@ struct MachineOperatorGlobalCache {
}; \
Word64AtomicCompareExchange##Type##Operator \
kWord64AtomicCompareExchange##Type;
ATOMIC64_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
ATOMIC_U64_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
#undef ATOMIC_COMPARE_EXCHANGE
// Defines one cached Operator per register-pair atomic binop. Signature:
// 4 value inputs (value low/high, base, index), 1 effect + 1 control
// input, 2 value outputs (old value low/high) and 1 effect output.
// BUG FIX: '##' is not expanded inside a string literal, so the original
// "Word32AtomicPair##op" gave every operator the literal mnemonic
// 'Word32AtomicPair##op'. Use stringification + literal concatenation.
#define ATOMIC_PAIR_OP(op)                                      \
  struct Word32AtomicPair##op##Operator : public Operator {     \
    Word32AtomicPair##op##Operator()                            \
        : Operator(IrOpcode::kWord32AtomicPair##op,             \
                   Operator::kNoDeopt | Operator::kNoThrow,     \
                   "Word32AtomicPair" #op, 4, 1, 1, 2, 1, 0) {} \
  };                                                            \
  Word32AtomicPair##op##Operator kWord32AtomicPair##op;
  ATOMIC_PAIR_BINOP_LIST(ATOMIC_PAIR_OP)
#undef ATOMIC_PAIR_OP
#undef ATOMIC_PAIR_BINOP_LIST
// Defines one cached Operator1<MachineType> per narrowing 64-bit atomic
// binop and per 8/16/32-bit memory type. Signature: 3 value inputs
// (base, index, value), 1 effect + 1 control input, 2 value outputs (the
// i64 result as a low/high pair) and 1 effect output.
// BUG FIX: the original mnemonic was the literal string "#op" — the
// stringification operator only works on a bare #op token, not inside
// quotes. Use #op so the operator prints its actual opcode name.
#define ATOMIC64_NARROW_OP(op, type)                                     \
  struct op##type##Operator : public Operator1<MachineType> {            \
    op##type##Operator()                                                 \
        : Operator1<MachineType>(                                        \
              IrOpcode::k##op, Operator::kNoDeopt | Operator::kNoThrow,  \
              #op, 3, 1, 1, 2, 1, 0, MachineType::type()) {}             \
  };                                                                     \
  op##type##Operator k##op##type;
#define ATOMIC_OP_LIST(type)                      \
  ATOMIC64_NARROW_OP(Word64AtomicNarrowAdd, type) \
  ATOMIC64_NARROW_OP(Word64AtomicNarrowSub, type) \
  ATOMIC64_NARROW_OP(Word64AtomicNarrowAnd, type) \
  ATOMIC64_NARROW_OP(Word64AtomicNarrowOr, type)  \
  ATOMIC64_NARROW_OP(Word64AtomicNarrowXor, type)
  ATOMIC_U32_TYPE_LIST(ATOMIC_OP_LIST)
#undef ATOMIC_OP_LIST
#undef ATOMIC64_NARROW_OP
// The {BitcastWordToTagged} operator must not be marked as pure (especially
// not idempotent), because otherwise the splitting logic in the Scheduler
// might decide to split these operators, thus potentially creating live
......@@ -1036,25 +1074,13 @@ const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType type) {
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::TaggedPoisonOnSpeculation() {
return &cache_.kTaggedPoisonOnSpeculation;
}
const Operator* MachineOperatorBuilder::Word32PoisonOnSpeculation() {
return &cache_.kWord32PoisonOnSpeculation;
}
const Operator* MachineOperatorBuilder::Word64PoisonOnSpeculation() {
return &cache_.kWord64PoisonOnSpeculation;
}
// Returns the cached Word64AtomicLoad operator for |rep|; only the
// unsigned 8/16/32/64-bit representations are valid, anything else is a
// bug in the caller.
const Operator* MachineOperatorBuilder::Word64AtomicLoad(
    LoadRepresentation rep) {
#define LOAD(Type)                          \
  if (rep == MachineType::Type()) {         \
    return &cache_.kWord64AtomicLoad##Type; \
  }
  ATOMIC_U64_TYPE_LIST(LOAD)
#undef LOAD
  UNREACHABLE();
}
......@@ -1075,7 +1101,7 @@ const Operator* MachineOperatorBuilder::Word64AtomicAdd(MachineType type) {
if (type == MachineType::kType()) { \
return &cache_.kWord64AtomicAdd##kType; \
}
ATOMIC64_TYPE_LIST(ADD)
ATOMIC_U64_TYPE_LIST(ADD)
#undef ADD
UNREACHABLE();
}
......@@ -1085,7 +1111,7 @@ const Operator* MachineOperatorBuilder::Word64AtomicSub(MachineType type) {
if (type == MachineType::kType()) { \
return &cache_.kWord64AtomicSub##kType; \
}
ATOMIC64_TYPE_LIST(SUB)
ATOMIC_U64_TYPE_LIST(SUB)
#undef SUB
UNREACHABLE();
}
......@@ -1095,7 +1121,7 @@ const Operator* MachineOperatorBuilder::Word64AtomicAnd(MachineType type) {
if (type == MachineType::kType()) { \
return &cache_.kWord64AtomicAnd##kType; \
}
ATOMIC64_TYPE_LIST(AND)
ATOMIC_U64_TYPE_LIST(AND)
#undef AND
UNREACHABLE();
}
......@@ -1105,7 +1131,7 @@ const Operator* MachineOperatorBuilder::Word64AtomicOr(MachineType type) {
if (type == MachineType::kType()) { \
return &cache_.kWord64AtomicOr##kType; \
}
ATOMIC64_TYPE_LIST(OR)
ATOMIC_U64_TYPE_LIST(OR)
#undef OR
UNREACHABLE();
}
......@@ -1115,7 +1141,7 @@ const Operator* MachineOperatorBuilder::Word64AtomicXor(MachineType type) {
if (type == MachineType::kType()) { \
return &cache_.kWord64AtomicXor##kType; \
}
ATOMIC64_TYPE_LIST(XOR)
ATOMIC_U64_TYPE_LIST(XOR)
#undef XOR
UNREACHABLE();
}
......@@ -1125,7 +1151,7 @@ const Operator* MachineOperatorBuilder::Word64AtomicExchange(MachineType type) {
if (type == MachineType::kType()) { \
return &cache_.kWord64AtomicExchange##kType; \
}
ATOMIC64_TYPE_LIST(EXCHANGE)
ATOMIC_U64_TYPE_LIST(EXCHANGE)
#undef EXCHANGE
UNREACHABLE();
}
......@@ -1136,11 +1162,97 @@ const Operator* MachineOperatorBuilder::Word64AtomicCompareExchange(
if (type == MachineType::kType()) { \
return &cache_.kWord64AtomicCompareExchange##kType; \
}
ATOMIC64_TYPE_LIST(COMPARE_EXCHANGE)
ATOMIC_U64_TYPE_LIST(COMPARE_EXCHANGE)
#undef COMPARE_EXCHANGE
UNREACHABLE();
}
// Accessors for the cached register-pair atomic operators. Unlike the
// Word64Atomic* builders these take no MachineType: the pair ops always
// operate on a full two-word (64-bit) memory location.
const Operator* MachineOperatorBuilder::Word32AtomicPairAdd() {
  return &cache_.kWord32AtomicPairAdd;
}

const Operator* MachineOperatorBuilder::Word32AtomicPairSub() {
  return &cache_.kWord32AtomicPairSub;
}

const Operator* MachineOperatorBuilder::Word32AtomicPairAnd() {
  return &cache_.kWord32AtomicPairAnd;
}

const Operator* MachineOperatorBuilder::Word32AtomicPairOr() {
  return &cache_.kWord32AtomicPairOr;
}

const Operator* MachineOperatorBuilder::Word32AtomicPairXor() {
  return &cache_.kWord32AtomicPairXor;
}
// Builders for the narrowing 64-bit atomic operators. Each returns the
// cached operator for |type|, which must be one of Uint8/Uint16/Uint32;
// any other type is a bug in the caller.
const Operator* MachineOperatorBuilder::Word64AtomicNarrowAdd(
    MachineType type) {
#define ADD(kType)                                \
  if (type == MachineType::kType()) {             \
    return &cache_.kWord64AtomicNarrowAdd##kType; \
  }
  ATOMIC_U32_TYPE_LIST(ADD)
#undef ADD
  UNREACHABLE();
}

const Operator* MachineOperatorBuilder::Word64AtomicNarrowSub(
    MachineType type) {
#define SUB(kType)                                \
  if (type == MachineType::kType()) {             \
    return &cache_.kWord64AtomicNarrowSub##kType; \
  }
  ATOMIC_U32_TYPE_LIST(SUB)
#undef SUB
  UNREACHABLE();
}

const Operator* MachineOperatorBuilder::Word64AtomicNarrowAnd(
    MachineType type) {
#define AND(kType)                                \
  if (type == MachineType::kType()) {             \
    return &cache_.kWord64AtomicNarrowAnd##kType; \
  }
  ATOMIC_U32_TYPE_LIST(AND)
#undef AND
  UNREACHABLE();
}

const Operator* MachineOperatorBuilder::Word64AtomicNarrowOr(MachineType type) {
#define OR(kType)                                \
  if (type == MachineType::kType()) {            \
    return &cache_.kWord64AtomicNarrowOr##kType; \
  }
  ATOMIC_U32_TYPE_LIST(OR)
#undef OR
  UNREACHABLE();
}

const Operator* MachineOperatorBuilder::Word64AtomicNarrowXor(
    MachineType type) {
#define XOR(kType)                                \
  if (type == MachineType::kType()) {             \
    return &cache_.kWord64AtomicNarrowXor##kType; \
  }
  ATOMIC_U32_TYPE_LIST(XOR)
#undef XOR
  UNREACHABLE();
}
// Accessors for the cached speculation-poisoning operator singletons.
const Operator* MachineOperatorBuilder::TaggedPoisonOnSpeculation() {
  return &cache_.kTaggedPoisonOnSpeculation;
}

const Operator* MachineOperatorBuilder::Word32PoisonOnSpeculation() {
  return &cache_.kWord32PoisonOnSpeculation;
}

const Operator* MachineOperatorBuilder::Word64PoisonOnSpeculation() {
  return &cache_.kWord64PoisonOnSpeculation;
}
const OptionalOperator MachineOperatorBuilder::SpeculationFence() {
return OptionalOperator(flags_ & kSpeculationFence,
&cache_.kSpeculationFence);
......@@ -1203,7 +1315,8 @@ const Operator* MachineOperatorBuilder::S8x16Shuffle(
#undef MACHINE_TYPE_LIST
#undef MACHINE_REPRESENTATION_LIST
#undef ATOMIC_TYPE_LIST
#undef ATOMIC64_TYPE_LIST
#undef ATOMIC_U64_TYPE_LIST
#undef ATOMIC_U32_TYPE_LIST
#undef ATOMIC_REPRESENTATION_LIST
#undef ATOMIC64_REPRESENTATION_LIST
#undef SIMD_LANE_OP_LIST
......
......@@ -640,9 +640,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// atomic-or [base + index], value
const Operator* Word32AtomicOr(MachineType type);
// atomic-xor [base + index], value
const Operator* Word32AtomicXor(MachineType type);
// atomic-load [base + index]
const Operator* Word64AtomicAdd(MachineType type);
const Operator* Word32AtomicXor(MachineType rep);
// atomic-add [base + index], value
const Operator* Word64AtomicAdd(MachineType rep);
// atomic-sub [base + index], value
const Operator* Word64AtomicSub(MachineType type);
// atomic-and [base + index], value
......@@ -650,7 +650,27 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// atomic-or [base + index], value
const Operator* Word64AtomicOr(MachineType type);
// atomic-xor [base + index], value
const Operator* Word64AtomicXor(MachineType type);
const Operator* Word64AtomicXor(MachineType rep);
// atomic-narrow-add [base + index], value
const Operator* Word64AtomicNarrowAdd(MachineType type);
// atomic-narrow-sub [base + index], value
const Operator* Word64AtomicNarrowSub(MachineType type);
// atomic-narrow-and [base + index], value
const Operator* Word64AtomicNarrowAnd(MachineType type);
// atomic-narrow-or [base + index], value
const Operator* Word64AtomicNarrowOr(MachineType type);
// atomic-narrow-xor [base + index], value
const Operator* Word64AtomicNarrowXor(MachineType type);
// atomic-pair-add [base + index], value_high, value_low
const Operator* Word32AtomicPairAdd();
// atomic-pair-sub [base + index], value_high, value-low
const Operator* Word32AtomicPairSub();
// atomic-pair-and [base + index], value_high, value_low
const Operator* Word32AtomicPairAnd();
// atomic-pair-or [base + index], value_high, value_low
const Operator* Word32AtomicPairOr();
// atomic-pair-xor [base + index], value_high, value_low
const Operator* Word32AtomicPairXor();
const OptionalOperator SpeculationFence();
......
......@@ -562,106 +562,119 @@
V(Float64Mod) \
V(Float64Pow)
#define MACHINE_OP_LIST(V) \
MACHINE_UNOP_32_LIST(V) \
MACHINE_BINOP_32_LIST(V) \
MACHINE_BINOP_64_LIST(V) \
MACHINE_COMPARE_BINOP_LIST(V) \
MACHINE_FLOAT32_BINOP_LIST(V) \
MACHINE_FLOAT32_UNOP_LIST(V) \
MACHINE_FLOAT64_BINOP_LIST(V) \
MACHINE_FLOAT64_UNOP_LIST(V) \
V(DebugAbort) \
V(DebugBreak) \
V(Comment) \
V(Load) \
V(PoisonedLoad) \
V(Store) \
V(StackSlot) \
V(Word32Popcnt) \
V(Word64Popcnt) \
V(Word64Clz) \
V(Word64Ctz) \
V(Word64ReverseBits) \
V(Word64ReverseBytes) \
V(Int64AbsWithOverflow) \
V(BitcastTaggedToWord) \
V(BitcastWordToTagged) \
V(BitcastWordToTaggedSigned) \
V(TruncateFloat64ToWord32) \
V(ChangeFloat32ToFloat64) \
V(ChangeFloat64ToInt32) \
V(ChangeFloat64ToUint32) \
V(ChangeFloat64ToUint64) \
V(Float64SilenceNaN) \
V(TruncateFloat64ToUint32) \
V(TruncateFloat32ToInt32) \
V(TruncateFloat32ToUint32) \
V(TryTruncateFloat32ToInt64) \
V(TryTruncateFloat64ToInt64) \
V(TryTruncateFloat32ToUint64) \
V(TryTruncateFloat64ToUint64) \
V(ChangeInt32ToFloat64) \
V(ChangeInt32ToInt64) \
V(ChangeUint32ToFloat64) \
V(ChangeUint32ToUint64) \
V(TruncateFloat64ToFloat32) \
V(TruncateInt64ToInt32) \
V(RoundFloat64ToInt32) \
V(RoundInt32ToFloat32) \
V(RoundInt64ToFloat32) \
V(RoundInt64ToFloat64) \
V(RoundUint32ToFloat32) \
V(RoundUint64ToFloat32) \
V(RoundUint64ToFloat64) \
V(BitcastFloat32ToInt32) \
V(BitcastFloat64ToInt64) \
V(BitcastInt32ToFloat32) \
V(BitcastInt64ToFloat64) \
V(Float64ExtractLowWord32) \
V(Float64ExtractHighWord32) \
V(Float64InsertLowWord32) \
V(Float64InsertHighWord32) \
V(TaggedPoisonOnSpeculation) \
V(Word32PoisonOnSpeculation) \
V(Word64PoisonOnSpeculation) \
V(LoadStackPointer) \
V(LoadFramePointer) \
V(LoadParentFramePointer) \
V(UnalignedLoad) \
V(UnalignedStore) \
V(Int32PairAdd) \
V(Int32PairSub) \
V(Int32PairMul) \
V(Word32PairShl) \
V(Word32PairShr) \
V(Word32PairSar) \
V(ProtectedLoad) \
V(ProtectedStore) \
V(Word32AtomicLoad) \
V(Word32AtomicStore) \
V(Word32AtomicExchange) \
V(Word32AtomicCompareExchange) \
V(Word32AtomicAdd) \
V(Word32AtomicSub) \
V(Word32AtomicAnd) \
V(Word32AtomicOr) \
V(Word32AtomicXor) \
V(Word64AtomicLoad) \
V(Word64AtomicStore) \
V(Word64AtomicAdd) \
V(Word64AtomicSub) \
V(Word64AtomicAnd) \
V(Word64AtomicOr) \
V(Word64AtomicXor) \
V(Word64AtomicExchange) \
V(Word64AtomicCompareExchange) \
V(SpeculationFence) \
V(SignExtendWord8ToInt32) \
V(SignExtendWord16ToInt32) \
V(SignExtendWord8ToInt64) \
V(SignExtendWord16ToInt64) \
V(SignExtendWord32ToInt64) \
// All Word64 atomic machine opcodes, including the narrowing variants,
// grouped into one list; MACHINE_OP_LIST below includes it as a unit.
#define MACHINE_WORD64_ATOMIC_OP_LIST(V) \
  V(Word64AtomicLoad)                    \
  V(Word64AtomicStore)                   \
  V(Word64AtomicAdd)                     \
  V(Word64AtomicSub)                     \
  V(Word64AtomicAnd)                     \
  V(Word64AtomicOr)                      \
  V(Word64AtomicXor)                     \
  V(Word64AtomicExchange)                \
  V(Word64AtomicCompareExchange)         \
  V(Word64AtomicNarrowAdd)               \
  V(Word64AtomicNarrowSub)               \
  V(Word64AtomicNarrowAnd)               \
  V(Word64AtomicNarrowOr)                \
  V(Word64AtomicNarrowXor)
#define MACHINE_OP_LIST(V) \
MACHINE_UNOP_32_LIST(V) \
MACHINE_BINOP_32_LIST(V) \
MACHINE_BINOP_64_LIST(V) \
MACHINE_COMPARE_BINOP_LIST(V) \
MACHINE_FLOAT32_BINOP_LIST(V) \
MACHINE_FLOAT32_UNOP_LIST(V) \
MACHINE_FLOAT64_BINOP_LIST(V) \
MACHINE_FLOAT64_UNOP_LIST(V) \
MACHINE_WORD64_ATOMIC_OP_LIST(V) \
V(DebugAbort) \
V(DebugBreak) \
V(Comment) \
V(Load) \
V(PoisonedLoad) \
V(Store) \
V(StackSlot) \
V(Word32Popcnt) \
V(Word64Popcnt) \
V(Word64Clz) \
V(Word64Ctz) \
V(Word64ReverseBits) \
V(Word64ReverseBytes) \
V(Int64AbsWithOverflow) \
V(BitcastTaggedToWord) \
V(BitcastWordToTagged) \
V(BitcastWordToTaggedSigned) \
V(TruncateFloat64ToWord32) \
V(ChangeFloat32ToFloat64) \
V(ChangeFloat64ToInt32) \
V(ChangeFloat64ToUint32) \
V(ChangeFloat64ToUint64) \
V(Float64SilenceNaN) \
V(TruncateFloat64ToUint32) \
V(TruncateFloat32ToInt32) \
V(TruncateFloat32ToUint32) \
V(TryTruncateFloat32ToInt64) \
V(TryTruncateFloat64ToInt64) \
V(TryTruncateFloat32ToUint64) \
V(TryTruncateFloat64ToUint64) \
V(ChangeInt32ToFloat64) \
V(ChangeInt32ToInt64) \
V(ChangeUint32ToFloat64) \
V(ChangeUint32ToUint64) \
V(TruncateFloat64ToFloat32) \
V(TruncateInt64ToInt32) \
V(RoundFloat64ToInt32) \
V(RoundInt32ToFloat32) \
V(RoundInt64ToFloat32) \
V(RoundInt64ToFloat64) \
V(RoundUint32ToFloat32) \
V(RoundUint64ToFloat32) \
V(RoundUint64ToFloat64) \
V(BitcastFloat32ToInt32) \
V(BitcastFloat64ToInt64) \
V(BitcastInt32ToFloat32) \
V(BitcastInt64ToFloat64) \
V(Float64ExtractLowWord32) \
V(Float64ExtractHighWord32) \
V(Float64InsertLowWord32) \
V(Float64InsertHighWord32) \
V(TaggedPoisonOnSpeculation) \
V(Word32PoisonOnSpeculation) \
V(Word64PoisonOnSpeculation) \
V(LoadStackPointer) \
V(LoadFramePointer) \
V(LoadParentFramePointer) \
V(UnalignedLoad) \
V(UnalignedStore) \
V(Int32PairAdd) \
V(Int32PairSub) \
V(Int32PairMul) \
V(Word32PairShl) \
V(Word32PairShr) \
V(Word32PairSar) \
V(ProtectedLoad) \
V(ProtectedStore) \
V(Word32AtomicLoad) \
V(Word32AtomicStore) \
V(Word32AtomicExchange) \
V(Word32AtomicCompareExchange) \
V(Word32AtomicAdd) \
V(Word32AtomicSub) \
V(Word32AtomicAnd) \
V(Word32AtomicOr) \
V(Word32AtomicXor) \
V(Word32AtomicPairAdd) \
V(Word32AtomicPairSub) \
V(Word32AtomicPairAnd) \
V(Word32AtomicPairOr) \
V(Word32AtomicPairXor) \
V(SpeculationFence) \
V(SignExtendWord8ToInt32) \
V(SignExtendWord16ToInt32) \
V(SignExtendWord8ToInt64) \
V(SignExtendWord16ToInt64) \
V(SignExtendWord32ToInt64) \
V(UnsafePointerAdd)
#define MACHINE_SIMD_OP_LIST(V) \
......
......@@ -1738,6 +1738,16 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord64AtomicXor:
case IrOpcode::kWord64AtomicExchange:
case IrOpcode::kWord64AtomicCompareExchange:
case IrOpcode::kWord32AtomicPairAdd:
case IrOpcode::kWord32AtomicPairSub:
case IrOpcode::kWord32AtomicPairAnd:
case IrOpcode::kWord32AtomicPairOr:
case IrOpcode::kWord32AtomicPairXor:
case IrOpcode::kWord64AtomicNarrowAdd:
case IrOpcode::kWord64AtomicNarrowSub:
case IrOpcode::kWord64AtomicNarrowAnd:
case IrOpcode::kWord64AtomicNarrowOr:
case IrOpcode::kWord64AtomicNarrowXor:
case IrOpcode::kSpeculationFence:
case IrOpcode::kSignExtendWord8ToInt32:
case IrOpcode::kSignExtendWord16ToInt32:
......
......@@ -308,6 +308,7 @@ v8_source_set("cctest_sources") {
"test-code-stubs.h",
"test-disasm-ia32.cc",
"test-log-stack-tracer.cc",
"wasm/test-run-wasm-atomics64.cc",
]
} else if (v8_current_cpu == "mips") {
sources += [ ### gcmole(arch:mips) ###
......
......@@ -47,9 +47,11 @@ WASM_EXEC_TEST(I64AtomicOr) {
// Runs the full-width 64-bit atomic XOR opcode and checks it against the
// Xor reference operation (the expected_op passed to the harness).
WASM_EXEC_TEST(I64AtomicXor) {
  RunU64BinOp(execution_mode, kExprI64AtomicXor, Xor);
}
// I64AtomicExchange is guarded: per this guard it is only exercised on x64
// and arm64 — presumably the exchange op is not yet implemented on other
// targets (e.g. ia32, which this change extends with binary ops only).
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_EXEC_TEST(I64AtomicExchange) {
  RunU64BinOp(execution_mode, kExprI64AtomicExchange, Exchange);
}
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
void RunU32BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
Uint32BinOp expected_op) {
......@@ -88,9 +90,11 @@ WASM_EXEC_TEST(I64AtomicOr32U) {
// 32-bit-narrow variant: atomic XOR on the low 32 bits of an i64 location,
// validated against the Xor reference operation.
WASM_EXEC_TEST(I64AtomicXor32U) {
  RunU32BinOp(execution_mode, kExprI64AtomicXor32U, Xor);
}
// 32-bit-narrow exchange is restricted to x64/arm64 by this guard
// (exchange support is not yet available on the remaining architectures).
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_EXEC_TEST(I64AtomicExchange32U) {
  RunU32BinOp(execution_mode, kExprI64AtomicExchange32U, Exchange);
}
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
void RunU16BinOp(WasmExecutionMode mode, WasmOpcode wasm_op,
Uint16BinOp expected_op) {
......@@ -129,9 +133,11 @@ WASM_EXEC_TEST(I64AtomicOr16U) {
// 16-bit-narrow variant: atomic XOR on a 16-bit slice of an i64 location,
// validated against the Xor reference operation.
WASM_EXEC_TEST(I64AtomicXor16U) {
  RunU16BinOp(execution_mode, kExprI64AtomicXor16U, Xor);
}
// 16-bit-narrow exchange is restricted to x64/arm64 by this guard
// (exchange support is not yet available on the remaining architectures).
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_EXEC_TEST(I64AtomicExchange16U) {
  RunU16BinOp(execution_mode, kExprI64AtomicExchange16U, Exchange);
}
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
void RunU8BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
Uint8BinOp expected_op) {
......@@ -169,6 +175,7 @@ WASM_EXEC_TEST(I64AtomicOr8U) {
WASM_EXEC_TEST(I64AtomicXor8U) {
RunU8BinOp(execution_mode, kExprI64AtomicXor8U, Xor);
}
// 8-bit-narrow exchange, x64/arm64 only. NOTE(review): the matching #endif
// for this #if lies further down the file, past content omitted from this
// diff hunk — confirm the guarded region covers only exchange/store-load
// tests.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
WASM_EXEC_TEST(I64AtomicExchange8U) {
  RunU8BinOp(execution_mode, kExprI64AtomicExchange8U, Exchange);
}
......@@ -401,6 +408,7 @@ WASM_EXEC_TEST(I64AtomicStoreLoad8U) {
CHECK_EQ(*i, r.builder().ReadMemory(&memory[0]));
}
}
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
} // namespace test_run_wasm_atomics_64
} // namespace wasm
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment