Commit 41ceccc5 authored by Deepti Gandluri's avatar Deepti Gandluri Committed by Commit Bot

[wasm] Add I64{Exchange, CompareExchange} ops for x64

Bug: v8:6532

Change-Id: Ida865c9cc7c029cf070b24296f6ef7bb573b30c4
Reviewed-on: https://chromium-review.googlesource.com/947094
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Ben Smith <binji@chromium.org>
Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51790}
parent 34a2d29f
......@@ -1554,7 +1554,9 @@ void InstructionSelector::VisitNode(Node* node) {
ATOMIC_CASE(Xor, Word32)
ATOMIC_CASE(Xor, Word64)
ATOMIC_CASE(Exchange, Word32)
ATOMIC_CASE(Exchange, Word64)
ATOMIC_CASE(CompareExchange, Word32)
ATOMIC_CASE(CompareExchange, Word64)
#undef ATOMIC_CASE
case IrOpcode::kSpeculationFence:
return VisitSpeculationFence(node);
......@@ -2171,6 +2173,14 @@ void InstructionSelector::VisitWord64AtomicAnd(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicOr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicXor(Node* node) { UNIMPLEMENTED(); }
// Portable fallback for 64-bit atomic exchange instruction selection.
// Only x64 provides a real implementation in this change (guarded by the
// !V8_TARGET_ARCH_X64 block this stub lives in); other targets crash here.
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
UNIMPLEMENTED();
}
// Portable fallback for 64-bit atomic compare-exchange instruction
// selection; unimplemented on every architecture except x64 (see the
// surrounding !V8_TARGET_ARCH_X64 guard).
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_X64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
......
......@@ -603,13 +603,13 @@ struct MachineOperatorGlobalCache {
3, 1, 1, 1, 1, 0, MachineType::type()) {} \
}; \
op##type##Operator k##op##type;
#define ATOMIC_OP_LIST(type) \
ATOMIC_OP(Word32AtomicExchange, type) \
ATOMIC_OP(Word32AtomicAdd, type) \
ATOMIC_OP(Word32AtomicSub, type) \
ATOMIC_OP(Word32AtomicAnd, type) \
ATOMIC_OP(Word32AtomicOr, type) \
ATOMIC_OP(Word32AtomicXor, type)
#define ATOMIC_OP_LIST(type) \
ATOMIC_OP(Word32AtomicAdd, type) \
ATOMIC_OP(Word32AtomicSub, type) \
ATOMIC_OP(Word32AtomicAnd, type) \
ATOMIC_OP(Word32AtomicOr, type) \
ATOMIC_OP(Word32AtomicXor, type) \
ATOMIC_OP(Word32AtomicExchange, type)
ATOMIC_TYPE_LIST(ATOMIC_OP_LIST)
#undef ATOMIC_OP_LIST
#define ATOMIC64_OP_LIST(type) \
......@@ -617,7 +617,8 @@ struct MachineOperatorGlobalCache {
ATOMIC_OP(Word64AtomicSub, type) \
ATOMIC_OP(Word64AtomicAnd, type) \
ATOMIC_OP(Word64AtomicOr, type) \
ATOMIC_OP(Word64AtomicXor, type)
ATOMIC_OP(Word64AtomicXor, type) \
ATOMIC_OP(Word64AtomicExchange, type)
ATOMIC64_TYPE_LIST(ATOMIC64_OP_LIST)
#undef ATOMIC64_OP_LIST
#undef ATOMIC_OP
......@@ -636,6 +637,20 @@ struct MachineOperatorGlobalCache {
ATOMIC_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
#undef ATOMIC_COMPARE_EXCHANGE
#define ATOMIC_COMPARE_EXCHANGE(Type) \
struct Word64AtomicCompareExchange##Type##Operator \
: public Operator1<MachineType> { \
Word64AtomicCompareExchange##Type##Operator() \
: Operator1<MachineType>(IrOpcode::kWord64AtomicCompareExchange, \
Operator::kNoDeopt | Operator::kNoThrow, \
"Word64AtomicCompareExchange", 4, 1, 1, 1, 1, \
0, MachineType::Type()) {} \
}; \
Word64AtomicCompareExchange##Type##Operator \
kWord64AtomicCompareExchange##Type;
ATOMIC64_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
#undef ATOMIC_COMPARE_EXCHANGE
// The {BitcastWordToTagged} operator must not be marked as pure (especially
// not idempotent), because otherwise the splitting logic in the Scheduler
// might decide to split these operators, thus potentially creating live
......@@ -1008,6 +1023,27 @@ const Operator* MachineOperatorBuilder::Word64AtomicXor(MachineType rep) {
UNREACHABLE();
}
// Returns the cached Word64AtomicExchange operator for the requested
// representation. Valid representations are exactly those expanded by
// ATOMIC64_TYPE_LIST; any other MachineType is a caller bug.
const Operator* MachineOperatorBuilder::Word64AtomicExchange(MachineType rep) {
#define ATOMIC_EXCHANGE_CASE(kRep)              \
  if (rep == MachineType::kRep()) {             \
    return &cache_.kWord64AtomicExchange##kRep; \
  }
  ATOMIC64_TYPE_LIST(ATOMIC_EXCHANGE_CASE)
#undef ATOMIC_EXCHANGE_CASE
  UNREACHABLE();
}
// Returns the cached Word64AtomicCompareExchange operator for the
// requested representation. Supported types come from ATOMIC64_TYPE_LIST;
// anything else is unreachable.
const Operator* MachineOperatorBuilder::Word64AtomicCompareExchange(
    MachineType rep) {
#define ATOMIC_CMPXCHG_CASE(kRep)                      \
  if (rep == MachineType::kRep()) {                    \
    return &cache_.kWord64AtomicCompareExchange##kRep; \
  }
  ATOMIC64_TYPE_LIST(ATOMIC_CMPXCHG_CASE)
#undef ATOMIC_CMPXCHG_CASE
  UNREACHABLE();
}
const OptionalOperator MachineOperatorBuilder::SpeculationFence() {
return OptionalOperator(flags_ & kSpeculationFence,
&cache_.kSpeculationFence);
......
......@@ -615,8 +615,12 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Word32AtomicStore(MachineRepresentation rep);
// atomic-exchange [base + index], value
const Operator* Word32AtomicExchange(MachineType rep);
// atomic-exchange [base + index], value
const Operator* Word64AtomicExchange(MachineType rep);
// atomic-compare-exchange [base + index], old_value, new_value
const Operator* Word32AtomicCompareExchange(MachineType rep);
// atomic-compare-exchange [base + index], old_value, new_value
const Operator* Word64AtomicCompareExchange(MachineType rep);
// atomic-add [base + index], value
const Operator* Word32AtomicAdd(MachineType rep);
// atomic-sub [base + index], value
......
......@@ -636,6 +636,8 @@
V(Word64AtomicAnd) \
V(Word64AtomicOr) \
V(Word64AtomicXor) \
V(Word64AtomicExchange) \
V(Word64AtomicCompareExchange) \
V(SpeculationFence) \
V(SignExtendWord8ToInt32) \
V(SignExtendWord16ToInt32) \
......
......@@ -1699,6 +1699,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord64AtomicAnd:
case IrOpcode::kWord64AtomicOr:
case IrOpcode::kWord64AtomicXor:
case IrOpcode::kWord64AtomicExchange:
case IrOpcode::kWord64AtomicCompareExchange:
case IrOpcode::kSpeculationFence:
case IrOpcode::kSignExtendWord8ToInt32:
case IrOpcode::kSignExtendWord16ToInt32:
......
......@@ -4545,50 +4545,58 @@ Node* WasmGraphBuilder::Simd8x16ShuffleOp(const uint8_t shuffle[16],
inputs[0], inputs[1]);
}
#define ATOMIC_BINOP_LIST(V) \
V(I32AtomicAdd, Add, Uint32, Word32) \
V(I64AtomicAdd, Add, Uint64, Word64) \
V(I32AtomicAdd8U, Add, Uint8, Word32) \
V(I32AtomicAdd16U, Add, Uint16, Word32) \
V(I64AtomicAdd8U, Add, Uint8, Word64) \
V(I64AtomicAdd16U, Add, Uint16, Word64) \
V(I64AtomicAdd32U, Add, Uint32, Word64) \
V(I32AtomicSub, Sub, Uint32, Word32) \
V(I64AtomicSub, Sub, Uint64, Word64) \
V(I32AtomicSub8U, Sub, Uint8, Word32) \
V(I32AtomicSub16U, Sub, Uint16, Word32) \
V(I64AtomicSub8U, Sub, Uint8, Word64) \
V(I64AtomicSub16U, Sub, Uint16, Word64) \
V(I64AtomicSub32U, Sub, Uint32, Word64) \
V(I32AtomicAnd, And, Uint32, Word32) \
V(I64AtomicAnd, And, Uint64, Word64) \
V(I32AtomicAnd8U, And, Uint8, Word32) \
V(I64AtomicAnd16U, And, Uint16, Word64) \
V(I32AtomicAnd16U, And, Uint16, Word32) \
V(I64AtomicAnd8U, And, Uint8, Word64) \
V(I64AtomicAnd32U, And, Uint32, Word64) \
V(I32AtomicOr, Or, Uint32, Word32) \
V(I64AtomicOr, Or, Uint64, Word64) \
V(I32AtomicOr8U, Or, Uint8, Word32) \
V(I32AtomicOr16U, Or, Uint16, Word32) \
V(I64AtomicOr8U, Or, Uint8, Word64) \
V(I64AtomicOr16U, Or, Uint16, Word64) \
V(I64AtomicOr32U, Or, Uint32, Word64) \
V(I32AtomicXor, Xor, Uint32, Word32) \
V(I64AtomicXor, Xor, Uint64, Word64) \
V(I32AtomicXor8U, Xor, Uint8, Word32) \
V(I32AtomicXor16U, Xor, Uint16, Word32) \
V(I64AtomicXor8U, Xor, Uint8, Word64) \
V(I64AtomicXor16U, Xor, Uint16, Word64) \
V(I64AtomicXor32U, Xor, Uint32, Word64) \
V(I32AtomicExchange, Exchange, Uint32, Word32) \
V(I32AtomicExchange8U, Exchange, Uint8, Word32) \
V(I32AtomicExchange16U, Exchange, Uint16, Word32)
#define ATOMIC_TERNARY_LIST(V) \
V(I32AtomicCompareExchange, CompareExchange, Uint32) \
V(I32AtomicCompareExchange8U, CompareExchange, Uint8) \
V(I32AtomicCompareExchange16U, CompareExchange, Uint16)
#define ATOMIC_BINOP_LIST(V) \
V(I32AtomicAdd, Add, Uint32, Word32) \
V(I64AtomicAdd, Add, Uint64, Word64) \
V(I32AtomicAdd8U, Add, Uint8, Word32) \
V(I32AtomicAdd16U, Add, Uint16, Word32) \
V(I64AtomicAdd8U, Add, Uint8, Word64) \
V(I64AtomicAdd16U, Add, Uint16, Word64) \
V(I64AtomicAdd32U, Add, Uint32, Word64) \
V(I32AtomicSub, Sub, Uint32, Word32) \
V(I64AtomicSub, Sub, Uint64, Word64) \
V(I32AtomicSub8U, Sub, Uint8, Word32) \
V(I32AtomicSub16U, Sub, Uint16, Word32) \
V(I64AtomicSub8U, Sub, Uint8, Word64) \
V(I64AtomicSub16U, Sub, Uint16, Word64) \
V(I64AtomicSub32U, Sub, Uint32, Word64) \
V(I32AtomicAnd, And, Uint32, Word32) \
V(I64AtomicAnd, And, Uint64, Word64) \
V(I32AtomicAnd8U, And, Uint8, Word32) \
V(I64AtomicAnd16U, And, Uint16, Word64) \
V(I32AtomicAnd16U, And, Uint16, Word32) \
V(I64AtomicAnd8U, And, Uint8, Word64) \
V(I64AtomicAnd32U, And, Uint32, Word64) \
V(I32AtomicOr, Or, Uint32, Word32) \
V(I64AtomicOr, Or, Uint64, Word64) \
V(I32AtomicOr8U, Or, Uint8, Word32) \
V(I32AtomicOr16U, Or, Uint16, Word32) \
V(I64AtomicOr8U, Or, Uint8, Word64) \
V(I64AtomicOr16U, Or, Uint16, Word64) \
V(I64AtomicOr32U, Or, Uint32, Word64) \
V(I32AtomicXor, Xor, Uint32, Word32) \
V(I64AtomicXor, Xor, Uint64, Word64) \
V(I32AtomicXor8U, Xor, Uint8, Word32) \
V(I32AtomicXor16U, Xor, Uint16, Word32) \
V(I64AtomicXor8U, Xor, Uint8, Word64) \
V(I64AtomicXor16U, Xor, Uint16, Word64) \
V(I64AtomicXor32U, Xor, Uint32, Word64) \
V(I32AtomicExchange, Exchange, Uint32, Word32) \
V(I64AtomicExchange, Exchange, Uint64, Word64) \
V(I32AtomicExchange8U, Exchange, Uint8, Word32) \
V(I32AtomicExchange16U, Exchange, Uint16, Word32) \
V(I64AtomicExchange8U, Exchange, Uint8, Word64) \
V(I64AtomicExchange16U, Exchange, Uint16, Word64) \
V(I64AtomicExchange32U, Exchange, Uint32, Word64)
#define ATOMIC_TERNARY_LIST(V) \
V(I32AtomicCompareExchange, CompareExchange, Uint32, Word32) \
V(I64AtomicCompareExchange, CompareExchange, Uint64, Word64) \
V(I32AtomicCompareExchange8U, CompareExchange, Uint8, Word32) \
V(I32AtomicCompareExchange16U, CompareExchange, Uint16, Word32) \
V(I64AtomicCompareExchange8U, CompareExchange, Uint8, Word64) \
V(I64AtomicCompareExchange16U, CompareExchange, Uint16, Word64) \
V(I64AtomicCompareExchange32U, CompareExchange, Uint32, Word64)
#define ATOMIC_LOAD_LIST(V) \
V(I32AtomicLoad, Uint32) \
......@@ -4619,13 +4627,13 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
ATOMIC_BINOP_LIST(BUILD_ATOMIC_BINOP)
#undef BUILD_ATOMIC_BINOP
#define BUILD_ATOMIC_TERNARY_OP(Name, Operation, Type) \
#define BUILD_ATOMIC_TERNARY_OP(Name, Operation, Type, Prefix) \
case wasm::kExpr##Name: { \
Node* index = \
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \
jsgraph()->machine()->Word32Atomic##Operation(MachineType::Type()), \
jsgraph()->machine()->Prefix##Atomic##Operation(MachineType::Type()), \
MemBuffer(offset), index, inputs[1], inputs[2], *effect_, *control_); \
break; \
}
......
......@@ -2736,6 +2736,46 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, orl)
ATOMIC_BINOP_CASE(Xor, xorl)
#undef ATOMIC_BINOP_CASE
case kX64Word64AtomicExchangeUint8: {
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
__ movzxbq(i.InputRegister(0), i.InputRegister(0));
break;
}
case kX64Word64AtomicExchangeUint16: {
__ xchgw(i.InputRegister(0), i.MemoryOperand(1));
__ movzxwq(i.InputRegister(0), i.InputRegister(0));
break;
}
case kX64Word64AtomicExchangeUint32: {
__ xchgl(i.InputRegister(0), i.MemoryOperand(1));
break;
}
case kX64Word64AtomicExchangeUint64: {
__ xchgq(i.InputRegister(0), i.MemoryOperand(1));
break;
}
case kX64Word64AtomicCompareExchangeUint8: {
__ lock();
__ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
__ movzxbq(rax, rax);
break;
}
case kX64Word64AtomicCompareExchangeUint16: {
__ lock();
__ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
__ movzxwq(rax, rax);
break;
}
case kX64Word64AtomicCompareExchangeUint32: {
__ lock();
__ cmpxchgl(i.MemoryOperand(2), i.InputRegister(1));
break;
}
case kX64Word64AtomicCompareExchangeUint64: {
__ lock();
__ cmpxchgq(i.MemoryOperand(2), i.InputRegister(1));
break;
}
#define ATOMIC64_BINOP_CASE(op, inst) \
case kX64Word64Atomic##op##Uint8: \
ASSEMBLE_ATOMIC64_BINOP(inst, movb, cmpxchgb); \
......
This diff is collapsed.
......@@ -287,6 +287,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Word64AtomicXorUint16:
case kX64Word64AtomicXorUint32:
case kX64Word64AtomicXorUint64:
case kX64Word64AtomicExchangeUint8:
case kX64Word64AtomicExchangeUint16:
case kX64Word64AtomicExchangeUint32:
case kX64Word64AtomicExchangeUint64:
case kX64Word64AtomicCompareExchangeUint8:
case kX64Word64AtomicCompareExchangeUint16:
case kX64Word64AtomicCompareExchangeUint32:
case kX64Word64AtomicCompareExchangeUint64:
return kHasSideEffect;
#define CASE(Name) case k##Name:
......
......@@ -2239,6 +2239,44 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
// x64 instruction selection for a 64-bit atomic exchange.
// The exchanged value is both input and output of xchg, so the node is
// defined same-as-first; base/index/value are kept in unique registers to
// avoid aliasing the in-place operand.
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
  X64OperandGenerator g(this);
  // Pick the opcode from the access width recorded on the operator.
  MachineType rep = AtomicOpRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  if (rep == MachineType::Uint8()) {
    opcode = kX64Word64AtomicExchangeUint8;
  } else if (rep == MachineType::Uint16()) {
    opcode = kX64Word64AtomicExchangeUint16;
  } else if (rep == MachineType::Uint32()) {
    opcode = kX64Word64AtomicExchangeUint32;
  } else if (rep == MachineType::Uint64()) {
    opcode = kX64Word64AtomicExchangeUint64;
  } else {
    UNREACHABLE();
    return;
  }
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  AddressingMode mode;
  InstructionOperand inputs[3];
  size_t num_inputs = 0;
  // Operand order: value first (it doubles as the result), then address.
  inputs[num_inputs++] = g.UseUniqueRegister(value);
  inputs[num_inputs++] = g.UseUniqueRegister(base);
  if (g.CanBeImmediate(index)) {
    mode = kMode_MRI;
    inputs[num_inputs++] = g.UseImmediate(index);
  } else {
    mode = kMode_MR1;
    inputs[num_inputs++] = g.UseUniqueRegister(index);
  }
  InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
  Emit(opcode | AddressingModeField::encode(mode), 1, outputs, num_inputs,
       inputs);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
X64OperandGenerator g(this);
Node* base = node->InputAt(0);
......@@ -2281,6 +2319,46 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
// x64 instruction selection for a 64-bit atomic compare-exchange.
// cmpxchg implicitly reads the expected value from rax and leaves the
// observed value there, so old_value and the node's result are both
// pinned to rax; new_value and the address stay in unique registers.
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
  X64OperandGenerator g(this);
  // Pick the opcode from the access width recorded on the operator.
  MachineType rep = AtomicOpRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  if (rep == MachineType::Uint8()) {
    opcode = kX64Word64AtomicCompareExchangeUint8;
  } else if (rep == MachineType::Uint16()) {
    opcode = kX64Word64AtomicCompareExchangeUint16;
  } else if (rep == MachineType::Uint32()) {
    opcode = kX64Word64AtomicCompareExchangeUint32;
  } else if (rep == MachineType::Uint64()) {
    opcode = kX64Word64AtomicCompareExchangeUint64;
  } else {
    UNREACHABLE();
    return;
  }
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* old_value = node->InputAt(2);
  Node* new_value = node->InputAt(3);
  AddressingMode mode;
  InstructionOperand inputs[4];
  size_t num_inputs = 0;
  inputs[num_inputs++] = g.UseFixed(old_value, rax);
  inputs[num_inputs++] = g.UseUniqueRegister(new_value);
  inputs[num_inputs++] = g.UseUniqueRegister(base);
  if (g.CanBeImmediate(index)) {
    mode = kMode_MRI;
    inputs[num_inputs++] = g.UseImmediate(index);
  } else {
    mode = kMode_MR1;
    inputs[num_inputs++] = g.UseUniqueRegister(index);
  }
  InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
  Emit(opcode | AddressingModeField::encode(mode), 1, outputs, num_inputs,
       inputs);
}
void InstructionSelector::VisitAtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
......
......@@ -54,51 +54,59 @@ struct WasmException;
(this->errorf(this->pc_, "%s: %s", WasmOpcodes::OpcodeName(opcode), \
(message)))
#define ATOMIC_OP_LIST(V) \
V(I32AtomicLoad, Uint32) \
V(I32AtomicLoad8U, Uint8) \
V(I32AtomicLoad16U, Uint16) \
V(I32AtomicAdd, Uint32) \
V(I32AtomicAdd8U, Uint8) \
V(I32AtomicAdd16U, Uint16) \
V(I64AtomicAdd, Uint64) \
V(I64AtomicAdd8U, Uint8) \
V(I64AtomicAdd16U, Uint16) \
V(I64AtomicAdd32U, Uint32) \
V(I32AtomicSub, Uint32) \
V(I64AtomicSub, Uint64) \
V(I32AtomicSub8U, Uint8) \
V(I32AtomicSub16U, Uint16) \
V(I64AtomicSub8U, Uint8) \
V(I64AtomicSub16U, Uint16) \
V(I64AtomicSub32U, Uint32) \
V(I32AtomicAnd, Uint32) \
V(I64AtomicAnd, Uint64) \
V(I32AtomicAnd8U, Uint8) \
V(I32AtomicAnd16U, Uint16) \
V(I64AtomicAnd8U, Uint8) \
V(I64AtomicAnd16U, Uint16) \
V(I64AtomicAnd32U, Uint32) \
V(I32AtomicOr, Uint32) \
V(I64AtomicOr, Uint64) \
V(I32AtomicOr8U, Uint8) \
V(I32AtomicOr16U, Uint16) \
V(I64AtomicOr8U, Uint8) \
V(I64AtomicOr16U, Uint16) \
V(I64AtomicOr32U, Uint32) \
V(I32AtomicXor, Uint32) \
V(I64AtomicXor, Uint64) \
V(I32AtomicXor8U, Uint8) \
V(I32AtomicXor16U, Uint16) \
V(I64AtomicXor8U, Uint8) \
V(I64AtomicXor16U, Uint16) \
V(I64AtomicXor32U, Uint32) \
V(I32AtomicExchange, Uint32) \
V(I32AtomicExchange8U, Uint8) \
V(I32AtomicExchange16U, Uint16) \
V(I32AtomicCompareExchange, Uint32) \
V(I32AtomicCompareExchange8U, Uint8) \
V(I32AtomicCompareExchange16U, Uint16)
#define ATOMIC_OP_LIST(V) \
V(I32AtomicLoad, Uint32) \
V(I32AtomicLoad8U, Uint8) \
V(I32AtomicLoad16U, Uint16) \
V(I32AtomicAdd, Uint32) \
V(I32AtomicAdd8U, Uint8) \
V(I32AtomicAdd16U, Uint16) \
V(I64AtomicAdd, Uint64) \
V(I64AtomicAdd8U, Uint8) \
V(I64AtomicAdd16U, Uint16) \
V(I64AtomicAdd32U, Uint32) \
V(I32AtomicSub, Uint32) \
V(I64AtomicSub, Uint64) \
V(I32AtomicSub8U, Uint8) \
V(I32AtomicSub16U, Uint16) \
V(I64AtomicSub8U, Uint8) \
V(I64AtomicSub16U, Uint16) \
V(I64AtomicSub32U, Uint32) \
V(I32AtomicAnd, Uint32) \
V(I64AtomicAnd, Uint64) \
V(I32AtomicAnd8U, Uint8) \
V(I32AtomicAnd16U, Uint16) \
V(I64AtomicAnd8U, Uint8) \
V(I64AtomicAnd16U, Uint16) \
V(I64AtomicAnd32U, Uint32) \
V(I32AtomicOr, Uint32) \
V(I64AtomicOr, Uint64) \
V(I32AtomicOr8U, Uint8) \
V(I32AtomicOr16U, Uint16) \
V(I64AtomicOr8U, Uint8) \
V(I64AtomicOr16U, Uint16) \
V(I64AtomicOr32U, Uint32) \
V(I32AtomicXor, Uint32) \
V(I64AtomicXor, Uint64) \
V(I32AtomicXor8U, Uint8) \
V(I32AtomicXor16U, Uint16) \
V(I64AtomicXor8U, Uint8) \
V(I64AtomicXor16U, Uint16) \
V(I64AtomicXor32U, Uint32) \
V(I32AtomicExchange, Uint32) \
V(I64AtomicExchange, Uint64) \
V(I32AtomicExchange8U, Uint8) \
V(I32AtomicExchange16U, Uint16) \
V(I64AtomicExchange8U, Uint8) \
V(I64AtomicExchange16U, Uint16) \
V(I64AtomicExchange32U, Uint32) \
V(I32AtomicCompareExchange, Uint32) \
V(I64AtomicCompareExchange, Uint64) \
V(I32AtomicCompareExchange8U, Uint8) \
V(I32AtomicCompareExchange16U, Uint16) \
V(I64AtomicCompareExchange8U, Uint8) \
V(I64AtomicCompareExchange16U, Uint16) \
V(I64AtomicCompareExchange32U, Uint32)
#define ATOMIC_STORE_OP_LIST(V) \
V(I32AtomicStore, Uint32) \
......
......@@ -260,8 +260,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_UNSIGNED_ALL_OP(AtomicAnd, "atomic_and")
CASE_UNSIGNED_ALL_OP(AtomicOr, "atomic_or")
CASE_UNSIGNED_ALL_OP(AtomicXor, "atomic_xor")
CASE_U32_OP(AtomicExchange, "atomic_xchng")
CASE_U32_OP(AtomicCompareExchange, "atomic_cmpxchng")
CASE_UNSIGNED_ALL_OP(AtomicExchange, "atomic_xchng")
CASE_UNSIGNED_ALL_OP(AtomicCompareExchange, "atomic_cmpxchng")
default : return "unknown";
// clang-format on
......
......@@ -414,62 +414,70 @@ using WasmName = Vector<const char>;
V(I64SConvertSatF64, 0xfc06, l_d) \
V(I64UConvertSatF64, 0xfc07, l_d)
#define FOREACH_ATOMIC_OPCODE(V) \
V(I32AtomicLoad, 0xfe10, i_i) \
V(I64AtomicLoad, 0xfe11, l_i) \
V(I32AtomicLoad8U, 0xfe12, i_i) \
V(I32AtomicLoad16U, 0xfe13, i_i) \
V(I64AtomicLoad8U, 0xfe14, l_i) \
V(I64AtomicLoad16U, 0xfe15, l_i) \
V(I64AtomicLoad32U, 0xfe16, l_i) \
V(I32AtomicStore, 0xfe17, v_ii) \
V(I64AtomicStore, 0xfe18, v_il) \
V(I32AtomicStore8U, 0xfe19, v_ii) \
V(I32AtomicStore16U, 0xfe1a, v_ii) \
V(I64AtomicStore8U, 0xfe1b, v_il) \
V(I64AtomicStore16U, 0xfe1c, v_il) \
V(I64AtomicStore32U, 0xfe1d, v_il) \
V(I32AtomicAdd, 0xfe1e, i_ii) \
V(I64AtomicAdd, 0xfe1f, l_il) \
V(I32AtomicAdd8U, 0xfe20, i_ii) \
V(I32AtomicAdd16U, 0xfe21, i_ii) \
V(I64AtomicAdd8U, 0xfe22, l_il) \
V(I64AtomicAdd16U, 0xfe23, l_il) \
V(I64AtomicAdd32U, 0xfe24, l_il) \
V(I32AtomicSub, 0xfe25, i_ii) \
V(I64AtomicSub, 0xfe26, l_il) \
V(I32AtomicSub8U, 0xfe27, i_ii) \
V(I32AtomicSub16U, 0xfe28, i_ii) \
V(I64AtomicSub8U, 0xfe29, l_il) \
V(I64AtomicSub16U, 0xfe2a, l_il) \
V(I64AtomicSub32U, 0xfe2b, l_il) \
V(I32AtomicAnd, 0xfe2c, i_ii) \
V(I64AtomicAnd, 0xfe2d, l_il) \
V(I32AtomicAnd8U, 0xfe2e, i_ii) \
V(I32AtomicAnd16U, 0xfe2f, i_ii) \
V(I64AtomicAnd8U, 0xfe30, l_il) \
V(I64AtomicAnd16U, 0xfe31, l_il) \
V(I64AtomicAnd32U, 0xfe32, l_il) \
V(I32AtomicOr, 0xfe33, i_ii) \
V(I64AtomicOr, 0xfe34, l_il) \
V(I32AtomicOr8U, 0xfe35, i_ii) \
V(I32AtomicOr16U, 0xfe36, i_ii) \
V(I64AtomicOr8U, 0xfe37, l_il) \
V(I64AtomicOr16U, 0xfe38, l_il) \
V(I64AtomicOr32U, 0xfe39, l_il) \
V(I32AtomicXor, 0xfe3a, i_ii) \
V(I64AtomicXor, 0xfe3b, l_il) \
V(I32AtomicXor8U, 0xfe3c, i_ii) \
V(I32AtomicXor16U, 0xfe3d, i_ii) \
V(I64AtomicXor8U, 0xfe3e, l_il) \
V(I64AtomicXor16U, 0xfe3f, l_il) \
V(I64AtomicXor32U, 0xfe40, l_il) \
V(I32AtomicExchange, 0xfe41, i_ii) \
V(I32AtomicExchange8U, 0xfe43, i_ii) \
V(I32AtomicExchange16U, 0xfe44, i_ii) \
V(I32AtomicCompareExchange, 0xfe48, i_iii) \
V(I32AtomicCompareExchange8U, 0xfe4a, i_iii) \
V(I32AtomicCompareExchange16U, 0xfe4b, i_iii)
#define FOREACH_ATOMIC_OPCODE(V) \
V(I32AtomicLoad, 0xfe10, i_i) \
V(I64AtomicLoad, 0xfe11, l_i) \
V(I32AtomicLoad8U, 0xfe12, i_i) \
V(I32AtomicLoad16U, 0xfe13, i_i) \
V(I64AtomicLoad8U, 0xfe14, l_i) \
V(I64AtomicLoad16U, 0xfe15, l_i) \
V(I64AtomicLoad32U, 0xfe16, l_i) \
V(I32AtomicStore, 0xfe17, v_ii) \
V(I64AtomicStore, 0xfe18, v_il) \
V(I32AtomicStore8U, 0xfe19, v_ii) \
V(I32AtomicStore16U, 0xfe1a, v_ii) \
V(I64AtomicStore8U, 0xfe1b, v_il) \
V(I64AtomicStore16U, 0xfe1c, v_il) \
V(I64AtomicStore32U, 0xfe1d, v_il) \
V(I32AtomicAdd, 0xfe1e, i_ii) \
V(I64AtomicAdd, 0xfe1f, l_il) \
V(I32AtomicAdd8U, 0xfe20, i_ii) \
V(I32AtomicAdd16U, 0xfe21, i_ii) \
V(I64AtomicAdd8U, 0xfe22, l_il) \
V(I64AtomicAdd16U, 0xfe23, l_il) \
V(I64AtomicAdd32U, 0xfe24, l_il) \
V(I32AtomicSub, 0xfe25, i_ii) \
V(I64AtomicSub, 0xfe26, l_il) \
V(I32AtomicSub8U, 0xfe27, i_ii) \
V(I32AtomicSub16U, 0xfe28, i_ii) \
V(I64AtomicSub8U, 0xfe29, l_il) \
V(I64AtomicSub16U, 0xfe2a, l_il) \
V(I64AtomicSub32U, 0xfe2b, l_il) \
V(I32AtomicAnd, 0xfe2c, i_ii) \
V(I64AtomicAnd, 0xfe2d, l_il) \
V(I32AtomicAnd8U, 0xfe2e, i_ii) \
V(I32AtomicAnd16U, 0xfe2f, i_ii) \
V(I64AtomicAnd8U, 0xfe30, l_il) \
V(I64AtomicAnd16U, 0xfe31, l_il) \
V(I64AtomicAnd32U, 0xfe32, l_il) \
V(I32AtomicOr, 0xfe33, i_ii) \
V(I64AtomicOr, 0xfe34, l_il) \
V(I32AtomicOr8U, 0xfe35, i_ii) \
V(I32AtomicOr16U, 0xfe36, i_ii) \
V(I64AtomicOr8U, 0xfe37, l_il) \
V(I64AtomicOr16U, 0xfe38, l_il) \
V(I64AtomicOr32U, 0xfe39, l_il) \
V(I32AtomicXor, 0xfe3a, i_ii) \
V(I64AtomicXor, 0xfe3b, l_il) \
V(I32AtomicXor8U, 0xfe3c, i_ii) \
V(I32AtomicXor16U, 0xfe3d, i_ii) \
V(I64AtomicXor8U, 0xfe3e, l_il) \
V(I64AtomicXor16U, 0xfe3f, l_il) \
V(I64AtomicXor32U, 0xfe40, l_il) \
V(I32AtomicExchange, 0xfe41, i_ii) \
V(I64AtomicExchange, 0xfe42, l_il) \
V(I32AtomicExchange8U, 0xfe43, i_ii) \
V(I32AtomicExchange16U, 0xfe44, i_ii) \
V(I64AtomicExchange8U, 0xfe45, l_il) \
V(I64AtomicExchange16U, 0xfe46, l_il) \
V(I64AtomicExchange32U, 0xfe47, l_il) \
V(I32AtomicCompareExchange, 0xfe48, i_iii) \
V(I64AtomicCompareExchange, 0xfe49, l_ill) \
V(I32AtomicCompareExchange8U, 0xfe4a, i_iii) \
V(I32AtomicCompareExchange16U, 0xfe4b, i_iii) \
V(I64AtomicCompareExchange8U, 0xfe4c, l_ill) \
V(I64AtomicCompareExchange16U, 0xfe4d, l_ill) \
V(I64AtomicCompareExchange32U, 0xfe4e, l_ill)
// All opcodes.
#define FOREACH_OPCODE(V) \
......@@ -488,40 +496,41 @@ using WasmName = Vector<const char>;
FOREACH_NUMERIC_OPCODE(V)
// All signatures.
#define FOREACH_SIGNATURE(V) \
FOREACH_SIMD_SIGNATURE(V) \
V(i_ii, kWasmI32, kWasmI32, kWasmI32) \
V(i_i, kWasmI32, kWasmI32) \
V(i_v, kWasmI32) \
V(i_ff, kWasmI32, kWasmF32, kWasmF32) \
V(i_f, kWasmI32, kWasmF32) \
V(i_dd, kWasmI32, kWasmF64, kWasmF64) \
V(i_d, kWasmI32, kWasmF64) \
V(i_l, kWasmI32, kWasmI64) \
V(l_ll, kWasmI64, kWasmI64, kWasmI64) \
V(i_ll, kWasmI32, kWasmI64, kWasmI64) \
V(l_l, kWasmI64, kWasmI64) \
V(l_i, kWasmI64, kWasmI32) \
V(l_f, kWasmI64, kWasmF32) \
V(l_d, kWasmI64, kWasmF64) \
V(f_ff, kWasmF32, kWasmF32, kWasmF32) \
V(f_f, kWasmF32, kWasmF32) \
V(f_d, kWasmF32, kWasmF64) \
V(f_i, kWasmF32, kWasmI32) \
V(f_l, kWasmF32, kWasmI64) \
V(d_dd, kWasmF64, kWasmF64, kWasmF64) \
V(d_d, kWasmF64, kWasmF64) \
V(d_f, kWasmF64, kWasmF32) \
V(d_i, kWasmF64, kWasmI32) \
V(d_l, kWasmF64, kWasmI64) \
V(v_ii, kWasmStmt, kWasmI32, kWasmI32) \
V(v_id, kWasmStmt, kWasmI32, kWasmF64) \
V(d_id, kWasmF64, kWasmI32, kWasmF64) \
V(v_if, kWasmStmt, kWasmI32, kWasmF32) \
V(f_if, kWasmF32, kWasmI32, kWasmF32) \
V(v_il, kWasmStmt, kWasmI32, kWasmI64) \
V(l_il, kWasmI64, kWasmI32, kWasmI64) \
V(i_iii, kWasmI32, kWasmI32, kWasmI32, kWasmI32)
#define FOREACH_SIGNATURE(V) \
FOREACH_SIMD_SIGNATURE(V) \
V(i_ii, kWasmI32, kWasmI32, kWasmI32) \
V(i_i, kWasmI32, kWasmI32) \
V(i_v, kWasmI32) \
V(i_ff, kWasmI32, kWasmF32, kWasmF32) \
V(i_f, kWasmI32, kWasmF32) \
V(i_dd, kWasmI32, kWasmF64, kWasmF64) \
V(i_d, kWasmI32, kWasmF64) \
V(i_l, kWasmI32, kWasmI64) \
V(l_ll, kWasmI64, kWasmI64, kWasmI64) \
V(i_ll, kWasmI32, kWasmI64, kWasmI64) \
V(l_l, kWasmI64, kWasmI64) \
V(l_i, kWasmI64, kWasmI32) \
V(l_f, kWasmI64, kWasmF32) \
V(l_d, kWasmI64, kWasmF64) \
V(f_ff, kWasmF32, kWasmF32, kWasmF32) \
V(f_f, kWasmF32, kWasmF32) \
V(f_d, kWasmF32, kWasmF64) \
V(f_i, kWasmF32, kWasmI32) \
V(f_l, kWasmF32, kWasmI64) \
V(d_dd, kWasmF64, kWasmF64, kWasmF64) \
V(d_d, kWasmF64, kWasmF64) \
V(d_f, kWasmF64, kWasmF32) \
V(d_i, kWasmF64, kWasmI32) \
V(d_l, kWasmF64, kWasmI64) \
V(v_ii, kWasmStmt, kWasmI32, kWasmI32) \
V(v_id, kWasmStmt, kWasmI32, kWasmF64) \
V(d_id, kWasmF64, kWasmI32, kWasmF64) \
V(v_if, kWasmStmt, kWasmI32, kWasmF32) \
V(f_if, kWasmF32, kWasmI32, kWasmF32) \
V(v_il, kWasmStmt, kWasmI32, kWasmI64) \
V(l_il, kWasmI64, kWasmI32, kWasmI64) \
V(i_iii, kWasmI32, kWasmI32, kWasmI32, kWasmI32) \
V(l_ill, kWasmI64, kWasmI32, kWasmI64, kWasmI64)
#define FOREACH_SIMD_SIGNATURE(V) \
V(s_s, kWasmS128, kWasmS128) \
......
......@@ -46,6 +46,9 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr) {
WASM_COMPILED_EXEC_TEST(I64AtomicXor) {
RunU64BinOp(execution_mode, kExprI64AtomicXor, Xor);
}
// Full-width (64-bit) atomic exchange, driven by the shared binop harness.
WASM_COMPILED_EXEC_TEST(I64AtomicExchange) {
RunU64BinOp(execution_mode, kExprI64AtomicExchange, Exchange);
}
void RunU32BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
Uint32BinOp expected_op) {
......@@ -83,6 +86,9 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr32U) {
WASM_COMPILED_EXEC_TEST(I64AtomicXor32U) {
RunU32BinOp(execution_mode, kExprI64AtomicXor32U, Xor);
}
// 32-bit zero-extending variant of the i64 atomic exchange.
WASM_COMPILED_EXEC_TEST(I64AtomicExchange32U) {
RunU32BinOp(execution_mode, kExprI64AtomicExchange32U, Exchange);
}
void RunU16BinOp(WasmExecutionMode mode, WasmOpcode wasm_op,
Uint16BinOp expected_op) {
......@@ -120,6 +126,9 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr16U) {
WASM_COMPILED_EXEC_TEST(I64AtomicXor16U) {
RunU16BinOp(execution_mode, kExprI64AtomicXor16U, Xor);
}
// 16-bit zero-extending variant of the i64 atomic exchange.
WASM_COMPILED_EXEC_TEST(I64AtomicExchange16U) {
RunU16BinOp(execution_mode, kExprI64AtomicExchange16U, Exchange);
}
void RunU8BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
Uint8BinOp expected_op) {
......@@ -157,7 +166,91 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr8U) {
WASM_COMPILED_EXEC_TEST(I64AtomicXor8U) {
RunU8BinOp(execution_mode, kExprI64AtomicXor8U, Xor);
}
// 8-bit zero-extending variant of the i64 atomic exchange.
WASM_COMPILED_EXEC_TEST(I64AtomicExchange8U) {
RunU8BinOp(execution_mode, kExprI64AtomicExchange8U, Exchange);
}
// Full-width i64 compare-exchange: for every (initial, expected) pair,
// checks that the op returns the value observed in memory (always the
// initial value here) and that memory afterwards matches the reference
// CompareExchange helper.
WASM_COMPILED_EXEC_TEST(I64AtomicCompareExchange) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
r.builder().SetHasSharedMemory();
uint64_t* memory = r.builder().AddMemoryElems<uint64_t>(8);
BUILD(r, WASM_ATOMICS_TERNARY_OP(
kExprI64AtomicCompareExchange, WASM_I32V_1(0), WASM_GET_LOCAL(0),
WASM_GET_LOCAL(1), MachineRepresentation::kWord64));
FOR_UINT64_INPUTS(i) {
uint64_t initial = *i;
FOR_UINT64_INPUTS(j) {
// Reset memory each iteration so the observed value is deterministic.
r.builder().WriteMemory(&memory[0], initial);
// Compare-exchange returns the previous memory contents.
CHECK_EQ(initial, r.Call(*i, *j));
uint64_t expected = CompareExchange(initial, *i, *j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
}
// 32-bit narrow i64 compare-exchange: memory is viewed as uint32, the
// wasm result is the zero-extended previous 32-bit value.
WASM_COMPILED_EXEC_TEST(I64AtomicCompareExchange32U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
r.builder().SetHasSharedMemory();
uint32_t* memory = r.builder().AddMemoryElems<uint32_t>(8);
BUILD(r, WASM_ATOMICS_TERNARY_OP(kExprI64AtomicCompareExchange32U,
WASM_I32V_1(0), WASM_GET_LOCAL(0),
WASM_GET_LOCAL(1),
MachineRepresentation::kWord32));
FOR_UINT32_INPUTS(i) {
uint32_t initial = *i;
FOR_UINT32_INPUTS(j) {
// Reset memory each iteration so the observed value is deterministic.
r.builder().WriteMemory(&memory[0], initial);
// Compare-exchange returns the previous memory contents.
CHECK_EQ(initial, r.Call(*i, *j));
uint32_t expected = CompareExchange(initial, *i, *j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
}
// 16-bit narrow i64 compare-exchange: memory is viewed as uint16, the
// wasm result is the zero-extended previous 16-bit value.
WASM_COMPILED_EXEC_TEST(I64AtomicCompareExchange16U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
r.builder().SetHasSharedMemory();
uint16_t* memory = r.builder().AddMemoryElems<uint16_t>(8);
BUILD(r, WASM_ATOMICS_TERNARY_OP(kExprI64AtomicCompareExchange16U,
WASM_I32V_1(0), WASM_GET_LOCAL(0),
WASM_GET_LOCAL(1),
MachineRepresentation::kWord16));
FOR_UINT16_INPUTS(i) {
uint16_t initial = *i;
FOR_UINT16_INPUTS(j) {
// Reset memory each iteration so the observed value is deterministic.
r.builder().WriteMemory(&memory[0], initial);
// Compare-exchange returns the previous memory contents.
CHECK_EQ(initial, r.Call(*i, *j));
uint16_t expected = CompareExchange(initial, *i, *j);
CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
}
}
}
// 8-bit narrow i64 compare-exchange: memory is viewed as uint8, the wasm
// result is the zero-extended previous byte.
// Renamed from I32AtomicCompareExchange8U: the body builds the I64 opcode
// (kExprI64AtomicCompareExchange8U) with a uint64 runner, so the old I32
// name was a misnomer and inconsistent with the sibling I64 tests above.
WASM_COMPILED_EXEC_TEST(I64AtomicCompareExchange8U) {
  EXPERIMENTAL_FLAG_SCOPE(threads);
  WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
  r.builder().SetHasSharedMemory();
  uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(8);
  BUILD(r,
        WASM_ATOMICS_TERNARY_OP(kExprI64AtomicCompareExchange8U, WASM_I32V_1(0),
                                WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
                                MachineRepresentation::kWord8));
  FOR_UINT8_INPUTS(i) {
    uint8_t initial = *i;
    FOR_UINT8_INPUTS(j) {
      // Reset memory each iteration so the observed value is deterministic.
      r.builder().WriteMemory(&memory[0], initial);
      // Compare-exchange returns the previous memory contents.
      CHECK_EQ(initial, r.Call(*i, *j));
      uint8_t expected = CompareExchange(initial, *i, *j);
      CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
    }
  }
}
} // namespace test_run_wasm_atomics_64
} // namespace wasm
} // namespace internal
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.