Commit 8d29d92f authored by Deepti Gandluri, committed by Commit Bot

[wasm] Add I64Atomic Load/Store ops

Bug: v8:6532

Change-Id: I62e62f6584d1d42dc8af713b874daafa1f8d4436
Reviewed-on: https://chromium-review.googlesource.com/969991
Reviewed-by: Ben Smith <binji@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52253}
parent fae1ab03
...@@ -1669,8 +1669,15 @@ void InstructionSelector::VisitNode(Node* node) { ...@@ -1669,8 +1669,15 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(type.representation(), node); MarkAsRepresentation(type.representation(), node);
return VisitWord32AtomicLoad(node); return VisitWord32AtomicLoad(node);
} }
case IrOpcode::kWord64AtomicLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitWord64AtomicLoad(node);
}
case IrOpcode::kWord32AtomicStore: case IrOpcode::kWord32AtomicStore:
return VisitWord32AtomicStore(node); return VisitWord32AtomicStore(node);
case IrOpcode::kWord64AtomicStore:
return VisitWord64AtomicStore(node);
#define ATOMIC_CASE(name, rep) \ #define ATOMIC_CASE(name, rep) \
case IrOpcode::k##rep##Atomic##name: { \ case IrOpcode::k##rep##Atomic##name: { \
MachineType type = AtomicOpRepresentationOf(node->op()); \ MachineType type = AtomicOpRepresentationOf(node->op()); \
...@@ -2311,6 +2318,12 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) { ...@@ -2311,6 +2318,12 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
// && !V8_TARGET_ARCH_MIPS64 // && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_X64 #if !V8_TARGET_ARCH_X64
// Default stubs for architectures without 64-bit atomic support; only x64
// provides real implementations in this change (guarded by the
// !V8_TARGET_ARCH_X64 block above).
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitWord64AtomicAdd(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitWord64AtomicAdd(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicSub(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitWord64AtomicSub(Node* node) { UNIMPLEMENTED(); }
......
...@@ -38,6 +38,7 @@ LoadRepresentation LoadRepresentationOf(Operator const* op) { ...@@ -38,6 +38,7 @@ LoadRepresentation LoadRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kLoad == op->opcode() || DCHECK(IrOpcode::kLoad == op->opcode() ||
IrOpcode::kProtectedLoad == op->opcode() || IrOpcode::kProtectedLoad == op->opcode() ||
IrOpcode::kWord32AtomicLoad == op->opcode() || IrOpcode::kWord32AtomicLoad == op->opcode() ||
IrOpcode::kWord64AtomicLoad == op->opcode() ||
IrOpcode::kPoisonedLoad == op->opcode() || IrOpcode::kPoisonedLoad == op->opcode() ||
IrOpcode::kUnalignedLoad == op->opcode()); IrOpcode::kUnalignedLoad == op->opcode());
return OpParameter<LoadRepresentation>(op); return OpParameter<LoadRepresentation>(op);
...@@ -78,7 +79,8 @@ StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op) { ...@@ -78,7 +79,8 @@ StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op) {
} }
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) { MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kWord32AtomicStore, op->opcode()); DCHECK(IrOpcode::kWord32AtomicStore == op->opcode() ||
IrOpcode::kWord64AtomicStore == op->opcode());
return OpParameter<MachineRepresentation>(op); return OpParameter<MachineRepresentation>(op);
} }
...@@ -408,6 +410,10 @@ MachineType AtomicOpRepresentationOf(Operator const* op) { ...@@ -408,6 +410,10 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(kWord16) \ V(kWord16) \
V(kWord32) V(kWord32)
// 64-bit atomic representations: everything in the 32-bit list plus kWord64
// (used for the narrow I64AtomicStore variants as well as the full store).
#define ATOMIC64_REPRESENTATION_LIST(V) \
  ATOMIC_REPRESENTATION_LIST(V)         \
  V(kWord64)
#define SIMD_LANE_OP_LIST(V) \ #define SIMD_LANE_OP_LIST(V) \
V(F32x4, 4) \ V(F32x4, 4) \
V(I32x4, 4) \ V(I32x4, 4) \
...@@ -577,6 +583,19 @@ struct MachineOperatorGlobalCache { ...@@ -577,6 +583,19 @@ struct MachineOperatorGlobalCache {
ATOMIC_TYPE_LIST(ATOMIC_LOAD) ATOMIC_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD #undef ATOMIC_LOAD
// Defines and instantiates one cached singleton operator per 64-bit atomic
// load type in ATOMIC64_TYPE_LIST. Mirrors the Word32AtomicLoad cache above;
// the operator carries its MachineType as the Operator1 parameter.
#define ATOMIC_LOAD(Type)                                                   \
  struct Word64AtomicLoad##Type##Operator final                             \
      : public Operator1<LoadRepresentation> {                              \
    Word64AtomicLoad##Type##Operator()                                      \
        : Operator1<LoadRepresentation>(                                    \
              IrOpcode::kWord64AtomicLoad,                                  \
              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
              "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
  };                                                                        \
  Word64AtomicLoad##Type##Operator kWord64AtomicLoad##Type;
  ATOMIC64_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
#define ATOMIC_STORE(Type) \ #define ATOMIC_STORE(Type) \
struct Word32AtomicStore##Type##Operator \ struct Word32AtomicStore##Type##Operator \
: public Operator1<MachineRepresentation> { \ : public Operator1<MachineRepresentation> { \
...@@ -591,6 +610,20 @@ struct MachineOperatorGlobalCache { ...@@ -591,6 +610,20 @@ struct MachineOperatorGlobalCache {
ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE) ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
#undef ATOMIC_STORE #undef ATOMIC_STORE
// Defines and instantiates one cached singleton operator per 64-bit atomic
// store representation in ATOMIC64_REPRESENTATION_LIST. Mirrors the
// Word32AtomicStore cache above; the stored MachineRepresentation is the
// Operator1 parameter.
#define ATOMIC_STORE(Type)                                                 \
  struct Word64AtomicStore##Type##Operator                                 \
      : public Operator1<MachineRepresentation> {                          \
    Word64AtomicStore##Type##Operator()                                    \
        : Operator1<MachineRepresentation>(                                \
              IrOpcode::kWord64AtomicStore,                                \
              Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
              "Word64AtomicStore", 3, 1, 1, 0, 1, 0,                       \
              MachineRepresentation::Type) {}                              \
  };                                                                       \
  Word64AtomicStore##Type##Operator kWord64AtomicStore##Type;
  ATOMIC64_REPRESENTATION_LIST(ATOMIC_STORE)
#undef ATOMIC_STORE
#define ATOMIC_OP(op, type) \ #define ATOMIC_OP(op, type) \
struct op##type##Operator : public Operator1<MachineType> { \ struct op##type##Operator : public Operator1<MachineType> { \
op##type##Operator() \ op##type##Operator() \
...@@ -992,6 +1025,28 @@ const Operator* MachineOperatorBuilder::PoisonOnSpeculationWord() { ...@@ -992,6 +1025,28 @@ const Operator* MachineOperatorBuilder::PoisonOnSpeculationWord() {
return &cache_.kPoisonOnSpeculationWord; return &cache_.kPoisonOnSpeculationWord;
} }
// Returns the cached Word64AtomicLoad operator matching |rep|. Hits
// UNREACHABLE() if |rep| is not one of the types expanded from
// ATOMIC64_TYPE_LIST.
const Operator* MachineOperatorBuilder::Word64AtomicLoad(
    LoadRepresentation rep) {
#define LOAD(Type)                          \
  if (rep == MachineType::Type()) {         \
    return &cache_.kWord64AtomicLoad##Type; \
  }
  ATOMIC64_TYPE_LIST(LOAD)
#undef LOAD
  UNREACHABLE();
}
// Returns the cached Word64AtomicStore operator matching |rep|. Hits
// UNREACHABLE() if |rep| is not one of the representations expanded from
// ATOMIC64_REPRESENTATION_LIST.
const Operator* MachineOperatorBuilder::Word64AtomicStore(
    MachineRepresentation rep) {
#define STORE(kRep)                          \
  if (rep == MachineRepresentation::kRep) {  \
    return &cache_.kWord64AtomicStore##kRep; \
  }
  ATOMIC64_REPRESENTATION_LIST(STORE)
#undef STORE
  UNREACHABLE();
}
const Operator* MachineOperatorBuilder::Word64AtomicAdd(MachineType rep) { const Operator* MachineOperatorBuilder::Word64AtomicAdd(MachineType rep) {
#define ADD(kRep) \ #define ADD(kRep) \
if (rep == MachineType::kRep()) { \ if (rep == MachineType::kRep()) { \
...@@ -1127,6 +1182,7 @@ const Operator* MachineOperatorBuilder::S8x16Shuffle( ...@@ -1127,6 +1182,7 @@ const Operator* MachineOperatorBuilder::S8x16Shuffle(
#undef ATOMIC_TYPE_LIST #undef ATOMIC_TYPE_LIST
#undef ATOMIC64_TYPE_LIST #undef ATOMIC64_TYPE_LIST
#undef ATOMIC_REPRESENTATION_LIST #undef ATOMIC_REPRESENTATION_LIST
#undef ATOMIC64_REPRESENTATION_LIST
#undef SIMD_LANE_OP_LIST #undef SIMD_LANE_OP_LIST
#undef SIMD_FORMAT_LIST #undef SIMD_FORMAT_LIST
#undef STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST #undef STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST
......
...@@ -614,8 +614,12 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final ...@@ -614,8 +614,12 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// atomic-load [base + index] // atomic-load [base + index]
const Operator* Word32AtomicLoad(LoadRepresentation rep); const Operator* Word32AtomicLoad(LoadRepresentation rep);
// atomic-load [base + index]
const Operator* Word64AtomicLoad(LoadRepresentation rep);
// atomic-store [base + index], value // atomic-store [base + index], value
const Operator* Word32AtomicStore(MachineRepresentation rep); const Operator* Word32AtomicStore(MachineRepresentation rep);
// atomic-store [base + index], value
const Operator* Word64AtomicStore(MachineRepresentation rep);
// atomic-exchange [base + index], value // atomic-exchange [base + index], value
const Operator* Word32AtomicExchange(MachineType rep); const Operator* Word32AtomicExchange(MachineType rep);
// atomic-exchange [base + index], value // atomic-exchange [base + index], value
......
...@@ -636,6 +636,8 @@ ...@@ -636,6 +636,8 @@
V(Word32AtomicAnd) \ V(Word32AtomicAnd) \
V(Word32AtomicOr) \ V(Word32AtomicOr) \
V(Word32AtomicXor) \ V(Word32AtomicXor) \
V(Word64AtomicLoad) \
V(Word64AtomicStore) \
V(Word64AtomicAdd) \ V(Word64AtomicAdd) \
V(Word64AtomicSub) \ V(Word64AtomicSub) \
V(Word64AtomicAnd) \ V(Word64AtomicAnd) \
......
...@@ -1702,6 +1702,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { ...@@ -1702,6 +1702,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord32AtomicAnd: case IrOpcode::kWord32AtomicAnd:
case IrOpcode::kWord32AtomicOr: case IrOpcode::kWord32AtomicOr:
case IrOpcode::kWord32AtomicXor: case IrOpcode::kWord32AtomicXor:
case IrOpcode::kWord64AtomicLoad:
case IrOpcode::kWord64AtomicStore:
case IrOpcode::kWord64AtomicAdd: case IrOpcode::kWord64AtomicAdd:
case IrOpcode::kWord64AtomicSub: case IrOpcode::kWord64AtomicSub:
case IrOpcode::kWord64AtomicAnd: case IrOpcode::kWord64AtomicAnd:
......
...@@ -4514,24 +4514,32 @@ Node* WasmGraphBuilder::Simd8x16ShuffleOp(const uint8_t shuffle[16], ...@@ -4514,24 +4514,32 @@ Node* WasmGraphBuilder::Simd8x16ShuffleOp(const uint8_t shuffle[16],
V(I64AtomicExchange16U, Exchange, Uint16, Word64) \ V(I64AtomicExchange16U, Exchange, Uint16, Word64) \
V(I64AtomicExchange32U, Exchange, Uint32, Word64) V(I64AtomicExchange32U, Exchange, Uint32, Word64)
#define ATOMIC_TERNARY_LIST(V) \ #define ATOMIC_CMP_EXCHG_LIST(V) \
V(I32AtomicCompareExchange, CompareExchange, Uint32, Word32) \ V(I32AtomicCompareExchange, Uint32, Word32) \
V(I64AtomicCompareExchange, CompareExchange, Uint64, Word64) \ V(I64AtomicCompareExchange, Uint64, Word64) \
V(I32AtomicCompareExchange8U, CompareExchange, Uint8, Word32) \ V(I32AtomicCompareExchange8U, Uint8, Word32) \
V(I32AtomicCompareExchange16U, CompareExchange, Uint16, Word32) \ V(I32AtomicCompareExchange16U, Uint16, Word32) \
V(I64AtomicCompareExchange8U, CompareExchange, Uint8, Word64) \ V(I64AtomicCompareExchange8U, Uint8, Word64) \
V(I64AtomicCompareExchange16U, CompareExchange, Uint16, Word64) \ V(I64AtomicCompareExchange16U, Uint16, Word64) \
V(I64AtomicCompareExchange32U, CompareExchange, Uint32, Word64) V(I64AtomicCompareExchange32U, Uint32, Word64)
#define ATOMIC_LOAD_LIST(V) \ #define ATOMIC_LOAD_LIST(V) \
V(I32AtomicLoad, Uint32) \ V(I32AtomicLoad, Uint32, Word32) \
V(I32AtomicLoad8U, Uint8) \ V(I64AtomicLoad, Uint64, Word64) \
V(I32AtomicLoad16U, Uint16) V(I32AtomicLoad8U, Uint8, Word32) \
V(I32AtomicLoad16U, Uint16, Word32) \
#define ATOMIC_STORE_LIST(V) \ V(I64AtomicLoad8U, Uint8, Word64) \
V(I32AtomicStore, Uint32, kWord32) \ V(I64AtomicLoad16U, Uint16, Word64) \
V(I32AtomicStore8U, Uint8, kWord8) \ V(I64AtomicLoad32U, Uint32, Word64)
V(I32AtomicStore16U, Uint16, kWord16)
#define ATOMIC_STORE_LIST(V) \
V(I32AtomicStore, Uint32, kWord32, Word32) \
V(I64AtomicStore, Uint64, kWord64, Word64) \
V(I32AtomicStore8U, Uint8, kWord8, Word32) \
V(I32AtomicStore16U, Uint16, kWord16, Word32) \
V(I64AtomicStore8U, Uint8, kWord8, Word64) \
V(I64AtomicStore16U, Uint16, kWord16, Word64) \
V(I64AtomicStore32U, Uint32, kWord32, Word64)
Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs, Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
uint32_t alignment, uint32_t offset, uint32_t alignment, uint32_t offset,
...@@ -4552,41 +4560,42 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs, ...@@ -4552,41 +4560,42 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
ATOMIC_BINOP_LIST(BUILD_ATOMIC_BINOP) ATOMIC_BINOP_LIST(BUILD_ATOMIC_BINOP)
#undef BUILD_ATOMIC_BINOP #undef BUILD_ATOMIC_BINOP
#define BUILD_ATOMIC_TERNARY_OP(Name, Operation, Type, Prefix) \ #define BUILD_ATOMIC_CMP_EXCHG(Name, Type, Prefix) \
case wasm::kExpr##Name: { \ case wasm::kExpr##Name: { \
Node* index = \ Node* index = \
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
inputs[0], offset, position, kNeedsBoundsCheck); \ inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \ node = graph()->NewNode( \
jsgraph()->machine()->Prefix##Atomic##Operation(MachineType::Type()), \ jsgraph()->machine()->Prefix##AtomicCompareExchange( \
MachineType::Type()), \
MemBuffer(offset), index, inputs[1], inputs[2], *effect_, *control_); \ MemBuffer(offset), index, inputs[1], inputs[2], *effect_, *control_); \
break; \ break; \
} }
ATOMIC_TERNARY_LIST(BUILD_ATOMIC_TERNARY_OP) ATOMIC_CMP_EXCHG_LIST(BUILD_ATOMIC_CMP_EXCHG)
#undef BUILD_ATOMIC_TERNARY_OP #undef BUILD_ATOMIC_CMP_EXCHG
#define BUILD_ATOMIC_LOAD_OP(Name, Type) \ #define BUILD_ATOMIC_LOAD_OP(Name, Type, Prefix) \
case wasm::kExpr##Name: { \ case wasm::kExpr##Name: { \
Node* index = \ Node* index = \
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
inputs[0], offset, position, kNeedsBoundsCheck); \ inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \ node = graph()->NewNode( \
jsgraph()->machine()->Word32AtomicLoad(MachineType::Type()), \ jsgraph()->machine()->Prefix##AtomicLoad(MachineType::Type()), \
MemBuffer(offset), index, *effect_, *control_); \ MemBuffer(offset), index, *effect_, *control_); \
break; \ break; \
} }
ATOMIC_LOAD_LIST(BUILD_ATOMIC_LOAD_OP) ATOMIC_LOAD_LIST(BUILD_ATOMIC_LOAD_OP)
#undef BUILD_ATOMIC_LOAD_OP #undef BUILD_ATOMIC_LOAD_OP
#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep) \ #define BUILD_ATOMIC_STORE_OP(Name, Type, Rep, Prefix) \
case wasm::kExpr##Name: { \ case wasm::kExpr##Name: { \
Node* index = \ Node* index = \
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
inputs[0], offset, position, kNeedsBoundsCheck); \ inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \ node = graph()->NewNode( \
jsgraph()->machine()->Word32AtomicStore(MachineRepresentation::Rep), \ jsgraph()->machine()->Prefix##AtomicStore(MachineRepresentation::Rep), \
MemBuffer(offset), index, inputs[1], *effect_, *control_); \ MemBuffer(offset), index, inputs[1], *effect_, *control_); \
break; \ break; \
} }
ATOMIC_STORE_LIST(BUILD_ATOMIC_STORE_OP) ATOMIC_STORE_LIST(BUILD_ATOMIC_STORE_OP)
#undef BUILD_ATOMIC_STORE_OP #undef BUILD_ATOMIC_STORE_OP
...@@ -4598,7 +4607,7 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs, ...@@ -4598,7 +4607,7 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
} }
#undef ATOMIC_BINOP_LIST #undef ATOMIC_BINOP_LIST
#undef ATOMIC_TERNARY_LIST #undef ATOMIC_CMP_EXCHG_LIST
#undef ATOMIC_LOAD_LIST #undef ATOMIC_LOAD_LIST
#undef ATOMIC_STORE_LIST #undef ATOMIC_STORE_LIST
......
...@@ -2825,6 +2825,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2825,6 +2825,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kWord32AtomicStoreWord8: case kWord32AtomicStoreWord8:
case kWord32AtomicStoreWord16: case kWord32AtomicStoreWord16:
case kWord32AtomicStoreWord32: case kWord32AtomicStoreWord32:
case kX64Word64AtomicLoadUint8:
case kX64Word64AtomicLoadUint16:
case kX64Word64AtomicLoadUint32:
case kX64Word64AtomicLoadUint64:
case kX64Word64AtomicStoreWord8:
case kX64Word64AtomicStoreWord16:
case kX64Word64AtomicStoreWord32:
case kX64Word64AtomicStoreWord64:
UNREACHABLE(); // Won't be generated by instruction selector. UNREACHABLE(); // Won't be generated by instruction selector.
break; break;
} }
......
...@@ -235,6 +235,14 @@ namespace compiler { ...@@ -235,6 +235,14 @@ namespace compiler {
V(X64S128Not) \ V(X64S128Not) \
V(X64S128Select) \ V(X64S128Select) \
V(X64S128Zero) \ V(X64S128Zero) \
V(X64Word64AtomicLoadUint8) \
V(X64Word64AtomicLoadUint16) \
V(X64Word64AtomicLoadUint32) \
V(X64Word64AtomicLoadUint64) \
V(X64Word64AtomicStoreWord8) \
V(X64Word64AtomicStoreWord16) \
V(X64Word64AtomicStoreWord32) \
V(X64Word64AtomicStoreWord64) \
V(X64Word64AtomicAddUint8) \ V(X64Word64AtomicAddUint8) \
V(X64Word64AtomicAddUint16) \ V(X64Word64AtomicAddUint16) \
V(X64Word64AtomicAddUint32) \ V(X64Word64AtomicAddUint32) \
......
...@@ -267,6 +267,16 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -267,6 +267,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kLFence: case kLFence:
return kHasSideEffect; return kHasSideEffect;
case kX64Word64AtomicLoadUint8:
case kX64Word64AtomicLoadUint16:
case kX64Word64AtomicLoadUint32:
case kX64Word64AtomicLoadUint64:
return kIsLoadOperation;
case kX64Word64AtomicStoreWord8:
case kX64Word64AtomicStoreWord16:
case kX64Word64AtomicStoreWord32:
case kX64Word64AtomicStoreWord64:
case kX64Word64AtomicAddUint8: case kX64Word64AtomicAddUint8:
case kX64Word64AtomicAddUint16: case kX64Word64AtomicAddUint16:
case kX64Word64AtomicAddUint32: case kX64Word64AtomicAddUint32:
......
...@@ -2119,6 +2119,12 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) { ...@@ -2119,6 +2119,12 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
VisitLoad(node); VisitLoad(node);
} }
// x64: a 64-bit atomic load is lowered as an ordinary load (presumably
// because aligned x64 loads already provide the required atomicity —
// confirm against the memory-model requirements).
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
  // LoadRepresentationOf DCHECKs the operator is an atomic load; the value
  // itself is not needed here, so USE() silences the unused warning.
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  USE(load_rep);
  VisitLoad(node);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) { void InstructionSelector::VisitWord32AtomicStore(Node* node) {
X64OperandGenerator g(this); X64OperandGenerator g(this);
Node* base = node->InputAt(0); Node* base = node->InputAt(0);
...@@ -2157,6 +2163,48 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) { ...@@ -2157,6 +2163,48 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs); Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
} }
// x64: selects instructions for a 64-bit atomic store. The store is emitted
// as an exchange opcode rather than a plain store — NOTE(review): presumably
// to obtain sequentially consistent ordering via xchg; confirm against the
// code generator.
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
  X64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  // Pick the exchange opcode matching the stored width.
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kX64Word64AtomicExchangeUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kX64Word64AtomicExchangeUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kX64Word64AtomicExchangeUint32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kX64Word64AtomicExchangeUint64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  AddressingMode addressing_mode;
  InstructionOperand inputs[4];
  size_t input_count = 0;
  // Value first, then the memory operand (base [+ index]). NOTE(review):
  // UseUniqueRegister is used throughout, presumably to keep operands from
  // aliasing during the in-place atomic operation — confirm.
  inputs[input_count++] = g.UseUniqueRegister(value);
  inputs[input_count++] = g.UseUniqueRegister(base);
  if (g.CanBeImmediate(index)) {
    inputs[input_count++] = g.UseImmediate(index);
    addressing_mode = kMode_MRI;  // [base + immediate]
  } else {
    inputs[input_count++] = g.UseUniqueRegister(index);
    addressing_mode = kMode_MR1;  // [base + index]
  }
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  DCHECK_LE(input_count, arraysize(inputs));
  // Zero outputs: the previous value produced by the exchange is discarded.
  Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) { void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
X64OperandGenerator g(this); X64OperandGenerator g(this);
Node* base = node->InputAt(0); Node* base = node->InputAt(0);
......
...@@ -56,8 +56,12 @@ struct WasmException; ...@@ -56,8 +56,12 @@ struct WasmException;
#define ATOMIC_OP_LIST(V) \ #define ATOMIC_OP_LIST(V) \
V(I32AtomicLoad, Uint32) \ V(I32AtomicLoad, Uint32) \
V(I64AtomicLoad, Uint64) \
V(I32AtomicLoad8U, Uint8) \ V(I32AtomicLoad8U, Uint8) \
V(I32AtomicLoad16U, Uint16) \ V(I32AtomicLoad16U, Uint16) \
V(I64AtomicLoad8U, Uint8) \
V(I64AtomicLoad16U, Uint16) \
V(I64AtomicLoad32U, Uint32) \
V(I32AtomicAdd, Uint32) \ V(I32AtomicAdd, Uint32) \
V(I32AtomicAdd8U, Uint8) \ V(I32AtomicAdd8U, Uint8) \
V(I32AtomicAdd16U, Uint16) \ V(I32AtomicAdd16U, Uint16) \
...@@ -110,8 +114,12 @@ struct WasmException; ...@@ -110,8 +114,12 @@ struct WasmException;
#define ATOMIC_STORE_OP_LIST(V) \ #define ATOMIC_STORE_OP_LIST(V) \
V(I32AtomicStore, Uint32) \ V(I32AtomicStore, Uint32) \
V(I64AtomicStore, Uint64) \
V(I32AtomicStore8U, Uint8) \ V(I32AtomicStore8U, Uint8) \
V(I32AtomicStore16U, Uint16) V(I32AtomicStore16U, Uint16) \
V(I64AtomicStore8U, Uint8) \
V(I64AtomicStore16U, Uint16) \
V(I64AtomicStore32U, Uint32)
template <typename T, typename Allocator> template <typename T, typename Allocator>
Vector<T> vec2vec(std::vector<T, Allocator>& vec) { Vector<T> vec2vec(std::vector<T, Allocator>& vec) {
......
...@@ -251,6 +251,144 @@ WASM_COMPILED_EXEC_TEST(I32AtomicCompareExchange8U) { ...@@ -251,6 +251,144 @@ WASM_COMPILED_EXEC_TEST(I32AtomicCompareExchange8U) {
} }
} }
} }
// Writes each 64-bit test value directly into wasm memory and checks that a
// full-width i64 atomic load reads it back exactly.
WASM_COMPILED_EXEC_TEST(I64AtomicLoad) {
  EXPERIMENTAL_FLAG_SCOPE(threads);  // i64 atomics are behind the threads flag
  WasmRunner<uint64_t> r(execution_mode);
  r.builder().SetHasSharedMemory();  // atomic ops require shared memory
  uint64_t* memory = r.builder().AddMemoryElems<uint64_t>(8);
  BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad, WASM_ZERO,
                                MachineRepresentation::kWord64));
  FOR_UINT64_INPUTS(i) {
    uint64_t expected = *i;
    r.builder().WriteMemory(&memory[0], expected);
    CHECK_EQ(expected, r.Call());
  }
}
// Checks the 32-bit narrow variant: memory holds uint32_t values and the
// i64.atomic.load32_u result (a uint64_t) must compare equal to them.
WASM_COMPILED_EXEC_TEST(I64AtomicLoad32U) {
  EXPERIMENTAL_FLAG_SCOPE(threads);
  WasmRunner<uint64_t> r(execution_mode);
  r.builder().SetHasSharedMemory();
  uint32_t* memory = r.builder().AddMemoryElems<uint32_t>(8);
  BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad32U, WASM_ZERO,
                                MachineRepresentation::kWord32));
  FOR_UINT32_INPUTS(i) {
    uint32_t expected = *i;
    r.builder().WriteMemory(&memory[0], expected);
    CHECK_EQ(expected, r.Call());
  }
}
// Same as I64AtomicLoad32U but for the 16-bit narrow load variant.
WASM_COMPILED_EXEC_TEST(I64AtomicLoad16U) {
  EXPERIMENTAL_FLAG_SCOPE(threads);
  WasmRunner<uint64_t> r(execution_mode);
  r.builder().SetHasSharedMemory();
  uint16_t* memory = r.builder().AddMemoryElems<uint16_t>(8);
  BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad16U, WASM_ZERO,
                                MachineRepresentation::kWord16));
  FOR_UINT16_INPUTS(i) {
    uint16_t expected = *i;
    r.builder().WriteMemory(&memory[0], expected);
    CHECK_EQ(expected, r.Call());
  }
}
// Same as I64AtomicLoad32U but for the 8-bit narrow load variant.
WASM_COMPILED_EXEC_TEST(I64AtomicLoad8U) {
  EXPERIMENTAL_FLAG_SCOPE(threads);
  WasmRunner<uint64_t> r(execution_mode);
  r.builder().SetHasSharedMemory();
  uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(8);
  BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad8U, WASM_ZERO,
                                MachineRepresentation::kWord8));
  FOR_UINT8_INPUTS(i) {
    uint8_t expected = *i;
    r.builder().WriteMemory(&memory[0], expected);
    CHECK_EQ(expected, r.Call());
  }
}
// Round-trips each value through i64.atomic.store followed by
// i64.atomic.load, and additionally verifies the raw memory contents.
WASM_COMPILED_EXEC_TEST(I64AtomicStoreLoad) {
  EXPERIMENTAL_FLAG_SCOPE(threads);
  WasmRunner<uint64_t, uint64_t> r(execution_mode);
  r.builder().SetHasSharedMemory();
  uint64_t* memory = r.builder().AddMemoryElems<uint64_t>(8);
  BUILD(r,
        WASM_ATOMICS_STORE_OP(kExprI64AtomicStore, WASM_ZERO, WASM_GET_LOCAL(0),
                              MachineRepresentation::kWord64),
        WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad, WASM_ZERO,
                             MachineRepresentation::kWord64));
  FOR_UINT64_INPUTS(i) {
    uint64_t expected = *i;
    CHECK_EQ(expected, r.Call(*i));                        // load sees the store
    CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));  // memory updated
  }
}
// Round-trip test for the 32-bit narrow store/load variants.
WASM_COMPILED_EXEC_TEST(I64AtomicStoreLoad32U) {
  EXPERIMENTAL_FLAG_SCOPE(threads);
  WasmRunner<uint64_t, uint64_t> r(execution_mode);
  r.builder().SetHasSharedMemory();
  uint32_t* memory = r.builder().AddMemoryElems<uint32_t>(8);
  BUILD(
      r,
      WASM_ATOMICS_STORE_OP(kExprI64AtomicStore32U, WASM_ZERO,
                            WASM_GET_LOCAL(0), MachineRepresentation::kWord32),
      WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad32U, WASM_ZERO,
                           MachineRepresentation::kWord32));
  FOR_UINT32_INPUTS(i) {
    uint32_t expected = *i;
    CHECK_EQ(expected, r.Call(*i));
    CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
  }
}
// Round-trip test for the 16-bit narrow store/load variants.
WASM_COMPILED_EXEC_TEST(I64AtomicStoreLoad16U) {
  EXPERIMENTAL_FLAG_SCOPE(threads);
  WasmRunner<uint64_t, uint64_t> r(execution_mode);
  r.builder().SetHasSharedMemory();
  uint16_t* memory = r.builder().AddMemoryElems<uint16_t>(8);
  BUILD(
      r,
      WASM_ATOMICS_STORE_OP(kExprI64AtomicStore16U, WASM_ZERO,
                            WASM_GET_LOCAL(0), MachineRepresentation::kWord16),
      WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad16U, WASM_ZERO,
                           MachineRepresentation::kWord16));
  FOR_UINT16_INPUTS(i) {
    uint16_t expected = *i;
    CHECK_EQ(expected, r.Call(*i));
    CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));
  }
}
// Round-trip test for the 8-bit narrow store/load variants.
// Fix: the memory check previously compared against `*i` while every sibling
// StoreLoad test compares against `expected`; use `expected` here too for
// consistency (behavior is identical since `expected == *i`).
WASM_COMPILED_EXEC_TEST(I64AtomicStoreLoad8U) {
  EXPERIMENTAL_FLAG_SCOPE(threads);  // i64 atomics are behind the threads flag
  WasmRunner<uint64_t, uint64_t> r(execution_mode);
  r.builder().SetHasSharedMemory();  // atomic ops require shared memory
  uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(8);
  BUILD(r,
        WASM_ATOMICS_STORE_OP(kExprI64AtomicStore8U, WASM_ZERO,
                              WASM_GET_LOCAL(0), MachineRepresentation::kWord8),
        WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad8U, WASM_ZERO,
                             MachineRepresentation::kWord8));
  FOR_UINT8_INPUTS(i) {
    uint8_t expected = *i;
    CHECK_EQ(expected, r.Call(*i));                        // load sees the store
    CHECK_EQ(expected, r.builder().ReadMemory(&memory[0]));  // memory updated
  }
}
} // namespace test_run_wasm_atomics_64 } // namespace test_run_wasm_atomics_64
} // namespace wasm } // namespace wasm
} // namespace internal } // namespace internal
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.