Commit 79a60792 authored by Deepti Gandluri, committed by Commit Bot

[wasm] Implement I64Atomic Binary operations on ARM64

Bug: v8:6532
Change-Id: I3840df75b745790aaa7e9dec7188adccc70627ce
Reviewed-on: https://chromium-review.googlesource.com/998838
Reviewed-by: Ben Smith <binji@chromium.org>
Reviewed-by: Bill Budge <bbudge@chromium.org>
Reviewed-by: Martyn Capewell <martyn.capewell@arm.com>
Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52484}
parent 231a96bb
...@@ -1749,7 +1749,6 @@ void Assembler::stlr(const Register& rt, const Register& rn) { ...@@ -1749,7 +1749,6 @@ void Assembler::stlr(const Register& rt, const Register& rn) {
void Assembler::stlxr(const Register& rs, const Register& rt, void Assembler::stlxr(const Register& rs, const Register& rt,
const Register& rn) { const Register& rn) {
DCHECK(rs.Is32Bits());
DCHECK(rn.Is64Bits()); DCHECK(rn.Is64Bits());
DCHECK(!rs.Is(rt) && !rs.Is(rn)); DCHECK(!rs.Is(rt) && !rs.Is(rn));
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLXR_w : STLXR_x; LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLXR_w : STLXR_x;
......
...@@ -2083,6 +2083,8 @@ Simulator::TransactionSize Simulator::get_transaction_size(unsigned size) { ...@@ -2083,6 +2083,8 @@ Simulator::TransactionSize Simulator::get_transaction_size(unsigned size) {
return TransactionSize::HalfWord; return TransactionSize::HalfWord;
case 4: case 4:
return TransactionSize::Word; return TransactionSize::Word;
case 8:
return TransactionSize::DoubleWord;
default: default:
UNREACHABLE(); UNREACHABLE();
} }
...@@ -2127,6 +2129,10 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) { ...@@ -2127,6 +2129,10 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
case LDAXR_w: case LDAXR_w:
set_wreg_no_log(rt, MemoryRead<uint32_t>(address)); set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
break; break;
case LDAR_x:
case LDAXR_x:
set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
break;
default: default:
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
...@@ -2150,6 +2156,9 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) { ...@@ -2150,6 +2156,9 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
case STLXR_w: case STLXR_w:
MemoryWrite<uint32_t>(address, wreg(rt)); MemoryWrite<uint32_t>(address, wreg(rt));
break; break;
case STLXR_x:
MemoryWrite<uint64_t>(address, xreg(rt));
break;
default: default:
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
...@@ -2171,6 +2180,9 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) { ...@@ -2171,6 +2180,9 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
case STLR_w: case STLR_w:
MemoryWrite<uint32_t>(address, wreg(rt)); MemoryWrite<uint32_t>(address, wreg(rt));
break; break;
case STLR_x:
MemoryWrite<uint64_t>(address, xreg(rt));
break;
default: default:
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
......
...@@ -2196,6 +2196,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase { ...@@ -2196,6 +2196,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
Byte = 1, Byte = 1,
HalfWord = 2, HalfWord = 2,
Word = 4, Word = 4,
DoubleWord = 8,
}; };
TransactionSize get_transaction_size(unsigned size); TransactionSize get_transaction_size(unsigned size);
......
...@@ -446,6 +446,18 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, ...@@ -446,6 +446,18 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ Cbnz(i.TempRegister32(2), &binop); \ __ Cbnz(i.TempRegister32(2), &binop); \
} while (0) } while (0)
// Emits a 64-bit atomic read-modify-write loop using an exclusive
// load/store pair (e.g. ldaxr/stlxr):
//   temp0 = base (input0) + index (input1)      -- effective address
//   loop: output = load_instr [temp0]           -- old value
//         temp1  = bin_instr(output, input2)    -- new value
//         temp2  = store_instr temp1 -> [temp0] -- exclusive-store status
//         retry while temp2 != 0 (store-exclusive lost the reservation)
// The old value is left in the output register, matching the
// narrower-width ASSEMBLE_ATOMIC_BINOP contract.
#define ASSEMBLE_ATOMIC64_BINOP(load_instr, store_instr, bin_instr) \
do { \
Label binop; \
__ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
__ Bind(&binop); \
__ load_instr(i.OutputRegister(), i.TempRegister(0)); \
__ bin_instr(i.TempRegister(1), i.OutputRegister(), \
Operand(i.InputRegister(2))); \
__ store_instr(i.TempRegister(2), i.TempRegister(1), i.TempRegister(0)); \
__ Cbnz(i.TempRegister(2), &binop); \
} while (0)
#define ASSEMBLE_IEEE754_BINOP(name) \ #define ASSEMBLE_IEEE754_BINOP(name) \
do { \ do { \
FrameScope scope(tasm(), StackFrame::MANUAL); \ FrameScope scope(tasm(), StackFrame::MANUAL); \
...@@ -1660,6 +1672,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1660,6 +1672,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \ __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
break; \ break; \
case kWord32Atomic##op##Uint8: \ case kWord32Atomic##op##Uint8: \
case kArm64Word64Atomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst); \ ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst); \
break; \ break; \
case kWord32Atomic##op##Int16: \ case kWord32Atomic##op##Int16: \
...@@ -1667,9 +1680,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1667,9 +1680,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \ __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \
break; \ break; \
case kWord32Atomic##op##Uint16: \ case kWord32Atomic##op##Uint16: \
case kArm64Word64Atomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst); \ ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst); \
break; \ break; \
case kWord32Atomic##op##Word32: \ case kWord32Atomic##op##Word32: \
case kArm64Word64Atomic##op##Uint32: \
ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst); \ ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst); \
break; break;
ATOMIC_BINOP_CASE(Add, Add) ATOMIC_BINOP_CASE(Add, Add)
...@@ -1678,11 +1693,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1678,11 +1693,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, Orr) ATOMIC_BINOP_CASE(Or, Orr)
ATOMIC_BINOP_CASE(Xor, Eor) ATOMIC_BINOP_CASE(Xor, Eor)
#undef ATOMIC_BINOP_CASE #undef ATOMIC_BINOP_CASE
// Routes the full-width (Uint64) 64-bit atomic binops to the x-register
// exclusive pair ldaxr/stlxr via ASSEMBLE_ATOMIC64_BINOP. The narrower
// Uint8/Uint16/Uint32 variants are handled by the ATOMIC_BINOP_CASE
// cases, which reuse the 32-bit codegen (ldaxrb/ldaxrh/ldaxr).
#define ATOMIC64_BINOP_CASE(op, inst) \
case kArm64Word64Atomic##op##Uint64: \
ASSEMBLE_ATOMIC64_BINOP(ldaxr, stlxr, inst); \
break;
ATOMIC64_BINOP_CASE(Add, Add)
ATOMIC64_BINOP_CASE(Sub, Sub)
ATOMIC64_BINOP_CASE(And, And)
ATOMIC64_BINOP_CASE(Or, Orr)
ATOMIC64_BINOP_CASE(Xor, Eor)
#undef ATOMIC64_BINOP_CASE
#undef ASSEMBLE_SHIFT #undef ASSEMBLE_SHIFT
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER #undef ASSEMBLE_ATOMIC_LOAD_INTEGER
#undef ASSEMBLE_ATOMIC_STORE_INTEGER #undef ASSEMBLE_ATOMIC_STORE_INTEGER
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER #undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_BINOP #undef ASSEMBLE_ATOMIC_BINOP
#undef ASSEMBLE_ATOMIC64_BINOP
#undef ASSEMBLE_IEEE754_BINOP #undef ASSEMBLE_IEEE754_BINOP
#undef ASSEMBLE_IEEE754_UNOP #undef ASSEMBLE_IEEE754_UNOP
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER #undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
......
...@@ -300,7 +300,27 @@ namespace compiler { ...@@ -300,7 +300,27 @@ namespace compiler {
V(Arm64S1x8AnyTrue) \ V(Arm64S1x8AnyTrue) \
V(Arm64S1x8AllTrue) \ V(Arm64S1x8AllTrue) \
V(Arm64S1x16AnyTrue) \ V(Arm64S1x16AnyTrue) \
V(Arm64S1x16AllTrue) V(Arm64S1x16AllTrue) \
V(Arm64Word64AtomicAddUint8) \
V(Arm64Word64AtomicAddUint16) \
V(Arm64Word64AtomicAddUint32) \
V(Arm64Word64AtomicAddUint64) \
V(Arm64Word64AtomicSubUint8) \
V(Arm64Word64AtomicSubUint16) \
V(Arm64Word64AtomicSubUint32) \
V(Arm64Word64AtomicSubUint64) \
V(Arm64Word64AtomicAndUint8) \
V(Arm64Word64AtomicAndUint16) \
V(Arm64Word64AtomicAndUint32) \
V(Arm64Word64AtomicAndUint64) \
V(Arm64Word64AtomicOrUint8) \
V(Arm64Word64AtomicOrUint16) \
V(Arm64Word64AtomicOrUint32) \
V(Arm64Word64AtomicOrUint64) \
V(Arm64Word64AtomicXorUint8) \
V(Arm64Word64AtomicXorUint16) \
V(Arm64Word64AtomicXorUint32) \
V(Arm64Word64AtomicXorUint64)
// Addressing modes represent the "shape" of inputs to an instruction. // Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes // Many instructions support multiple addressing modes. Addressing modes
......
...@@ -309,6 +309,28 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -309,6 +309,28 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64DsbIsb: case kArm64DsbIsb:
return kHasSideEffect; return kHasSideEffect;
case kArm64Word64AtomicAddUint8:
case kArm64Word64AtomicAddUint16:
case kArm64Word64AtomicAddUint32:
case kArm64Word64AtomicAddUint64:
case kArm64Word64AtomicSubUint8:
case kArm64Word64AtomicSubUint16:
case kArm64Word64AtomicSubUint32:
case kArm64Word64AtomicSubUint64:
case kArm64Word64AtomicAndUint8:
case kArm64Word64AtomicAndUint16:
case kArm64Word64AtomicAndUint32:
case kArm64Word64AtomicAndUint64:
case kArm64Word64AtomicOrUint8:
case kArm64Word64AtomicOrUint16:
case kArm64Word64AtomicOrUint32:
case kArm64Word64AtomicOrUint64:
case kArm64Word64AtomicXorUint8:
case kArm64Word64AtomicXorUint16:
case kArm64Word64AtomicXorUint32:
case kArm64Word64AtomicXorUint64:
return kHasSideEffect;
#define CASE(Name) case k##Name: #define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE) COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE #undef CASE
......
...@@ -2729,6 +2729,55 @@ VISIT_ATOMIC_BINOP(Or) ...@@ -2729,6 +2729,55 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor) VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP #undef VISIT_ATOMIC_BINOP
// Selects the arm64 instruction for a 64-bit atomic binary operation,
// picking the width-specific opcode from the node's machine type.
// Emits one result register, three register inputs (base, index, value)
// and three temps for the load/store-exclusive retry loop.
void InstructionSelector::VisitWord64AtomicBinaryOperation(
    Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
    ArchOpcode uint64_op) {
  Arm64OperandGenerator gen(this);

  // Map the access width recorded on the operator to the matching opcode.
  const MachineType rep = AtomicOpRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  if (rep == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (rep == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (rep == MachineType::Uint32()) {
    opcode = uint32_op;
  } else if (rep == MachineType::Uint64()) {
    opcode = uint64_op;
  } else {
    UNREACHABLE();
    return;
  }

  // base + index form the address (MRR mode); the value operand must
  // survive across the codegen retry loop, hence the unique register.
  InstructionOperand inputs[] = {gen.UseRegister(node->InputAt(0)),
                                 gen.UseRegister(node->InputAt(1)),
                                 gen.UseUniqueRegister(node->InputAt(2))};
  InstructionOperand outputs[] = {gen.DefineAsRegister(node)};
  InstructionOperand temps[] = {gen.TempRegister(), gen.TempRegister(),
                                gen.TempRegister()};
  InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
  Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
       arraysize(temps), temps);
}
// Defines InstructionSelector::VisitWord64Atomic{Add,Sub,And,Or,Xor} by
// forwarding to VisitWord64AtomicBinaryOperation with the four
// width-specific kArm64Word64Atomic* opcodes for that operation.
#define VISIT_ATOMIC_BINOP(op) \
void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
VisitWord64AtomicBinaryOperation( \
node, kArm64Word64Atomic##op##Uint8, kArm64Word64Atomic##op##Uint16, \
kArm64Word64Atomic##op##Uint32, kArm64Word64Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) { void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE(); UNREACHABLE();
} }
......
...@@ -2323,7 +2323,9 @@ void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); } ...@@ -2323,7 +2323,9 @@ void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicStore(Node* node) { void InstructionSelector::VisitWord64AtomicStore(Node* node) {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
#endif // !V8_TARGET_ARCH_X64
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64
void InstructionSelector::VisitWord64AtomicAdd(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitWord64AtomicAdd(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicSub(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitWord64AtomicSub(Node* node) { UNIMPLEMENTED(); }
...@@ -2333,7 +2335,9 @@ void InstructionSelector::VisitWord64AtomicAnd(Node* node) { UNIMPLEMENTED(); } ...@@ -2333,7 +2335,9 @@ void InstructionSelector::VisitWord64AtomicAnd(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicOr(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitWord64AtomicOr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicXor(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitWord64AtomicXor(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64
#if !V8_TARGET_ARCH_X64
void InstructionSelector::VisitWord64AtomicExchange(Node* node) { void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
......
...@@ -290,6 +290,7 @@ v8_source_set("cctest_sources") { ...@@ -290,6 +290,7 @@ v8_source_set("cctest_sources") {
"test-sync-primitives-arm64.cc", "test-sync-primitives-arm64.cc",
"test-utils-arm64.cc", "test-utils-arm64.cc",
"test-utils-arm64.h", "test-utils-arm64.h",
"wasm/test-run-wasm-atomics64.cc",
] ]
} else if (v8_current_cpu == "x86") { } else if (v8_current_cpu == "x86") {
sources += [ ### gcmole(arch:ia32) ### sources += [ ### gcmole(arch:ia32) ###
......
...@@ -46,9 +46,11 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr) { ...@@ -46,9 +46,11 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr) {
WASM_COMPILED_EXEC_TEST(I64AtomicXor) { WASM_COMPILED_EXEC_TEST(I64AtomicXor) {
RunU64BinOp(execution_mode, kExprI64AtomicXor, Xor); RunU64BinOp(execution_mode, kExprI64AtomicXor, Xor);
} }
#if V8_TARGET_ARCH_X64
WASM_COMPILED_EXEC_TEST(I64AtomicExchange) { WASM_COMPILED_EXEC_TEST(I64AtomicExchange) {
RunU64BinOp(execution_mode, kExprI64AtomicExchange, Exchange); RunU64BinOp(execution_mode, kExprI64AtomicExchange, Exchange);
} }
#endif // V8_TARGET_ARCH_X64
void RunU32BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op, void RunU32BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
Uint32BinOp expected_op) { Uint32BinOp expected_op) {
...@@ -86,9 +88,11 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr32U) { ...@@ -86,9 +88,11 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr32U) {
WASM_COMPILED_EXEC_TEST(I64AtomicXor32U) { WASM_COMPILED_EXEC_TEST(I64AtomicXor32U) {
RunU32BinOp(execution_mode, kExprI64AtomicXor32U, Xor); RunU32BinOp(execution_mode, kExprI64AtomicXor32U, Xor);
} }
#if V8_TARGET_ARCH_X64
WASM_COMPILED_EXEC_TEST(I64AtomicExchange32U) { WASM_COMPILED_EXEC_TEST(I64AtomicExchange32U) {
RunU32BinOp(execution_mode, kExprI64AtomicExchange32U, Exchange); RunU32BinOp(execution_mode, kExprI64AtomicExchange32U, Exchange);
} }
#endif // V8_TARGET_ARCH_X64
void RunU16BinOp(WasmExecutionMode mode, WasmOpcode wasm_op, void RunU16BinOp(WasmExecutionMode mode, WasmOpcode wasm_op,
Uint16BinOp expected_op) { Uint16BinOp expected_op) {
...@@ -126,9 +130,11 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr16U) { ...@@ -126,9 +130,11 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr16U) {
WASM_COMPILED_EXEC_TEST(I64AtomicXor16U) { WASM_COMPILED_EXEC_TEST(I64AtomicXor16U) {
RunU16BinOp(execution_mode, kExprI64AtomicXor16U, Xor); RunU16BinOp(execution_mode, kExprI64AtomicXor16U, Xor);
} }
#if V8_TARGET_ARCH_X64
WASM_COMPILED_EXEC_TEST(I64AtomicExchange16U) { WASM_COMPILED_EXEC_TEST(I64AtomicExchange16U) {
RunU16BinOp(execution_mode, kExprI64AtomicExchange16U, Exchange); RunU16BinOp(execution_mode, kExprI64AtomicExchange16U, Exchange);
} }
#endif // V8_TARGET_ARCH_X64
void RunU8BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op, void RunU8BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
Uint8BinOp expected_op) { Uint8BinOp expected_op) {
...@@ -166,6 +172,8 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr8U) { ...@@ -166,6 +172,8 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr8U) {
WASM_COMPILED_EXEC_TEST(I64AtomicXor8U) { WASM_COMPILED_EXEC_TEST(I64AtomicXor8U) {
RunU8BinOp(execution_mode, kExprI64AtomicXor8U, Xor); RunU8BinOp(execution_mode, kExprI64AtomicXor8U, Xor);
} }
#if V8_TARGET_ARCH_X64
WASM_COMPILED_EXEC_TEST(I64AtomicExchange8U) { WASM_COMPILED_EXEC_TEST(I64AtomicExchange8U) {
RunU8BinOp(execution_mode, kExprI64AtomicExchange8U, Exchange); RunU8BinOp(execution_mode, kExprI64AtomicExchange8U, Exchange);
} }
...@@ -389,6 +397,8 @@ WASM_COMPILED_EXEC_TEST(I64AtomicStoreLoad8U) { ...@@ -389,6 +397,8 @@ WASM_COMPILED_EXEC_TEST(I64AtomicStoreLoad8U) {
CHECK_EQ(*i, r.builder().ReadMemory(&memory[0])); CHECK_EQ(*i, r.builder().ReadMemory(&memory[0]));
} }
} }
#endif // V8_TARGET_ARCH_X64
} // namespace test_run_wasm_atomics_64 } // namespace test_run_wasm_atomics_64
} // namespace wasm } // namespace wasm
} // namespace internal } // namespace internal
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment