Commit 10233179 authored by Deepti Gandluri, committed by Commit Bot

[wasm] Add Remaining I64Atomic operations for ARM64

 - Add Implementation for I64Atomic{Load, Store, Exchange,
CompareExchange} for supported MemTypes/Representations
 - Refactoring to simplify instruction selection
 - Enable tests for ARM64

Bug: v8:6532
Change-Id: I4c4a65fd3bbdc6955eda29d7e08d6eef29c55628
Reviewed-on: https://chromium-review.googlesource.com/1003225
Reviewed-by: Martyn Capewell <martyn.capewell@arm.com>
Reviewed-by: Bill Budge <bbudge@chromium.org>
Reviewed-by: Ben Smith <binji@chromium.org>
Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52598}
parent 39d546a2
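Note on the refactoring: the new reg parameter threaded through the ASSEMBLE_ATOMIC_* macros below is token-pasted onto the operand accessors (i.Output##reg(), i.Input##reg(n)), so a single macro can emit either the 32-bit (Register32) or the 64-bit (Register) form of a sequence. As a rough, hand-expanded sketch (not literal V8 source; register/temp roles follow the macros in this change), the kArm64Word64AtomicExchangeUint64 case becomes a load-acquire-exclusive / store-release-exclusive retry loop:

  // Hand expansion of ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register).
  // "i" is the Arm64OperandConverter; temp 0 holds the address, temp 1 the
  // store-exclusive status word.
  Label exchange;
  __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));  // addr = base + index
  __ Bind(&exchange);
  __ ldaxr(i.OutputRegister(), i.TempRegister(0));   // load-acquire the old value exclusively
  __ stlxr(i.TempRegister32(1), i.InputRegister(2),  // try to store-release the new value
           i.TempRegister(0));
  __ Cbnz(i.TempRegister32(1), &exchange);           // retry if the exclusive store failed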
@@ -395,67 +395,52 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
  } \
} while (0)
-#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, reg) \
  do { \
    __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
-    __ asm_instr(i.OutputRegister32(), i.TempRegister(0)); \
+    __ asm_instr(i.Output##reg(), i.TempRegister(0)); \
  } while (0)
-#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, reg) \
  do { \
    __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
-    __ asm_instr(i.InputRegister32(2), i.TempRegister(0)); \
+    __ asm_instr(i.Input##reg(2), i.TempRegister(0)); \
  } while (0)
-#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr, reg) \
  do { \
    Label exchange; \
    __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
    __ Bind(&exchange); \
-    __ load_instr(i.OutputRegister32(), i.TempRegister(0)); \
-    __ store_instr(i.TempRegister32(1), i.InputRegister32(2), \
-                   i.TempRegister(0)); \
+    __ load_instr(i.Output##reg(), i.TempRegister(0)); \
+    __ store_instr(i.TempRegister32(1), i.Input##reg(2), i.TempRegister(0)); \
    __ Cbnz(i.TempRegister32(1), &exchange); \
  } while (0)
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr, ext) \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr, ext, \
+                                                 reg) \
  do { \
    Label compareExchange; \
    Label exit; \
    __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
    __ Bind(&compareExchange); \
-    __ load_instr(i.OutputRegister32(), i.TempRegister(0)); \
-    __ Cmp(i.OutputRegister32(), Operand(i.InputRegister32(2), ext)); \
+    __ load_instr(i.Output##reg(), i.TempRegister(0)); \
+    __ Cmp(i.Output##reg(), Operand(i.Input##reg(2), ext)); \
    __ B(ne, &exit); \
-    __ store_instr(i.TempRegister32(1), i.InputRegister32(3), \
-                   i.TempRegister(0)); \
+    __ store_instr(i.TempRegister32(1), i.Input##reg(3), i.TempRegister(0)); \
    __ Cbnz(i.TempRegister32(1), &compareExchange); \
    __ Bind(&exit); \
  } while (0)
-#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr) \
-  do { \
-    Label binop; \
-    __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
-    __ Bind(&binop); \
-    __ load_instr(i.OutputRegister32(), i.TempRegister(0)); \
-    __ bin_instr(i.TempRegister32(1), i.OutputRegister32(), \
-                 Operand(i.InputRegister32(2))); \
-    __ store_instr(i.TempRegister32(2), i.TempRegister32(1), \
-                   i.TempRegister(0)); \
-    __ Cbnz(i.TempRegister32(2), &binop); \
-  } while (0)
-#define ASSEMBLE_ATOMIC64_BINOP(load_instr, store_instr, bin_instr) \
+#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr, reg) \
  do { \
    Label binop; \
    __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
    __ Bind(&binop); \
-    __ load_instr(i.OutputRegister(), i.TempRegister(0)); \
-    __ bin_instr(i.TempRegister(1), i.OutputRegister(), \
-                 Operand(i.InputRegister(2))); \
-    __ store_instr(i.TempRegister(2), i.TempRegister(1), i.TempRegister(0)); \
-    __ Cbnz(i.TempRegister(2), &binop); \
+    __ load_instr(i.Output##reg(), i.TempRegister(0)); \
+    __ bin_instr(i.Temp##reg(1), i.Output##reg(), Operand(i.Input##reg(2))); \
+    __ store_instr(i.TempRegister32(2), i.Temp##reg(1), i.TempRegister(0)); \
+    __ Cbnz(i.TempRegister32(2), &binop); \
  } while (0)
#define ASSEMBLE_IEEE754_BINOP(name) \
@@ -1606,85 +1591,116 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
             Operand(kSpeculationPoisonRegister));
      break;
    case kWord32AtomicLoadInt8:
-      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb);
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb, Register32);
      __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
      break;
    case kWord32AtomicLoadUint8:
-      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb);
+    case kArm64Word64AtomicLoadUint8:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb, Register32);
      break;
    case kWord32AtomicLoadInt16:
-      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh);
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh, Register32);
      __ Sxth(i.OutputRegister(0), i.OutputRegister(0));
      break;
    case kWord32AtomicLoadUint16:
-      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh);
+    case kArm64Word64AtomicLoadUint16:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh, Register32);
      break;
    case kWord32AtomicLoadWord32:
-      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar);
+    case kArm64Word64AtomicLoadUint32:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar, Register32);
+      break;
+    case kArm64Word64AtomicLoadUint64:
+      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar, Register);
      break;
    case kWord32AtomicStoreWord8:
-      ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrb);
+    case kArm64Word64AtomicStoreWord8:
+      ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrb, Register32);
      break;
    case kWord32AtomicStoreWord16:
-      ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrh);
+    case kArm64Word64AtomicStoreWord16:
+      ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrh, Register32);
      break;
    case kWord32AtomicStoreWord32:
-      ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr);
+    case kArm64Word64AtomicStoreWord32:
+      ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr, Register32);
+      break;
+    case kArm64Word64AtomicStoreWord64:
+      ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr, Register);
      break;
    case kWord32AtomicExchangeInt8:
-      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb);
+      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb, Register32);
      __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
      break;
    case kWord32AtomicExchangeUint8:
-      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb);
+    case kArm64Word64AtomicExchangeUint8:
+      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb, Register32);
      break;
    case kWord32AtomicExchangeInt16:
-      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh);
+      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh, Register32);
      __ Sxth(i.OutputRegister(0), i.OutputRegister(0));
      break;
    case kWord32AtomicExchangeUint16:
-      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh);
+    case kArm64Word64AtomicExchangeUint16:
+      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh, Register32);
      break;
    case kWord32AtomicExchangeWord32:
-      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr);
+    case kArm64Word64AtomicExchangeUint32:
+      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register32);
+      break;
+    case kArm64Word64AtomicExchangeUint64:
+      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register);
      break;
    case kWord32AtomicCompareExchangeInt8:
-      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB);
+      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB,
+                                               Register32);
      __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
      break;
    case kWord32AtomicCompareExchangeUint8:
-      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB);
+    case kArm64Word64AtomicCompareExchangeUint8:
+      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB,
+                                               Register32);
      break;
    case kWord32AtomicCompareExchangeInt16:
-      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH);
+      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH,
+                                               Register32);
      __ Sxth(i.OutputRegister(0), i.OutputRegister(0));
      break;
    case kWord32AtomicCompareExchangeUint16:
-      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH);
+    case kArm64Word64AtomicCompareExchangeUint16:
+      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH,
+                                               Register32);
      break;
    case kWord32AtomicCompareExchangeWord32:
-      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTW);
+    case kArm64Word64AtomicCompareExchangeUint32:
+      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTW, Register32);
+      break;
+    case kArm64Word64AtomicCompareExchangeUint64:
+      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTX, Register);
      break;
-#define ATOMIC_BINOP_CASE(op, inst) \
-  case kWord32Atomic##op##Int8: \
-    ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst); \
-    __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
-    break; \
-  case kWord32Atomic##op##Uint8: \
-  case kArm64Word64Atomic##op##Uint8: \
-    ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst); \
-    break; \
-  case kWord32Atomic##op##Int16: \
-    ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst); \
-    __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \
-    break; \
-  case kWord32Atomic##op##Uint16: \
-  case kArm64Word64Atomic##op##Uint16: \
-    ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst); \
-    break; \
-  case kWord32Atomic##op##Word32: \
-  case kArm64Word64Atomic##op##Uint32: \
-    ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst); \
+#define ATOMIC_BINOP_CASE(op, inst) \
+  case kWord32Atomic##op##Int8: \
+    ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst, Register32); \
+    __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
+    break; \
+  case kWord32Atomic##op##Uint8: \
+  case kArm64Word64Atomic##op##Uint8: \
+    ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst, Register32); \
+    break; \
+  case kWord32Atomic##op##Int16: \
+    ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst, Register32); \
+    __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \
+    break; \
+  case kWord32Atomic##op##Uint16: \
+  case kArm64Word64Atomic##op##Uint16: \
+    ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst, Register32); \
+    break; \
+  case kWord32Atomic##op##Word32: \
+  case kArm64Word64Atomic##op##Uint32: \
+    ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst, Register32); \
+    break; \
+  case kArm64Word64Atomic##op##Uint64: \
+    ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst, Register); \
    break;
      ATOMIC_BINOP_CASE(Add, Add)
      ATOMIC_BINOP_CASE(Sub, Sub)
@@ -1692,25 +1708,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      ATOMIC_BINOP_CASE(Or, Orr)
      ATOMIC_BINOP_CASE(Xor, Eor)
#undef ATOMIC_BINOP_CASE
-#define ATOMIC64_BINOP_CASE(op, inst) \
-  case kArm64Word64Atomic##op##Uint64: \
-    ASSEMBLE_ATOMIC64_BINOP(ldaxr, stlxr, inst); \
-    break;
-      ATOMIC64_BINOP_CASE(Add, Add)
-      ATOMIC64_BINOP_CASE(Sub, Sub)
-      ATOMIC64_BINOP_CASE(And, And)
-      ATOMIC64_BINOP_CASE(Or, Orr)
-      ATOMIC64_BINOP_CASE(Xor, Eor)
-#undef ATOMIC64_BINOP_CASE
#undef ASSEMBLE_SHIFT
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
#undef ASSEMBLE_ATOMIC_STORE_INTEGER
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
-#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_BINOP
-#undef ASSEMBLE_ATOMIC64_BINOP
#undef ASSEMBLE_IEEE754_BINOP
#undef ASSEMBLE_IEEE754_UNOP
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
#define SIMD_UNOP_CASE(Op, Instr, FORMAT) \
  case Op: \
...
@@ -11,316 +11,332 @@ namespace compiler {
// ARM64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(Arm64Add) \
  V(Arm64Add32) \
  V(Arm64And) \
  V(Arm64And32) \
  V(Arm64Bic) \
  V(Arm64Bic32) \
  V(Arm64Clz) \
  V(Arm64Clz32) \
  V(Arm64Cmp) \
  V(Arm64Cmp32) \
  V(Arm64Cmn) \
  V(Arm64Cmn32) \
  V(Arm64Tst) \
  V(Arm64Tst32) \
  V(Arm64Or) \
  V(Arm64Or32) \
  V(Arm64Orn) \
  V(Arm64Orn32) \
  V(Arm64Eor) \
  V(Arm64Eor32) \
  V(Arm64Eon) \
  V(Arm64Eon32) \
  V(Arm64Sub) \
  V(Arm64Sub32) \
  V(Arm64Mul) \
  V(Arm64Mul32) \
  V(Arm64Smull) \
  V(Arm64Umull) \
  V(Arm64Madd) \
  V(Arm64Madd32) \
  V(Arm64Msub) \
  V(Arm64Msub32) \
  V(Arm64Mneg) \
  V(Arm64Mneg32) \
  V(Arm64Idiv) \
  V(Arm64Idiv32) \
  V(Arm64Udiv) \
  V(Arm64Udiv32) \
  V(Arm64Imod) \
  V(Arm64Imod32) \
  V(Arm64Umod) \
  V(Arm64Umod32) \
  V(Arm64Not) \
  V(Arm64Not32) \
  V(Arm64Lsl) \
  V(Arm64Lsl32) \
  V(Arm64Lsr) \
  V(Arm64Lsr32) \
  V(Arm64Asr) \
  V(Arm64Asr32) \
  V(Arm64Ror) \
  V(Arm64Ror32) \
  V(Arm64Mov32) \
  V(Arm64Sxtb32) \
  V(Arm64Sxth32) \
  V(Arm64Sxtb) \
  V(Arm64Sxth) \
  V(Arm64Sxtw) \
  V(Arm64Sbfx32) \
  V(Arm64Ubfx) \
  V(Arm64Ubfx32) \
  V(Arm64Ubfiz32) \
  V(Arm64Bfi) \
  V(Arm64Rbit) \
  V(Arm64Rbit32) \
  V(Arm64TestAndBranch32) \
  V(Arm64TestAndBranch) \
  V(Arm64CompareAndBranch32) \
  V(Arm64CompareAndBranch) \
  V(Arm64Claim) \
  V(Arm64Poke) \
  V(Arm64PokePair) \
  V(Arm64Peek) \
  V(Arm64Float32Cmp) \
  V(Arm64Float32Add) \
  V(Arm64Float32Sub) \
  V(Arm64Float32Mul) \
  V(Arm64Float32Div) \
  V(Arm64Float32Abs) \
  V(Arm64Float32Neg) \
  V(Arm64Float32Sqrt) \
  V(Arm64Float32RoundDown) \
  V(Arm64Float32Max) \
  V(Arm64Float32Min) \
  V(Arm64Float64Cmp) \
  V(Arm64Float64Add) \
  V(Arm64Float64Sub) \
  V(Arm64Float64Mul) \
  V(Arm64Float64Div) \
  V(Arm64Float64Mod) \
  V(Arm64Float64Max) \
  V(Arm64Float64Min) \
  V(Arm64Float64Abs) \
  V(Arm64Float64Neg) \
  V(Arm64Float64Sqrt) \
  V(Arm64Float64RoundDown) \
  V(Arm64Float32RoundUp) \
  V(Arm64Float64RoundUp) \
  V(Arm64Float64RoundTiesAway) \
  V(Arm64Float32RoundTruncate) \
  V(Arm64Float64RoundTruncate) \
  V(Arm64Float32RoundTiesEven) \
  V(Arm64Float64RoundTiesEven) \
  V(Arm64Float64SilenceNaN) \
  V(Arm64Float32ToFloat64) \
  V(Arm64Float64ToFloat32) \
  V(Arm64Float32ToInt32) \
  V(Arm64Float64ToInt32) \
  V(Arm64Float32ToUint32) \
  V(Arm64Float64ToUint32) \
  V(Arm64Float32ToInt64) \
  V(Arm64Float64ToInt64) \
  V(Arm64Float32ToUint64) \
  V(Arm64Float64ToUint64) \
  V(Arm64Int32ToFloat32) \
  V(Arm64Int32ToFloat64) \
  V(Arm64Int64ToFloat32) \
  V(Arm64Int64ToFloat64) \
  V(Arm64Uint32ToFloat32) \
  V(Arm64Uint32ToFloat64) \
  V(Arm64Uint64ToFloat32) \
  V(Arm64Uint64ToFloat64) \
  V(Arm64Float64ExtractLowWord32) \
  V(Arm64Float64ExtractHighWord32) \
  V(Arm64Float64InsertLowWord32) \
  V(Arm64Float64InsertHighWord32) \
  V(Arm64Float64MoveU64) \
  V(Arm64U64MoveFloat64) \
  V(Arm64LdrS) \
  V(Arm64StrS) \
  V(Arm64LdrD) \
  V(Arm64StrD) \
  V(Arm64LdrQ) \
  V(Arm64StrQ) \
  V(Arm64Ldrb) \
  V(Arm64Ldrsb) \
  V(Arm64Strb) \
  V(Arm64Ldrh) \
  V(Arm64Ldrsh) \
  V(Arm64Strh) \
  V(Arm64Ldrsw) \
  V(Arm64LdrW) \
  V(Arm64StrW) \
  V(Arm64Ldr) \
  V(Arm64Str) \
  V(Arm64DsbIsb) \
  V(Arm64F32x4Splat) \
  V(Arm64F32x4ExtractLane) \
  V(Arm64F32x4ReplaceLane) \
  V(Arm64F32x4SConvertI32x4) \
  V(Arm64F32x4UConvertI32x4) \
  V(Arm64F32x4Abs) \
  V(Arm64F32x4Neg) \
  V(Arm64F32x4RecipApprox) \
  V(Arm64F32x4RecipSqrtApprox) \
  V(Arm64F32x4Add) \
  V(Arm64F32x4AddHoriz) \
  V(Arm64F32x4Sub) \
  V(Arm64F32x4Mul) \
  V(Arm64F32x4Min) \
  V(Arm64F32x4Max) \
  V(Arm64F32x4Eq) \
  V(Arm64F32x4Ne) \
  V(Arm64F32x4Lt) \
  V(Arm64F32x4Le) \
  V(Arm64I32x4Splat) \
  V(Arm64I32x4ExtractLane) \
  V(Arm64I32x4ReplaceLane) \
  V(Arm64I32x4SConvertF32x4) \
  V(Arm64I32x4SConvertI16x8Low) \
  V(Arm64I32x4SConvertI16x8High) \
  V(Arm64I32x4Neg) \
  V(Arm64I32x4Shl) \
  V(Arm64I32x4ShrS) \
  V(Arm64I32x4Add) \
  V(Arm64I32x4AddHoriz) \
  V(Arm64I32x4Sub) \
  V(Arm64I32x4Mul) \
  V(Arm64I32x4MinS) \
  V(Arm64I32x4MaxS) \
  V(Arm64I32x4Eq) \
  V(Arm64I32x4Ne) \
  V(Arm64I32x4GtS) \
  V(Arm64I32x4GeS) \
  V(Arm64I32x4UConvertF32x4) \
  V(Arm64I32x4UConvertI16x8Low) \
  V(Arm64I32x4UConvertI16x8High) \
  V(Arm64I32x4ShrU) \
  V(Arm64I32x4MinU) \
  V(Arm64I32x4MaxU) \
  V(Arm64I32x4GtU) \
  V(Arm64I32x4GeU) \
  V(Arm64I16x8Splat) \
  V(Arm64I16x8ExtractLane) \
  V(Arm64I16x8ReplaceLane) \
  V(Arm64I16x8SConvertI8x16Low) \
  V(Arm64I16x8SConvertI8x16High) \
  V(Arm64I16x8Neg) \
  V(Arm64I16x8Shl) \
  V(Arm64I16x8ShrS) \
  V(Arm64I16x8SConvertI32x4) \
  V(Arm64I16x8Add) \
  V(Arm64I16x8AddSaturateS) \
  V(Arm64I16x8AddHoriz) \
  V(Arm64I16x8Sub) \
  V(Arm64I16x8SubSaturateS) \
  V(Arm64I16x8Mul) \
  V(Arm64I16x8MinS) \
  V(Arm64I16x8MaxS) \
  V(Arm64I16x8Eq) \
  V(Arm64I16x8Ne) \
  V(Arm64I16x8GtS) \
  V(Arm64I16x8GeS) \
  V(Arm64I16x8UConvertI8x16Low) \
  V(Arm64I16x8UConvertI8x16High) \
  V(Arm64I16x8ShrU) \
  V(Arm64I16x8UConvertI32x4) \
  V(Arm64I16x8AddSaturateU) \
  V(Arm64I16x8SubSaturateU) \
  V(Arm64I16x8MinU) \
  V(Arm64I16x8MaxU) \
  V(Arm64I16x8GtU) \
  V(Arm64I16x8GeU) \
  V(Arm64I8x16Splat) \
  V(Arm64I8x16ExtractLane) \
  V(Arm64I8x16ReplaceLane) \
  V(Arm64I8x16Neg) \
  V(Arm64I8x16Shl) \
  V(Arm64I8x16ShrS) \
  V(Arm64I8x16SConvertI16x8) \
  V(Arm64I8x16Add) \
  V(Arm64I8x16AddSaturateS) \
  V(Arm64I8x16Sub) \
  V(Arm64I8x16SubSaturateS) \
  V(Arm64I8x16Mul) \
  V(Arm64I8x16MinS) \
  V(Arm64I8x16MaxS) \
  V(Arm64I8x16Eq) \
  V(Arm64I8x16Ne) \
  V(Arm64I8x16GtS) \
  V(Arm64I8x16GeS) \
  V(Arm64I8x16ShrU) \
  V(Arm64I8x16UConvertI16x8) \
  V(Arm64I8x16AddSaturateU) \
  V(Arm64I8x16SubSaturateU) \
  V(Arm64I8x16MinU) \
  V(Arm64I8x16MaxU) \
  V(Arm64I8x16GtU) \
  V(Arm64I8x16GeU) \
  V(Arm64S128Zero) \
  V(Arm64S128Dup) \
  V(Arm64S128And) \
  V(Arm64S128Or) \
  V(Arm64S128Xor) \
  V(Arm64S128Not) \
  V(Arm64S128Select) \
  V(Arm64S32x4ZipLeft) \
  V(Arm64S32x4ZipRight) \
  V(Arm64S32x4UnzipLeft) \
  V(Arm64S32x4UnzipRight) \
  V(Arm64S32x4TransposeLeft) \
  V(Arm64S32x4TransposeRight) \
  V(Arm64S32x4Shuffle) \
  V(Arm64S16x8ZipLeft) \
  V(Arm64S16x8ZipRight) \
  V(Arm64S16x8UnzipLeft) \
  V(Arm64S16x8UnzipRight) \
  V(Arm64S16x8TransposeLeft) \
  V(Arm64S16x8TransposeRight) \
  V(Arm64S8x16ZipLeft) \
  V(Arm64S8x16ZipRight) \
  V(Arm64S8x16UnzipLeft) \
  V(Arm64S8x16UnzipRight) \
  V(Arm64S8x16TransposeLeft) \
  V(Arm64S8x16TransposeRight) \
  V(Arm64S8x16Concat) \
  V(Arm64S8x16Shuffle) \
  V(Arm64S32x2Reverse) \
  V(Arm64S16x4Reverse) \
  V(Arm64S16x2Reverse) \
  V(Arm64S8x8Reverse) \
  V(Arm64S8x4Reverse) \
  V(Arm64S8x2Reverse) \
  V(Arm64S1x4AnyTrue) \
  V(Arm64S1x4AllTrue) \
  V(Arm64S1x8AnyTrue) \
  V(Arm64S1x8AllTrue) \
  V(Arm64S1x16AnyTrue) \
  V(Arm64S1x16AllTrue) \
-  V(Arm64Word64AtomicAddUint8) \
-  V(Arm64Word64AtomicAddUint16) \
-  V(Arm64Word64AtomicAddUint32) \
-  V(Arm64Word64AtomicAddUint64) \
-  V(Arm64Word64AtomicSubUint8) \
-  V(Arm64Word64AtomicSubUint16) \
-  V(Arm64Word64AtomicSubUint32) \
-  V(Arm64Word64AtomicSubUint64) \
-  V(Arm64Word64AtomicAndUint8) \
-  V(Arm64Word64AtomicAndUint16) \
-  V(Arm64Word64AtomicAndUint32) \
-  V(Arm64Word64AtomicAndUint64) \
-  V(Arm64Word64AtomicOrUint8) \
-  V(Arm64Word64AtomicOrUint16) \
-  V(Arm64Word64AtomicOrUint32) \
-  V(Arm64Word64AtomicOrUint64) \
-  V(Arm64Word64AtomicXorUint8) \
-  V(Arm64Word64AtomicXorUint16) \
-  V(Arm64Word64AtomicXorUint32) \
-  V(Arm64Word64AtomicXorUint64)
+  V(Arm64Word64AtomicLoadUint8) \
+  V(Arm64Word64AtomicLoadUint16) \
+  V(Arm64Word64AtomicLoadUint32) \
+  V(Arm64Word64AtomicLoadUint64) \
+  V(Arm64Word64AtomicStoreWord8) \
+  V(Arm64Word64AtomicStoreWord16) \
+  V(Arm64Word64AtomicStoreWord32) \
+  V(Arm64Word64AtomicStoreWord64) \
+  V(Arm64Word64AtomicAddUint8) \
+  V(Arm64Word64AtomicAddUint16) \
+  V(Arm64Word64AtomicAddUint32) \
+  V(Arm64Word64AtomicAddUint64) \
+  V(Arm64Word64AtomicSubUint8) \
+  V(Arm64Word64AtomicSubUint16) \
+  V(Arm64Word64AtomicSubUint32) \
+  V(Arm64Word64AtomicSubUint64) \
+  V(Arm64Word64AtomicAndUint8) \
+  V(Arm64Word64AtomicAndUint16) \
+  V(Arm64Word64AtomicAndUint32) \
+  V(Arm64Word64AtomicAndUint64) \
+  V(Arm64Word64AtomicOrUint8) \
+  V(Arm64Word64AtomicOrUint16) \
+  V(Arm64Word64AtomicOrUint32) \
+  V(Arm64Word64AtomicOrUint64) \
+  V(Arm64Word64AtomicXorUint8) \
+  V(Arm64Word64AtomicXorUint16) \
+  V(Arm64Word64AtomicXorUint32) \
+  V(Arm64Word64AtomicXorUint64) \
+  V(Arm64Word64AtomicExchangeUint8) \
+  V(Arm64Word64AtomicExchangeUint16) \
+  V(Arm64Word64AtomicExchangeUint32) \
+  V(Arm64Word64AtomicExchangeUint64) \
+  V(Arm64Word64AtomicCompareExchangeUint8) \
+  V(Arm64Word64AtomicCompareExchangeUint16) \
+  V(Arm64Word64AtomicCompareExchangeUint32) \
+  V(Arm64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
...
@@ -309,6 +309,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
    case kArm64DsbIsb:
      return kHasSideEffect;
+    case kArm64Word64AtomicLoadUint8:
+    case kArm64Word64AtomicLoadUint16:
+    case kArm64Word64AtomicLoadUint32:
+    case kArm64Word64AtomicLoadUint64:
+      return kIsLoadOperation;
+    case kArm64Word64AtomicStoreWord8:
+    case kArm64Word64AtomicStoreWord16:
+    case kArm64Word64AtomicStoreWord32:
+    case kArm64Word64AtomicStoreWord64:
    case kArm64Word64AtomicAddUint8:
    case kArm64Word64AtomicAddUint16:
    case kArm64Word64AtomicAddUint32:
@@ -329,6 +339,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
    case kArm64Word64AtomicXorUint16:
    case kArm64Word64AtomicXorUint32:
    case kArm64Word64AtomicXorUint64:
+    case kArm64Word64AtomicExchangeUint8:
+    case kArm64Word64AtomicExchangeUint16:
+    case kArm64Word64AtomicExchangeUint32:
+    case kArm64Word64AtomicExchangeUint64:
+    case kArm64Word64AtomicCompareExchangeUint8:
+    case kArm64Word64AtomicCompareExchangeUint16:
+    case kArm64Word64AtomicCompareExchangeUint32:
+    case kArm64Word64AtomicCompareExchangeUint64:
      return kHasSideEffect;
#define CASE(Name) case k##Name:
...
@@ -2102,6 +2102,76 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
  }
}

void VisitAtomicExchange(InstructionSelector* selector, Node* node,
                         ArchOpcode opcode) {
  Arm64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseRegister(base);
  inputs[input_count++] = g.UseRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(node);
  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
  InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
  selector->Emit(code, 1, outputs, input_count, inputs, arraysize(temps),
                 temps);
}

void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
                                ArchOpcode opcode) {
  Arm64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* old_value = node->InputAt(2);
  Node* new_value = node->InputAt(3);
  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseRegister(base);
  inputs[input_count++] = g.UseRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(old_value);
  inputs[input_count++] = g.UseUniqueRegister(new_value);
  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(node);
  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
  InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
  selector->Emit(code, 1, outputs, input_count, inputs, arraysize(temps),
                 temps);
}

void VisitAtomicLoad(InstructionSelector* selector, Node* node,
                     ArchOpcode opcode) {
  Arm64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
  InstructionOperand outputs[] = {g.DefineAsRegister(node)};
  InstructionOperand temps[] = {g.TempRegister()};
  InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
  selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
                 arraysize(temps), temps);
}

void VisitAtomicStore(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode) {
  Arm64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseRegister(base);
  inputs[input_count++] = g.UseRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
  InstructionOperand temps[] = {g.TempRegister()};
  selector->Emit(code, 0, nullptr, input_count, inputs, arraysize(temps),
                 temps);
}
}  // namespace

void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
@@ -2544,9 +2614,6 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
-  Arm64OperandGenerator g(this);
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
@@ -2564,20 +2631,34 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
      UNREACHABLE();
      return;
  }
-  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
-  InstructionOperand outputs[] = {g.DefineAsRegister(node)};
-  InstructionOperand temps[] = {g.TempRegister()};
-  InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
-  Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
-       arraysize(temps), temps);
+  VisitAtomicLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  ArchOpcode opcode = kArchNop;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kWord8:
+      opcode = kArm64Word64AtomicLoadUint8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = kArm64Word64AtomicLoadUint16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kArm64Word64AtomicLoadUint32;
+      break;
+    case MachineRepresentation::kWord64:
+      opcode = kArm64Word64AtomicLoadUint64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  VisitAtomicLoad(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
-  Arm64OperandGenerator g(this);
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
-  Node* value = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
@@ -2593,23 +2674,33 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
      UNREACHABLE();
      return;
  }
-  AddressingMode addressing_mode = kMode_MRR;
-  InstructionOperand inputs[3];
-  size_t input_count = 0;
-  inputs[input_count++] = g.UseUniqueRegister(base);
-  inputs[input_count++] = g.UseUniqueRegister(index);
-  inputs[input_count++] = g.UseUniqueRegister(value);
-  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
-  InstructionOperand temps[] = {g.TempRegister()};
-  Emit(code, 0, nullptr, input_count, inputs, arraysize(temps), temps);
+  VisitAtomicStore(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicStore(Node* node) {
+  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+  ArchOpcode opcode = kArchNop;
+  switch (rep) {
+    case MachineRepresentation::kWord8:
+      opcode = kArm64Word64AtomicStoreWord8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = kArm64Word64AtomicStoreWord16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kArm64Word64AtomicStoreWord32;
+      break;
+    case MachineRepresentation::kWord64:
+      opcode = kArm64Word64AtomicStoreWord64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  VisitAtomicStore(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
-  Arm64OperandGenerator g(this);
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
-  Node* value = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  MachineType type = AtomicOpRepresentationOf(node->op());
  if (type == MachineType::Int8()) {
@@ -2626,26 +2717,28 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
    UNREACHABLE();
    return;
  }
-  AddressingMode addressing_mode = kMode_MRR;
-  InstructionOperand inputs[3];
-  size_t input_count = 0;
-  inputs[input_count++] = g.UseRegister(base);
-  inputs[input_count++] = g.UseRegister(index);
-  inputs[input_count++] = g.UseUniqueRegister(value);
-  InstructionOperand outputs[1];
-  outputs[0] = g.DefineAsRegister(node);
-  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
-  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
-  Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
+  VisitAtomicExchange(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
+  ArchOpcode opcode = kArchNop;
+  MachineType type = AtomicOpRepresentationOf(node->op());
+  if (type == MachineType::Uint8()) {
+    opcode = kArm64Word64AtomicExchangeUint8;
+  } else if (type == MachineType::Uint16()) {
+    opcode = kArm64Word64AtomicExchangeUint16;
+  } else if (type == MachineType::Uint32()) {
+    opcode = kArm64Word64AtomicExchangeUint32;
+  } else if (type == MachineType::Uint64()) {
+    opcode = kArm64Word64AtomicExchangeUint64;
+  } else {
+    UNREACHABLE();
+    return;
+  }
+  VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
-  Arm64OperandGenerator g(this);
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
-  Node* old_value = node->InputAt(2);
-  Node* new_value = node->InputAt(3);
  ArchOpcode opcode = kArchNop;
  MachineType type = AtomicOpRepresentationOf(node->op());
  if (type == MachineType::Int8()) {
@@ -2662,19 +2755,25 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
    UNREACHABLE();
    return;
  }
-  AddressingMode addressing_mode = kMode_MRR;
-  InstructionOperand inputs[4];
-  size_t input_count = 0;
-  inputs[input_count++] = g.UseRegister(base);
-  inputs[input_count++] = g.UseRegister(index);
-  inputs[input_count++] = g.UseUniqueRegister(old_value);
-  inputs[input_count++] = g.UseUniqueRegister(new_value);
-  InstructionOperand outputs[1];
-  outputs[0] = g.DefineAsRegister(node);
-  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
-  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
-  Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
+  VisitAtomicCompareExchange(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
+  ArchOpcode opcode = kArchNop;
+  MachineType type = AtomicOpRepresentationOf(node->op());
+  if (type == MachineType::Uint8()) {
+    opcode = kArm64Word64AtomicCompareExchangeUint8;
+  } else if (type == MachineType::Uint16()) {
+    opcode = kArm64Word64AtomicCompareExchangeUint16;
+  } else if (type == MachineType::Uint32()) {
+    opcode = kArm64Word64AtomicCompareExchangeUint32;
+  } else if (type == MachineType::Uint64()) {
+    opcode = kArm64Word64AtomicCompareExchangeUint64;
+  } else {
+    UNREACHABLE();
+    return;
+  }
+  VisitAtomicCompareExchange(this, node, opcode);
}
void InstructionSelector::VisitAtomicBinaryOperation(
...
@@ -2317,15 +2317,13 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
#endif  // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
        // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
-#if !V8_TARGET_ARCH_X64
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
  UNIMPLEMENTED();
}
-#endif  // !V8_TARGET_ARCH_X64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64
void InstructionSelector::VisitWord64AtomicAdd(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicSub(Node* node) { UNIMPLEMENTED(); }
@@ -2335,9 +2333,7 @@ void InstructionSelector::VisitWord64AtomicAnd(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicOr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicXor(Node* node) { UNIMPLEMENTED(); }
-#endif  // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64
-#if !V8_TARGET_ARCH_X64
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
  UNIMPLEMENTED();
}
@@ -2345,7 +2341,7 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
  UNIMPLEMENTED();
}
-#endif  // !V8_TARGET_ARCH_X64
+#endif  // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
    !V8_TARGET_ARCH_MIPS64
...
@@ -46,11 +46,9 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr) {
WASM_COMPILED_EXEC_TEST(I64AtomicXor) {
  RunU64BinOp(execution_mode, kExprI64AtomicXor, Xor);
}
-#if V8_TARGET_ARCH_X64
WASM_COMPILED_EXEC_TEST(I64AtomicExchange) {
  RunU64BinOp(execution_mode, kExprI64AtomicExchange, Exchange);
}
-#endif  // V8_TARGET_ARCH_X64
void RunU32BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
                 Uint32BinOp expected_op) {
@@ -88,11 +86,9 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr32U) {
WASM_COMPILED_EXEC_TEST(I64AtomicXor32U) {
  RunU32BinOp(execution_mode, kExprI64AtomicXor32U, Xor);
}
-#if V8_TARGET_ARCH_X64
WASM_COMPILED_EXEC_TEST(I64AtomicExchange32U) {
  RunU32BinOp(execution_mode, kExprI64AtomicExchange32U, Exchange);
}
-#endif  // V8_TARGET_ARCH_X64
void RunU16BinOp(WasmExecutionMode mode, WasmOpcode wasm_op,
                 Uint16BinOp expected_op) {
@@ -130,11 +126,9 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr16U) {
WASM_COMPILED_EXEC_TEST(I64AtomicXor16U) {
  RunU16BinOp(execution_mode, kExprI64AtomicXor16U, Xor);
}
-#if V8_TARGET_ARCH_X64
WASM_COMPILED_EXEC_TEST(I64AtomicExchange16U) {
  RunU16BinOp(execution_mode, kExprI64AtomicExchange16U, Exchange);
}
-#endif  // V8_TARGET_ARCH_X64
void RunU8BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
                Uint8BinOp expected_op) {
@@ -172,8 +166,6 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr8U) {
WASM_COMPILED_EXEC_TEST(I64AtomicXor8U) {
  RunU8BinOp(execution_mode, kExprI64AtomicXor8U, Xor);
}
-#if V8_TARGET_ARCH_X64
WASM_COMPILED_EXEC_TEST(I64AtomicExchange8U) {
  RunU8BinOp(execution_mode, kExprI64AtomicExchange8U, Exchange);
}
@@ -397,7 +389,6 @@ WASM_COMPILED_EXEC_TEST(I64AtomicStoreLoad8U) {
    CHECK_EQ(*i, r.builder().ReadMemory(&memory[0]));
  }
}
-#endif  // V8_TARGET_ARCH_X64
}  // namespace test_run_wasm_atomics_64
}  // namespace wasm
...