Commit e301d71f authored by Georg Neis, committed by V8 LUCI CQ

[compiler] Teach InstructionScheduler about protected memory accesses

Because protected memory accesses can trap, we don't want them to be
reordered as freely as unprotected accesses.

As part of this, make explicit which opcodes support a MemoryAccessMode.

Bug: v8:12018
Change-Id: I9db3053d7d62ffce6d3c95d62adce71ae40dae62
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3172770
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77031}
parent bb5fa039
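
The pattern applied to each backend in the diff below can be sketched for a hypothetical "Foo" architecture (the names here are illustrative, not real V8 opcodes): opcodes that may carry a MemoryAccessMode are pulled into their own list macro, that macro is folded back into the full opcode list, and a HasMemoryAccessMode() predicate generated from it lets the scheduler and code generator ask whether an instruction may trap.

    // Hypothetical backend "Foo" (illustrative only).
    #define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
      V(FooLoadWord)                                            \
      V(FooStoreWord)

    #define TARGET_ARCH_OPCODE_LIST(V)                   \
      TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
      V(FooAdd)                                          \
      V(FooSub)

    // With the split in place, "does this opcode support a MemoryAccessMode?"
    // becomes a mechanical switch over the sub-list:
    inline bool HasMemoryAccessMode(ArchOpcode opcode) {
      switch (opcode) {
    #define CASE(Name) \
      case k##Name:    \
        return true;
        TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(CASE)
    #undef CASE
        default:
          return false;
      }
    }
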
@@ -11,357 +11,362 @@ namespace compiler {
// ARM-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(ArmAdd) \
V(ArmAnd) \
V(ArmBic) \
V(ArmClz) \
V(ArmCmp) \
V(ArmCmn) \
V(ArmTst) \
V(ArmTeq) \
V(ArmOrr) \
V(ArmEor) \
V(ArmSub) \
V(ArmRsb) \
V(ArmMul) \
V(ArmMla) \
V(ArmMls) \
V(ArmSmull) \
V(ArmSmmul) \
V(ArmSmmla) \
V(ArmUmull) \
V(ArmSdiv) \
V(ArmUdiv) \
V(ArmMov) \
V(ArmMvn) \
V(ArmBfc) \
V(ArmUbfx) \
V(ArmSbfx) \
V(ArmSxtb) \
V(ArmSxth) \
V(ArmSxtab) \
V(ArmSxtah) \
V(ArmUxtb) \
V(ArmUxth) \
V(ArmUxtab) \
V(ArmRbit) \
V(ArmRev) \
V(ArmUxtah) \
V(ArmAddPair) \
V(ArmSubPair) \
V(ArmMulPair) \
V(ArmLslPair) \
V(ArmLsrPair) \
V(ArmAsrPair) \
V(ArmVcmpF32) \
V(ArmVaddF32) \
V(ArmVsubF32) \
V(ArmVmulF32) \
V(ArmVmlaF32) \
V(ArmVmlsF32) \
V(ArmVdivF32) \
V(ArmVabsF32) \
V(ArmVnegF32) \
V(ArmVsqrtF32) \
V(ArmVcmpF64) \
V(ArmVaddF64) \
V(ArmVsubF64) \
V(ArmVmulF64) \
V(ArmVmlaF64) \
V(ArmVmlsF64) \
V(ArmVdivF64) \
V(ArmVmodF64) \
V(ArmVabsF64) \
V(ArmVnegF64) \
V(ArmVsqrtF64) \
V(ArmVmullLow) \
V(ArmVmullHigh) \
V(ArmVrintmF32) \
V(ArmVrintmF64) \
V(ArmVrintpF32) \
V(ArmVrintpF64) \
V(ArmVrintzF32) \
V(ArmVrintzF64) \
V(ArmVrintaF64) \
V(ArmVrintnF32) \
V(ArmVrintnF64) \
V(ArmVcvtF32F64) \
V(ArmVcvtF64F32) \
V(ArmVcvtF32S32) \
V(ArmVcvtF32U32) \
V(ArmVcvtF64S32) \
V(ArmVcvtF64U32) \
V(ArmVcvtS32F32) \
V(ArmVcvtU32F32) \
V(ArmVcvtS32F64) \
V(ArmVcvtU32F64) \
V(ArmVmovU32F32) \
V(ArmVmovF32U32) \
V(ArmVmovLowU32F64) \
V(ArmVmovLowF64U32) \
V(ArmVmovHighU32F64) \
V(ArmVmovHighF64U32) \
V(ArmVmovF64U32U32) \
V(ArmVmovU32U32F64) \
V(ArmVldrF32) \
V(ArmVstrF32) \
V(ArmVldrF64) \
V(ArmVld1F64) \
V(ArmVstrF64) \
V(ArmVst1F64) \
V(ArmVld1S128) \
V(ArmVst1S128) \
V(ArmVcnt) \
V(ArmVpadal) \
V(ArmVpaddl) \
V(ArmFloat32Max) \
V(ArmFloat64Max) \
V(ArmFloat32Min) \
V(ArmFloat64Min) \
V(ArmFloat64SilenceNaN) \
V(ArmLdrb) \
V(ArmLdrsb) \
V(ArmStrb) \
V(ArmLdrh) \
V(ArmLdrsh) \
V(ArmStrh) \
V(ArmLdr) \
V(ArmStr) \
V(ArmPush) \
V(ArmPoke) \
V(ArmPeek) \
V(ArmDmbIsh) \
V(ArmDsbIsb) \
V(ArmF64x2Splat) \
V(ArmF64x2ExtractLane) \
V(ArmF64x2ReplaceLane) \
V(ArmF64x2Abs) \
V(ArmF64x2Neg) \
V(ArmF64x2Sqrt) \
V(ArmF64x2Add) \
V(ArmF64x2Sub) \
V(ArmF64x2Mul) \
V(ArmF64x2Div) \
V(ArmF64x2Min) \
V(ArmF64x2Max) \
V(ArmF64x2Eq) \
V(ArmF64x2Ne) \
V(ArmF64x2Lt) \
V(ArmF64x2Le) \
V(ArmF64x2Pmin) \
V(ArmF64x2Pmax) \
V(ArmF64x2Ceil) \
V(ArmF64x2Floor) \
V(ArmF64x2Trunc) \
V(ArmF64x2NearestInt) \
V(ArmF64x2ConvertLowI32x4S) \
V(ArmF64x2ConvertLowI32x4U) \
V(ArmF64x2PromoteLowF32x4) \
V(ArmF32x4Splat) \
V(ArmF32x4ExtractLane) \
V(ArmF32x4ReplaceLane) \
V(ArmF32x4SConvertI32x4) \
V(ArmF32x4UConvertI32x4) \
V(ArmF32x4Abs) \
V(ArmF32x4Neg) \
V(ArmF32x4Sqrt) \
V(ArmF32x4RecipApprox) \
V(ArmF32x4RecipSqrtApprox) \
V(ArmF32x4Add) \
V(ArmF32x4Sub) \
V(ArmF32x4Mul) \
V(ArmF32x4Div) \
V(ArmF32x4Min) \
V(ArmF32x4Max) \
V(ArmF32x4Eq) \
V(ArmF32x4Ne) \
V(ArmF32x4Lt) \
V(ArmF32x4Le) \
V(ArmF32x4Pmin) \
V(ArmF32x4Pmax) \
V(ArmF32x4DemoteF64x2Zero) \
V(ArmI64x2SplatI32Pair) \
V(ArmI64x2ReplaceLaneI32Pair) \
V(ArmI64x2Abs) \
V(ArmI64x2Neg) \
V(ArmI64x2Shl) \
V(ArmI64x2ShrS) \
V(ArmI64x2Add) \
V(ArmI64x2Sub) \
V(ArmI64x2Mul) \
V(ArmI64x2ShrU) \
V(ArmI64x2BitMask) \
V(ArmI64x2Eq) \
V(ArmI64x2Ne) \
V(ArmI64x2GtS) \
V(ArmI64x2GeS) \
V(ArmI64x2SConvertI32x4Low) \
V(ArmI64x2SConvertI32x4High) \
V(ArmI64x2UConvertI32x4Low) \
V(ArmI64x2UConvertI32x4High) \
V(ArmI32x4Splat) \
V(ArmI32x4ExtractLane) \
V(ArmI32x4ReplaceLane) \
V(ArmI32x4SConvertF32x4) \
V(ArmI32x4SConvertI16x8Low) \
V(ArmI32x4SConvertI16x8High) \
V(ArmI32x4Neg) \
V(ArmI32x4Shl) \
V(ArmI32x4ShrS) \
V(ArmI32x4Add) \
V(ArmI32x4Sub) \
V(ArmI32x4Mul) \
V(ArmI32x4MinS) \
V(ArmI32x4MaxS) \
V(ArmI32x4Eq) \
V(ArmI32x4Ne) \
V(ArmI32x4GtS) \
V(ArmI32x4GeS) \
V(ArmI32x4UConvertF32x4) \
V(ArmI32x4UConvertI16x8Low) \
V(ArmI32x4UConvertI16x8High) \
V(ArmI32x4ShrU) \
V(ArmI32x4MinU) \
V(ArmI32x4MaxU) \
V(ArmI32x4GtU) \
V(ArmI32x4GeU) \
V(ArmI32x4Abs) \
V(ArmI32x4BitMask) \
V(ArmI32x4DotI16x8S) \
V(ArmI32x4TruncSatF64x2SZero) \
V(ArmI32x4TruncSatF64x2UZero) \
V(ArmI16x8Splat) \
V(ArmI16x8ExtractLaneS) \
V(ArmI16x8ReplaceLane) \
V(ArmI16x8SConvertI8x16Low) \
V(ArmI16x8SConvertI8x16High) \
V(ArmI16x8Neg) \
V(ArmI16x8Shl) \
V(ArmI16x8ShrS) \
V(ArmI16x8SConvertI32x4) \
V(ArmI16x8Add) \
V(ArmI16x8AddSatS) \
V(ArmI16x8Sub) \
V(ArmI16x8SubSatS) \
V(ArmI16x8Mul) \
V(ArmI16x8MinS) \
V(ArmI16x8MaxS) \
V(ArmI16x8Eq) \
V(ArmI16x8Ne) \
V(ArmI16x8GtS) \
V(ArmI16x8GeS) \
V(ArmI16x8ExtractLaneU) \
V(ArmI16x8UConvertI8x16Low) \
V(ArmI16x8UConvertI8x16High) \
V(ArmI16x8ShrU) \
V(ArmI16x8UConvertI32x4) \
V(ArmI16x8AddSatU) \
V(ArmI16x8SubSatU) \
V(ArmI16x8MinU) \
V(ArmI16x8MaxU) \
V(ArmI16x8GtU) \
V(ArmI16x8GeU) \
V(ArmI16x8RoundingAverageU) \
V(ArmI16x8Abs) \
V(ArmI16x8BitMask) \
V(ArmI16x8Q15MulRSatS) \
V(ArmI8x16Splat) \
V(ArmI8x16ExtractLaneS) \
V(ArmI8x16ReplaceLane) \
V(ArmI8x16Neg) \
V(ArmI8x16Shl) \
V(ArmI8x16ShrS) \
V(ArmI8x16SConvertI16x8) \
V(ArmI8x16Add) \
V(ArmI8x16AddSatS) \
V(ArmI8x16Sub) \
V(ArmI8x16SubSatS) \
V(ArmI8x16MinS) \
V(ArmI8x16MaxS) \
V(ArmI8x16Eq) \
V(ArmI8x16Ne) \
V(ArmI8x16GtS) \
V(ArmI8x16GeS) \
V(ArmI8x16ExtractLaneU) \
V(ArmI8x16ShrU) \
V(ArmI8x16UConvertI16x8) \
V(ArmI8x16AddSatU) \
V(ArmI8x16SubSatU) \
V(ArmI8x16MinU) \
V(ArmI8x16MaxU) \
V(ArmI8x16GtU) \
V(ArmI8x16GeU) \
V(ArmI8x16RoundingAverageU) \
V(ArmI8x16Abs) \
V(ArmI8x16BitMask) \
V(ArmS128Const) \
V(ArmS128Zero) \
V(ArmS128AllOnes) \
V(ArmS128Dup) \
V(ArmS128And) \
V(ArmS128Or) \
V(ArmS128Xor) \
V(ArmS128Not) \
V(ArmS128Select) \
V(ArmS128AndNot) \
V(ArmS32x4ZipLeft) \
V(ArmS32x4ZipRight) \
V(ArmS32x4UnzipLeft) \
V(ArmS32x4UnzipRight) \
V(ArmS32x4TransposeLeft) \
V(ArmS32x4TransposeRight) \
V(ArmS32x4Shuffle) \
V(ArmS16x8ZipLeft) \
V(ArmS16x8ZipRight) \
V(ArmS16x8UnzipLeft) \
V(ArmS16x8UnzipRight) \
V(ArmS16x8TransposeLeft) \
V(ArmS16x8TransposeRight) \
V(ArmS8x16ZipLeft) \
V(ArmS8x16ZipRight) \
V(ArmS8x16UnzipLeft) \
V(ArmS8x16UnzipRight) \
V(ArmS8x16TransposeLeft) \
V(ArmS8x16TransposeRight) \
V(ArmS8x16Concat) \
V(ArmI8x16Swizzle) \
V(ArmI8x16Shuffle) \
V(ArmS32x2Reverse) \
V(ArmS16x4Reverse) \
V(ArmS16x2Reverse) \
V(ArmS8x8Reverse) \
V(ArmS8x4Reverse) \
V(ArmS8x2Reverse) \
V(ArmI64x2AllTrue) \
V(ArmI32x4AllTrue) \
V(ArmI16x8AllTrue) \
V(ArmV128AnyTrue) \
V(ArmI8x16AllTrue) \
V(ArmS128Load8Splat) \
V(ArmS128Load16Splat) \
V(ArmS128Load32Splat) \
V(ArmS128Load64Splat) \
V(ArmS128Load8x8S) \
V(ArmS128Load8x8U) \
V(ArmS128Load16x4S) \
V(ArmS128Load16x4U) \
V(ArmS128Load32x2S) \
V(ArmS128Load32x2U) \
V(ArmS128Load32Zero) \
V(ArmS128Load64Zero) \
V(ArmS128LoadLaneLow) \
V(ArmS128LoadLaneHigh) \
V(ArmS128StoreLaneLow) \
V(ArmS128StoreLaneHigh) \
V(ArmWord32AtomicPairLoad) \
V(ArmWord32AtomicPairStore) \
V(ArmWord32AtomicPairAdd) \
V(ArmWord32AtomicPairSub) \
V(ArmWord32AtomicPairAnd) \
V(ArmWord32AtomicPairOr) \
V(ArmWord32AtomicPairXor) \
V(ArmWord32AtomicPairExchange) \
// Opcodes that support a MemoryAccessMode.
#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
#define TARGET_ARCH_OPCODE_LIST(V) \
TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
V(ArmAdd) \
V(ArmAnd) \
V(ArmBic) \
V(ArmClz) \
V(ArmCmp) \
V(ArmCmn) \
V(ArmTst) \
V(ArmTeq) \
V(ArmOrr) \
V(ArmEor) \
V(ArmSub) \
V(ArmRsb) \
V(ArmMul) \
V(ArmMla) \
V(ArmMls) \
V(ArmSmull) \
V(ArmSmmul) \
V(ArmSmmla) \
V(ArmUmull) \
V(ArmSdiv) \
V(ArmUdiv) \
V(ArmMov) \
V(ArmMvn) \
V(ArmBfc) \
V(ArmUbfx) \
V(ArmSbfx) \
V(ArmSxtb) \
V(ArmSxth) \
V(ArmSxtab) \
V(ArmSxtah) \
V(ArmUxtb) \
V(ArmUxth) \
V(ArmUxtab) \
V(ArmRbit) \
V(ArmRev) \
V(ArmUxtah) \
V(ArmAddPair) \
V(ArmSubPair) \
V(ArmMulPair) \
V(ArmLslPair) \
V(ArmLsrPair) \
V(ArmAsrPair) \
V(ArmVcmpF32) \
V(ArmVaddF32) \
V(ArmVsubF32) \
V(ArmVmulF32) \
V(ArmVmlaF32) \
V(ArmVmlsF32) \
V(ArmVdivF32) \
V(ArmVabsF32) \
V(ArmVnegF32) \
V(ArmVsqrtF32) \
V(ArmVcmpF64) \
V(ArmVaddF64) \
V(ArmVsubF64) \
V(ArmVmulF64) \
V(ArmVmlaF64) \
V(ArmVmlsF64) \
V(ArmVdivF64) \
V(ArmVmodF64) \
V(ArmVabsF64) \
V(ArmVnegF64) \
V(ArmVsqrtF64) \
V(ArmVmullLow) \
V(ArmVmullHigh) \
V(ArmVrintmF32) \
V(ArmVrintmF64) \
V(ArmVrintpF32) \
V(ArmVrintpF64) \
V(ArmVrintzF32) \
V(ArmVrintzF64) \
V(ArmVrintaF64) \
V(ArmVrintnF32) \
V(ArmVrintnF64) \
V(ArmVcvtF32F64) \
V(ArmVcvtF64F32) \
V(ArmVcvtF32S32) \
V(ArmVcvtF32U32) \
V(ArmVcvtF64S32) \
V(ArmVcvtF64U32) \
V(ArmVcvtS32F32) \
V(ArmVcvtU32F32) \
V(ArmVcvtS32F64) \
V(ArmVcvtU32F64) \
V(ArmVmovU32F32) \
V(ArmVmovF32U32) \
V(ArmVmovLowU32F64) \
V(ArmVmovLowF64U32) \
V(ArmVmovHighU32F64) \
V(ArmVmovHighF64U32) \
V(ArmVmovF64U32U32) \
V(ArmVmovU32U32F64) \
V(ArmVldrF32) \
V(ArmVstrF32) \
V(ArmVldrF64) \
V(ArmVld1F64) \
V(ArmVstrF64) \
V(ArmVst1F64) \
V(ArmVld1S128) \
V(ArmVst1S128) \
V(ArmVcnt) \
V(ArmVpadal) \
V(ArmVpaddl) \
V(ArmFloat32Max) \
V(ArmFloat64Max) \
V(ArmFloat32Min) \
V(ArmFloat64Min) \
V(ArmFloat64SilenceNaN) \
V(ArmLdrb) \
V(ArmLdrsb) \
V(ArmStrb) \
V(ArmLdrh) \
V(ArmLdrsh) \
V(ArmStrh) \
V(ArmLdr) \
V(ArmStr) \
V(ArmPush) \
V(ArmPoke) \
V(ArmPeek) \
V(ArmDmbIsh) \
V(ArmDsbIsb) \
V(ArmF64x2Splat) \
V(ArmF64x2ExtractLane) \
V(ArmF64x2ReplaceLane) \
V(ArmF64x2Abs) \
V(ArmF64x2Neg) \
V(ArmF64x2Sqrt) \
V(ArmF64x2Add) \
V(ArmF64x2Sub) \
V(ArmF64x2Mul) \
V(ArmF64x2Div) \
V(ArmF64x2Min) \
V(ArmF64x2Max) \
V(ArmF64x2Eq) \
V(ArmF64x2Ne) \
V(ArmF64x2Lt) \
V(ArmF64x2Le) \
V(ArmF64x2Pmin) \
V(ArmF64x2Pmax) \
V(ArmF64x2Ceil) \
V(ArmF64x2Floor) \
V(ArmF64x2Trunc) \
V(ArmF64x2NearestInt) \
V(ArmF64x2ConvertLowI32x4S) \
V(ArmF64x2ConvertLowI32x4U) \
V(ArmF64x2PromoteLowF32x4) \
V(ArmF32x4Splat) \
V(ArmF32x4ExtractLane) \
V(ArmF32x4ReplaceLane) \
V(ArmF32x4SConvertI32x4) \
V(ArmF32x4UConvertI32x4) \
V(ArmF32x4Abs) \
V(ArmF32x4Neg) \
V(ArmF32x4Sqrt) \
V(ArmF32x4RecipApprox) \
V(ArmF32x4RecipSqrtApprox) \
V(ArmF32x4Add) \
V(ArmF32x4Sub) \
V(ArmF32x4Mul) \
V(ArmF32x4Div) \
V(ArmF32x4Min) \
V(ArmF32x4Max) \
V(ArmF32x4Eq) \
V(ArmF32x4Ne) \
V(ArmF32x4Lt) \
V(ArmF32x4Le) \
V(ArmF32x4Pmin) \
V(ArmF32x4Pmax) \
V(ArmF32x4DemoteF64x2Zero) \
V(ArmI64x2SplatI32Pair) \
V(ArmI64x2ReplaceLaneI32Pair) \
V(ArmI64x2Abs) \
V(ArmI64x2Neg) \
V(ArmI64x2Shl) \
V(ArmI64x2ShrS) \
V(ArmI64x2Add) \
V(ArmI64x2Sub) \
V(ArmI64x2Mul) \
V(ArmI64x2ShrU) \
V(ArmI64x2BitMask) \
V(ArmI64x2Eq) \
V(ArmI64x2Ne) \
V(ArmI64x2GtS) \
V(ArmI64x2GeS) \
V(ArmI64x2SConvertI32x4Low) \
V(ArmI64x2SConvertI32x4High) \
V(ArmI64x2UConvertI32x4Low) \
V(ArmI64x2UConvertI32x4High) \
V(ArmI32x4Splat) \
V(ArmI32x4ExtractLane) \
V(ArmI32x4ReplaceLane) \
V(ArmI32x4SConvertF32x4) \
V(ArmI32x4SConvertI16x8Low) \
V(ArmI32x4SConvertI16x8High) \
V(ArmI32x4Neg) \
V(ArmI32x4Shl) \
V(ArmI32x4ShrS) \
V(ArmI32x4Add) \
V(ArmI32x4Sub) \
V(ArmI32x4Mul) \
V(ArmI32x4MinS) \
V(ArmI32x4MaxS) \
V(ArmI32x4Eq) \
V(ArmI32x4Ne) \
V(ArmI32x4GtS) \
V(ArmI32x4GeS) \
V(ArmI32x4UConvertF32x4) \
V(ArmI32x4UConvertI16x8Low) \
V(ArmI32x4UConvertI16x8High) \
V(ArmI32x4ShrU) \
V(ArmI32x4MinU) \
V(ArmI32x4MaxU) \
V(ArmI32x4GtU) \
V(ArmI32x4GeU) \
V(ArmI32x4Abs) \
V(ArmI32x4BitMask) \
V(ArmI32x4DotI16x8S) \
V(ArmI32x4TruncSatF64x2SZero) \
V(ArmI32x4TruncSatF64x2UZero) \
V(ArmI16x8Splat) \
V(ArmI16x8ExtractLaneS) \
V(ArmI16x8ReplaceLane) \
V(ArmI16x8SConvertI8x16Low) \
V(ArmI16x8SConvertI8x16High) \
V(ArmI16x8Neg) \
V(ArmI16x8Shl) \
V(ArmI16x8ShrS) \
V(ArmI16x8SConvertI32x4) \
V(ArmI16x8Add) \
V(ArmI16x8AddSatS) \
V(ArmI16x8Sub) \
V(ArmI16x8SubSatS) \
V(ArmI16x8Mul) \
V(ArmI16x8MinS) \
V(ArmI16x8MaxS) \
V(ArmI16x8Eq) \
V(ArmI16x8Ne) \
V(ArmI16x8GtS) \
V(ArmI16x8GeS) \
V(ArmI16x8ExtractLaneU) \
V(ArmI16x8UConvertI8x16Low) \
V(ArmI16x8UConvertI8x16High) \
V(ArmI16x8ShrU) \
V(ArmI16x8UConvertI32x4) \
V(ArmI16x8AddSatU) \
V(ArmI16x8SubSatU) \
V(ArmI16x8MinU) \
V(ArmI16x8MaxU) \
V(ArmI16x8GtU) \
V(ArmI16x8GeU) \
V(ArmI16x8RoundingAverageU) \
V(ArmI16x8Abs) \
V(ArmI16x8BitMask) \
V(ArmI16x8Q15MulRSatS) \
V(ArmI8x16Splat) \
V(ArmI8x16ExtractLaneS) \
V(ArmI8x16ReplaceLane) \
V(ArmI8x16Neg) \
V(ArmI8x16Shl) \
V(ArmI8x16ShrS) \
V(ArmI8x16SConvertI16x8) \
V(ArmI8x16Add) \
V(ArmI8x16AddSatS) \
V(ArmI8x16Sub) \
V(ArmI8x16SubSatS) \
V(ArmI8x16MinS) \
V(ArmI8x16MaxS) \
V(ArmI8x16Eq) \
V(ArmI8x16Ne) \
V(ArmI8x16GtS) \
V(ArmI8x16GeS) \
V(ArmI8x16ExtractLaneU) \
V(ArmI8x16ShrU) \
V(ArmI8x16UConvertI16x8) \
V(ArmI8x16AddSatU) \
V(ArmI8x16SubSatU) \
V(ArmI8x16MinU) \
V(ArmI8x16MaxU) \
V(ArmI8x16GtU) \
V(ArmI8x16GeU) \
V(ArmI8x16RoundingAverageU) \
V(ArmI8x16Abs) \
V(ArmI8x16BitMask) \
V(ArmS128Const) \
V(ArmS128Zero) \
V(ArmS128AllOnes) \
V(ArmS128Dup) \
V(ArmS128And) \
V(ArmS128Or) \
V(ArmS128Xor) \
V(ArmS128Not) \
V(ArmS128Select) \
V(ArmS128AndNot) \
V(ArmS32x4ZipLeft) \
V(ArmS32x4ZipRight) \
V(ArmS32x4UnzipLeft) \
V(ArmS32x4UnzipRight) \
V(ArmS32x4TransposeLeft) \
V(ArmS32x4TransposeRight) \
V(ArmS32x4Shuffle) \
V(ArmS16x8ZipLeft) \
V(ArmS16x8ZipRight) \
V(ArmS16x8UnzipLeft) \
V(ArmS16x8UnzipRight) \
V(ArmS16x8TransposeLeft) \
V(ArmS16x8TransposeRight) \
V(ArmS8x16ZipLeft) \
V(ArmS8x16ZipRight) \
V(ArmS8x16UnzipLeft) \
V(ArmS8x16UnzipRight) \
V(ArmS8x16TransposeLeft) \
V(ArmS8x16TransposeRight) \
V(ArmS8x16Concat) \
V(ArmI8x16Swizzle) \
V(ArmI8x16Shuffle) \
V(ArmS32x2Reverse) \
V(ArmS16x4Reverse) \
V(ArmS16x2Reverse) \
V(ArmS8x8Reverse) \
V(ArmS8x4Reverse) \
V(ArmS8x2Reverse) \
V(ArmI64x2AllTrue) \
V(ArmI32x4AllTrue) \
V(ArmI16x8AllTrue) \
V(ArmV128AnyTrue) \
V(ArmI8x16AllTrue) \
V(ArmS128Load8Splat) \
V(ArmS128Load16Splat) \
V(ArmS128Load32Splat) \
V(ArmS128Load64Splat) \
V(ArmS128Load8x8S) \
V(ArmS128Load8x8U) \
V(ArmS128Load16x4S) \
V(ArmS128Load16x4U) \
V(ArmS128Load32x2S) \
V(ArmS128Load32x2U) \
V(ArmS128Load32Zero) \
V(ArmS128Load64Zero) \
V(ArmS128LoadLaneLow) \
V(ArmS128LoadLaneHigh) \
V(ArmS128StoreLaneLow) \
V(ArmS128StoreLaneHigh) \
V(ArmWord32AtomicPairLoad) \
V(ArmWord32AtomicPairStore) \
V(ArmWord32AtomicPairAdd) \
V(ArmWord32AtomicPairSub) \
V(ArmWord32AtomicPairAnd) \
V(ArmWord32AtomicPairOr) \
V(ArmWord32AtomicPairXor) \
V(ArmWord32AtomicPairExchange) \
V(ArmWord32AtomicPairCompareExchange)
// Addressing modes represent the "shape" of inputs to an instruction.
@@ -11,339 +11,344 @@ namespace compiler {
// ARM64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(Arm64Add) \
V(Arm64Add32) \
V(Arm64And) \
V(Arm64And32) \
V(Arm64Bic) \
V(Arm64Bic32) \
V(Arm64Clz) \
V(Arm64Clz32) \
V(Arm64Cmp) \
V(Arm64Cmp32) \
V(Arm64Cmn) \
V(Arm64Cmn32) \
V(Arm64Cnt) \
V(Arm64Cnt32) \
V(Arm64Cnt64) \
V(Arm64Tst) \
V(Arm64Tst32) \
V(Arm64Or) \
V(Arm64Or32) \
V(Arm64Orn) \
V(Arm64Orn32) \
V(Arm64Eor) \
V(Arm64Eor32) \
V(Arm64Eon) \
V(Arm64Eon32) \
V(Arm64Sadalp) \
V(Arm64Saddlp) \
V(Arm64Sub) \
V(Arm64Sub32) \
V(Arm64Mul) \
V(Arm64Mul32) \
V(Arm64Smlal) \
V(Arm64Smlal2) \
V(Arm64Smull) \
V(Arm64Smull2) \
V(Arm64Uadalp) \
V(Arm64Uaddlp) \
V(Arm64Umlal) \
V(Arm64Umlal2) \
V(Arm64Umull) \
V(Arm64Umull2) \
V(Arm64Madd) \
V(Arm64Madd32) \
V(Arm64Msub) \
V(Arm64Msub32) \
V(Arm64Mneg) \
V(Arm64Mneg32) \
V(Arm64Idiv) \
V(Arm64Idiv32) \
V(Arm64Udiv) \
V(Arm64Udiv32) \
V(Arm64Imod) \
V(Arm64Imod32) \
V(Arm64Umod) \
V(Arm64Umod32) \
V(Arm64Not) \
V(Arm64Not32) \
V(Arm64Lsl) \
V(Arm64Lsl32) \
V(Arm64Lsr) \
V(Arm64Lsr32) \
V(Arm64Asr) \
V(Arm64Asr32) \
V(Arm64Ror) \
V(Arm64Ror32) \
V(Arm64Mov32) \
V(Arm64Sxtb32) \
V(Arm64Sxth32) \
V(Arm64Sxtb) \
V(Arm64Sxth) \
V(Arm64Sxtw) \
V(Arm64Sbfx) \
V(Arm64Sbfx32) \
V(Arm64Ubfx) \
V(Arm64Ubfx32) \
V(Arm64Ubfiz32) \
V(Arm64Bfi) \
V(Arm64Rbit) \
V(Arm64Rbit32) \
V(Arm64Rev) \
V(Arm64Rev32) \
V(Arm64TestAndBranch32) \
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
V(Arm64CompareAndBranch) \
V(Arm64Claim) \
V(Arm64Poke) \
V(Arm64PokePair) \
V(Arm64Peek) \
V(Arm64Float32Cmp) \
V(Arm64Float32Add) \
V(Arm64Float32Sub) \
V(Arm64Float32Mul) \
V(Arm64Float32Div) \
V(Arm64Float32Abs) \
V(Arm64Float32Abd) \
V(Arm64Float32Neg) \
V(Arm64Float32Sqrt) \
V(Arm64Float32Fnmul) \
V(Arm64Float32RoundDown) \
V(Arm64Float32Max) \
V(Arm64Float32Min) \
V(Arm64Float64Cmp) \
V(Arm64Float64Add) \
V(Arm64Float64Sub) \
V(Arm64Float64Mul) \
V(Arm64Float64Div) \
V(Arm64Float64Mod) \
V(Arm64Float64Max) \
V(Arm64Float64Min) \
V(Arm64Float64Abs) \
V(Arm64Float64Abd) \
V(Arm64Float64Neg) \
V(Arm64Float64Sqrt) \
V(Arm64Float64Fnmul) \
V(Arm64Float64RoundDown) \
V(Arm64Float32RoundUp) \
V(Arm64Float64RoundUp) \
V(Arm64Float64RoundTiesAway) \
V(Arm64Float32RoundTruncate) \
V(Arm64Float64RoundTruncate) \
V(Arm64Float32RoundTiesEven) \
V(Arm64Float64RoundTiesEven) \
V(Arm64Float64SilenceNaN) \
V(Arm64Float32ToFloat64) \
V(Arm64Float64ToFloat32) \
V(Arm64Float32ToInt32) \
V(Arm64Float64ToInt32) \
V(Arm64Float32ToUint32) \
V(Arm64Float64ToUint32) \
V(Arm64Float32ToInt64) \
V(Arm64Float64ToInt64) \
V(Arm64Float32ToUint64) \
V(Arm64Float64ToUint64) \
V(Arm64Int32ToFloat32) \
V(Arm64Int32ToFloat64) \
V(Arm64Int64ToFloat32) \
V(Arm64Int64ToFloat64) \
V(Arm64Uint32ToFloat32) \
V(Arm64Uint32ToFloat64) \
V(Arm64Uint64ToFloat32) \
V(Arm64Uint64ToFloat64) \
V(Arm64Float64ExtractLowWord32) \
V(Arm64Float64ExtractHighWord32) \
V(Arm64Float64InsertLowWord32) \
V(Arm64Float64InsertHighWord32) \
V(Arm64Float64MoveU64) \
V(Arm64U64MoveFloat64) \
V(Arm64LdrS) \
V(Arm64StrS) \
V(Arm64LdrD) \
V(Arm64StrD) \
V(Arm64LdrQ) \
V(Arm64StrQ) \
V(Arm64Ldrb) \
V(Arm64Ldrsb) \
V(Arm64LdrsbW) \
V(Arm64Strb) \
V(Arm64Ldrh) \
V(Arm64Ldrsh) \
V(Arm64LdrshW) \
V(Arm64Strh) \
V(Arm64Ldrsw) \
V(Arm64LdrW) \
V(Arm64StrW) \
V(Arm64Ldr) \
V(Arm64LdrDecompressTaggedSigned) \
V(Arm64LdrDecompressTaggedPointer) \
V(Arm64LdrDecompressAnyTagged) \
V(Arm64LdarDecompressTaggedSigned) \
V(Arm64LdarDecompressTaggedPointer) \
V(Arm64LdarDecompressAnyTagged) \
V(Arm64Str) \
V(Arm64StrCompressTagged) \
V(Arm64StlrCompressTagged) \
V(Arm64DmbIsh) \
V(Arm64DsbIsb) \
V(Arm64Sxtl) \
V(Arm64Sxtl2) \
V(Arm64Uxtl) \
V(Arm64Uxtl2) \
V(Arm64FSplat) \
V(Arm64FAbs) \
V(Arm64FSqrt) \
V(Arm64FNeg) \
V(Arm64FExtractLane) \
V(Arm64FReplaceLane) \
V(Arm64FAdd) \
V(Arm64FSub) \
V(Arm64FMul) \
V(Arm64FMulElement) \
V(Arm64FDiv) \
V(Arm64FMin) \
V(Arm64FMax) \
V(Arm64FEq) \
V(Arm64FNe) \
V(Arm64FLt) \
V(Arm64FLe) \
V(Arm64FGt) \
V(Arm64FGe) \
V(Arm64F64x2Qfma) \
V(Arm64F64x2Qfms) \
V(Arm64F64x2Pmin) \
V(Arm64F64x2Pmax) \
V(Arm64F64x2ConvertLowI32x4S) \
V(Arm64F64x2ConvertLowI32x4U) \
V(Arm64F64x2PromoteLowF32x4) \
V(Arm64F32x4SConvertI32x4) \
V(Arm64F32x4UConvertI32x4) \
V(Arm64F32x4RecipApprox) \
V(Arm64F32x4RecipSqrtApprox) \
V(Arm64F32x4Qfma) \
V(Arm64F32x4Qfms) \
V(Arm64F32x4Pmin) \
V(Arm64F32x4Pmax) \
V(Arm64F32x4DemoteF64x2Zero) \
V(Arm64ISplat) \
V(Arm64IAbs) \
V(Arm64INeg) \
V(Arm64IExtractLane) \
V(Arm64IReplaceLane) \
V(Arm64I64x2Shl) \
V(Arm64I64x2ShrS) \
V(Arm64IAdd) \
V(Arm64ISub) \
V(Arm64I64x2Mul) \
V(Arm64IEq) \
V(Arm64INe) \
V(Arm64IGtS) \
V(Arm64IGeS) \
V(Arm64I64x2ShrU) \
V(Arm64I64x2BitMask) \
V(Arm64I32x4SConvertF32x4) \
V(Arm64I32x4Shl) \
V(Arm64I32x4ShrS) \
V(Arm64I32x4Mul) \
V(Arm64Mla) \
V(Arm64Mls) \
V(Arm64IMinS) \
V(Arm64IMaxS) \
V(Arm64I32x4UConvertF32x4) \
V(Arm64I32x4ShrU) \
V(Arm64IMinU) \
V(Arm64IMaxU) \
V(Arm64IGtU) \
V(Arm64IGeU) \
V(Arm64I32x4BitMask) \
V(Arm64I32x4DotI16x8S) \
V(Arm64I32x4TruncSatF64x2SZero) \
V(Arm64I32x4TruncSatF64x2UZero) \
V(Arm64IExtractLaneU) \
V(Arm64IExtractLaneS) \
V(Arm64I16x8Shl) \
V(Arm64I16x8ShrS) \
V(Arm64I16x8SConvertI32x4) \
V(Arm64IAddSatS) \
V(Arm64ISubSatS) \
V(Arm64I16x8Mul) \
V(Arm64I16x8ShrU) \
V(Arm64I16x8UConvertI32x4) \
V(Arm64IAddSatU) \
V(Arm64ISubSatU) \
V(Arm64RoundingAverageU) \
V(Arm64I16x8Q15MulRSatS) \
V(Arm64I16x8BitMask) \
V(Arm64I8x16Shl) \
V(Arm64I8x16ShrS) \
V(Arm64I8x16SConvertI16x8) \
V(Arm64I8x16ShrU) \
V(Arm64I8x16UConvertI16x8) \
V(Arm64I8x16BitMask) \
V(Arm64S128Const) \
V(Arm64S128Zero) \
V(Arm64S128Dup) \
V(Arm64S128And) \
V(Arm64S128Or) \
V(Arm64S128Xor) \
V(Arm64S128Not) \
V(Arm64S128Select) \
V(Arm64S128AndNot) \
V(Arm64Ssra) \
V(Arm64Usra) \
V(Arm64S32x4ZipLeft) \
V(Arm64S32x4ZipRight) \
V(Arm64S32x4UnzipLeft) \
V(Arm64S32x4UnzipRight) \
V(Arm64S32x4TransposeLeft) \
V(Arm64S32x4TransposeRight) \
V(Arm64S32x4Shuffle) \
V(Arm64S16x8ZipLeft) \
V(Arm64S16x8ZipRight) \
V(Arm64S16x8UnzipLeft) \
V(Arm64S16x8UnzipRight) \
V(Arm64S16x8TransposeLeft) \
V(Arm64S16x8TransposeRight) \
V(Arm64S8x16ZipLeft) \
V(Arm64S8x16ZipRight) \
V(Arm64S8x16UnzipLeft) \
V(Arm64S8x16UnzipRight) \
V(Arm64S8x16TransposeLeft) \
V(Arm64S8x16TransposeRight) \
V(Arm64S8x16Concat) \
V(Arm64I8x16Swizzle) \
V(Arm64I8x16Shuffle) \
V(Arm64S32x2Reverse) \
V(Arm64S16x4Reverse) \
V(Arm64S16x2Reverse) \
V(Arm64S8x8Reverse) \
V(Arm64S8x4Reverse) \
V(Arm64S8x2Reverse) \
V(Arm64V128AnyTrue) \
V(Arm64I64x2AllTrue) \
V(Arm64I32x4AllTrue) \
V(Arm64I16x8AllTrue) \
V(Arm64I8x16AllTrue) \
V(Arm64LoadSplat) \
V(Arm64LoadLane) \
V(Arm64StoreLane) \
V(Arm64S128Load8x8S) \
V(Arm64S128Load8x8U) \
V(Arm64S128Load16x4S) \
V(Arm64S128Load16x4U) \
V(Arm64S128Load32x2S) \
V(Arm64S128Load32x2U) \
V(Arm64Word64AtomicLoadUint64) \
V(Arm64Word64AtomicStoreWord64) \
V(Arm64Word64AtomicAddUint64) \
V(Arm64Word64AtomicSubUint64) \
V(Arm64Word64AtomicAndUint64) \
V(Arm64Word64AtomicOrUint64) \
V(Arm64Word64AtomicXorUint64) \
V(Arm64Word64AtomicExchangeUint64) \
// Opcodes that support a MemoryAccessMode.
#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
V(Arm64Ldr) \
V(Arm64Ldrb) \
V(Arm64LdrD) \
V(Arm64Ldrh) \
V(Arm64LdrQ) \
V(Arm64LdrS) \
V(Arm64Ldrsb) \
V(Arm64LdrsbW) \
V(Arm64Ldrsh) \
V(Arm64LdrshW) \
V(Arm64Ldrsw) \
V(Arm64LdrW) \
V(Arm64LoadLane) \
V(Arm64LoadSplat) \
V(Arm64S128Load16x4S) \
V(Arm64S128Load16x4U) \
V(Arm64S128Load32x2S) \
V(Arm64S128Load32x2U) \
V(Arm64S128Load8x8S) \
V(Arm64S128Load8x8U) \
V(Arm64StoreLane) \
V(Arm64Str) \
V(Arm64Strb) \
V(Arm64StrD) \
V(Arm64Strh) \
V(Arm64StrQ) \
V(Arm64StrS) \
V(Arm64StrW)
#define TARGET_ARCH_OPCODE_LIST(V) \
TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
V(Arm64Add) \
V(Arm64Add32) \
V(Arm64And) \
V(Arm64And32) \
V(Arm64Bic) \
V(Arm64Bic32) \
V(Arm64Clz) \
V(Arm64Clz32) \
V(Arm64Cmp) \
V(Arm64Cmp32) \
V(Arm64Cmn) \
V(Arm64Cmn32) \
V(Arm64Cnt) \
V(Arm64Cnt32) \
V(Arm64Cnt64) \
V(Arm64Tst) \
V(Arm64Tst32) \
V(Arm64Or) \
V(Arm64Or32) \
V(Arm64Orn) \
V(Arm64Orn32) \
V(Arm64Eor) \
V(Arm64Eor32) \
V(Arm64Eon) \
V(Arm64Eon32) \
V(Arm64Sadalp) \
V(Arm64Saddlp) \
V(Arm64Sub) \
V(Arm64Sub32) \
V(Arm64Mul) \
V(Arm64Mul32) \
V(Arm64Smlal) \
V(Arm64Smlal2) \
V(Arm64Smull) \
V(Arm64Smull2) \
V(Arm64Uadalp) \
V(Arm64Uaddlp) \
V(Arm64Umlal) \
V(Arm64Umlal2) \
V(Arm64Umull) \
V(Arm64Umull2) \
V(Arm64Madd) \
V(Arm64Madd32) \
V(Arm64Msub) \
V(Arm64Msub32) \
V(Arm64Mneg) \
V(Arm64Mneg32) \
V(Arm64Idiv) \
V(Arm64Idiv32) \
V(Arm64Udiv) \
V(Arm64Udiv32) \
V(Arm64Imod) \
V(Arm64Imod32) \
V(Arm64Umod) \
V(Arm64Umod32) \
V(Arm64Not) \
V(Arm64Not32) \
V(Arm64Lsl) \
V(Arm64Lsl32) \
V(Arm64Lsr) \
V(Arm64Lsr32) \
V(Arm64Asr) \
V(Arm64Asr32) \
V(Arm64Ror) \
V(Arm64Ror32) \
V(Arm64Mov32) \
V(Arm64Sxtb32) \
V(Arm64Sxth32) \
V(Arm64Sxtb) \
V(Arm64Sxth) \
V(Arm64Sxtw) \
V(Arm64Sbfx) \
V(Arm64Sbfx32) \
V(Arm64Ubfx) \
V(Arm64Ubfx32) \
V(Arm64Ubfiz32) \
V(Arm64Bfi) \
V(Arm64Rbit) \
V(Arm64Rbit32) \
V(Arm64Rev) \
V(Arm64Rev32) \
V(Arm64TestAndBranch32) \
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
V(Arm64CompareAndBranch) \
V(Arm64Claim) \
V(Arm64Poke) \
V(Arm64PokePair) \
V(Arm64Peek) \
V(Arm64Float32Cmp) \
V(Arm64Float32Add) \
V(Arm64Float32Sub) \
V(Arm64Float32Mul) \
V(Arm64Float32Div) \
V(Arm64Float32Abs) \
V(Arm64Float32Abd) \
V(Arm64Float32Neg) \
V(Arm64Float32Sqrt) \
V(Arm64Float32Fnmul) \
V(Arm64Float32RoundDown) \
V(Arm64Float32Max) \
V(Arm64Float32Min) \
V(Arm64Float64Cmp) \
V(Arm64Float64Add) \
V(Arm64Float64Sub) \
V(Arm64Float64Mul) \
V(Arm64Float64Div) \
V(Arm64Float64Mod) \
V(Arm64Float64Max) \
V(Arm64Float64Min) \
V(Arm64Float64Abs) \
V(Arm64Float64Abd) \
V(Arm64Float64Neg) \
V(Arm64Float64Sqrt) \
V(Arm64Float64Fnmul) \
V(Arm64Float64RoundDown) \
V(Arm64Float32RoundUp) \
V(Arm64Float64RoundUp) \
V(Arm64Float64RoundTiesAway) \
V(Arm64Float32RoundTruncate) \
V(Arm64Float64RoundTruncate) \
V(Arm64Float32RoundTiesEven) \
V(Arm64Float64RoundTiesEven) \
V(Arm64Float64SilenceNaN) \
V(Arm64Float32ToFloat64) \
V(Arm64Float64ToFloat32) \
V(Arm64Float32ToInt32) \
V(Arm64Float64ToInt32) \
V(Arm64Float32ToUint32) \
V(Arm64Float64ToUint32) \
V(Arm64Float32ToInt64) \
V(Arm64Float64ToInt64) \
V(Arm64Float32ToUint64) \
V(Arm64Float64ToUint64) \
V(Arm64Int32ToFloat32) \
V(Arm64Int32ToFloat64) \
V(Arm64Int64ToFloat32) \
V(Arm64Int64ToFloat64) \
V(Arm64Uint32ToFloat32) \
V(Arm64Uint32ToFloat64) \
V(Arm64Uint64ToFloat32) \
V(Arm64Uint64ToFloat64) \
V(Arm64Float64ExtractLowWord32) \
V(Arm64Float64ExtractHighWord32) \
V(Arm64Float64InsertLowWord32) \
V(Arm64Float64InsertHighWord32) \
V(Arm64Float64MoveU64) \
V(Arm64U64MoveFloat64) \
V(Arm64LdrDecompressTaggedSigned) \
V(Arm64LdrDecompressTaggedPointer) \
V(Arm64LdrDecompressAnyTagged) \
V(Arm64LdarDecompressTaggedSigned) \
V(Arm64LdarDecompressTaggedPointer) \
V(Arm64LdarDecompressAnyTagged) \
V(Arm64StrCompressTagged) \
V(Arm64StlrCompressTagged) \
V(Arm64DmbIsh) \
V(Arm64DsbIsb) \
V(Arm64Sxtl) \
V(Arm64Sxtl2) \
V(Arm64Uxtl) \
V(Arm64Uxtl2) \
V(Arm64FSplat) \
V(Arm64FAbs) \
V(Arm64FSqrt) \
V(Arm64FNeg) \
V(Arm64FExtractLane) \
V(Arm64FReplaceLane) \
V(Arm64FAdd) \
V(Arm64FSub) \
V(Arm64FMul) \
V(Arm64FMulElement) \
V(Arm64FDiv) \
V(Arm64FMin) \
V(Arm64FMax) \
V(Arm64FEq) \
V(Arm64FNe) \
V(Arm64FLt) \
V(Arm64FLe) \
V(Arm64FGt) \
V(Arm64FGe) \
V(Arm64F64x2Qfma) \
V(Arm64F64x2Qfms) \
V(Arm64F64x2Pmin) \
V(Arm64F64x2Pmax) \
V(Arm64F64x2ConvertLowI32x4S) \
V(Arm64F64x2ConvertLowI32x4U) \
V(Arm64F64x2PromoteLowF32x4) \
V(Arm64F32x4SConvertI32x4) \
V(Arm64F32x4UConvertI32x4) \
V(Arm64F32x4RecipApprox) \
V(Arm64F32x4RecipSqrtApprox) \
V(Arm64F32x4Qfma) \
V(Arm64F32x4Qfms) \
V(Arm64F32x4Pmin) \
V(Arm64F32x4Pmax) \
V(Arm64F32x4DemoteF64x2Zero) \
V(Arm64ISplat) \
V(Arm64IAbs) \
V(Arm64INeg) \
V(Arm64IExtractLane) \
V(Arm64IReplaceLane) \
V(Arm64I64x2Shl) \
V(Arm64I64x2ShrS) \
V(Arm64IAdd) \
V(Arm64ISub) \
V(Arm64I64x2Mul) \
V(Arm64IEq) \
V(Arm64INe) \
V(Arm64IGtS) \
V(Arm64IGeS) \
V(Arm64I64x2ShrU) \
V(Arm64I64x2BitMask) \
V(Arm64I32x4SConvertF32x4) \
V(Arm64I32x4Shl) \
V(Arm64I32x4ShrS) \
V(Arm64I32x4Mul) \
V(Arm64Mla) \
V(Arm64Mls) \
V(Arm64IMinS) \
V(Arm64IMaxS) \
V(Arm64I32x4UConvertF32x4) \
V(Arm64I32x4ShrU) \
V(Arm64IMinU) \
V(Arm64IMaxU) \
V(Arm64IGtU) \
V(Arm64IGeU) \
V(Arm64I32x4BitMask) \
V(Arm64I32x4DotI16x8S) \
V(Arm64I32x4TruncSatF64x2SZero) \
V(Arm64I32x4TruncSatF64x2UZero) \
V(Arm64IExtractLaneU) \
V(Arm64IExtractLaneS) \
V(Arm64I16x8Shl) \
V(Arm64I16x8ShrS) \
V(Arm64I16x8SConvertI32x4) \
V(Arm64IAddSatS) \
V(Arm64ISubSatS) \
V(Arm64I16x8Mul) \
V(Arm64I16x8ShrU) \
V(Arm64I16x8UConvertI32x4) \
V(Arm64IAddSatU) \
V(Arm64ISubSatU) \
V(Arm64RoundingAverageU) \
V(Arm64I16x8Q15MulRSatS) \
V(Arm64I16x8BitMask) \
V(Arm64I8x16Shl) \
V(Arm64I8x16ShrS) \
V(Arm64I8x16SConvertI16x8) \
V(Arm64I8x16ShrU) \
V(Arm64I8x16UConvertI16x8) \
V(Arm64I8x16BitMask) \
V(Arm64S128Const) \
V(Arm64S128Zero) \
V(Arm64S128Dup) \
V(Arm64S128And) \
V(Arm64S128Or) \
V(Arm64S128Xor) \
V(Arm64S128Not) \
V(Arm64S128Select) \
V(Arm64S128AndNot) \
V(Arm64Ssra) \
V(Arm64Usra) \
V(Arm64S32x4ZipLeft) \
V(Arm64S32x4ZipRight) \
V(Arm64S32x4UnzipLeft) \
V(Arm64S32x4UnzipRight) \
V(Arm64S32x4TransposeLeft) \
V(Arm64S32x4TransposeRight) \
V(Arm64S32x4Shuffle) \
V(Arm64S16x8ZipLeft) \
V(Arm64S16x8ZipRight) \
V(Arm64S16x8UnzipLeft) \
V(Arm64S16x8UnzipRight) \
V(Arm64S16x8TransposeLeft) \
V(Arm64S16x8TransposeRight) \
V(Arm64S8x16ZipLeft) \
V(Arm64S8x16ZipRight) \
V(Arm64S8x16UnzipLeft) \
V(Arm64S8x16UnzipRight) \
V(Arm64S8x16TransposeLeft) \
V(Arm64S8x16TransposeRight) \
V(Arm64S8x16Concat) \
V(Arm64I8x16Swizzle) \
V(Arm64I8x16Shuffle) \
V(Arm64S32x2Reverse) \
V(Arm64S16x4Reverse) \
V(Arm64S16x2Reverse) \
V(Arm64S8x8Reverse) \
V(Arm64S8x4Reverse) \
V(Arm64S8x2Reverse) \
V(Arm64V128AnyTrue) \
V(Arm64I64x2AllTrue) \
V(Arm64I32x4AllTrue) \
V(Arm64I16x8AllTrue) \
V(Arm64I8x16AllTrue) \
V(Arm64Word64AtomicLoadUint64) \
V(Arm64Word64AtomicStoreWord64) \
V(Arm64Word64AtomicAddUint64) \
V(Arm64Word64AtomicSubUint64) \
V(Arm64Word64AtomicAndUint64) \
V(Arm64Word64AtomicOrUint64) \
V(Arm64Word64AtomicXorUint64) \
V(Arm64Word64AtomicExchangeUint64) \
V(Arm64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
@@ -11,354 +11,359 @@ namespace compiler {
// IA32-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(IA32Add) \
V(IA32And) \
V(IA32Cmp) \
V(IA32Cmp16) \
V(IA32Cmp8) \
V(IA32Test) \
V(IA32Test16) \
V(IA32Test8) \
V(IA32Or) \
V(IA32Xor) \
V(IA32Sub) \
V(IA32Imul) \
V(IA32ImulHigh) \
V(IA32UmulHigh) \
V(IA32Idiv) \
V(IA32Udiv) \
V(IA32Not) \
V(IA32Neg) \
V(IA32Shl) \
V(IA32Shr) \
V(IA32Sar) \
V(IA32AddPair) \
V(IA32SubPair) \
V(IA32MulPair) \
V(IA32ShlPair) \
V(IA32ShrPair) \
V(IA32SarPair) \
V(IA32Rol) \
V(IA32Ror) \
V(IA32Lzcnt) \
V(IA32Tzcnt) \
V(IA32Popcnt) \
V(IA32Bswap) \
V(IA32MFence) \
V(IA32LFence) \
V(IA32Float32Cmp) \
V(IA32Float32Sqrt) \
V(IA32Float32Round) \
V(IA32Float64Cmp) \
V(IA32Float64Mod) \
V(IA32Float32Max) \
V(IA32Float64Max) \
V(IA32Float32Min) \
V(IA32Float64Min) \
V(IA32Float64Sqrt) \
V(IA32Float64Round) \
V(IA32Float32ToFloat64) \
V(IA32Float64ToFloat32) \
V(IA32Float32ToInt32) \
V(IA32Float32ToUint32) \
V(IA32Float64ToInt32) \
V(IA32Float64ToUint32) \
V(SSEInt32ToFloat32) \
V(IA32Uint32ToFloat32) \
V(SSEInt32ToFloat64) \
V(IA32Uint32ToFloat64) \
V(IA32Float64ExtractLowWord32) \
V(IA32Float64ExtractHighWord32) \
V(IA32Float64InsertLowWord32) \
V(IA32Float64InsertHighWord32) \
V(IA32Float64LoadLowWord32) \
V(IA32Float64SilenceNaN) \
V(Float32Add) \
V(Float32Sub) \
V(Float64Add) \
V(Float64Sub) \
V(Float32Mul) \
V(Float32Div) \
V(Float64Mul) \
V(Float64Div) \
V(Float64Abs) \
V(Float64Neg) \
V(Float32Abs) \
V(Float32Neg) \
V(IA32Movsxbl) \
V(IA32Movzxbl) \
V(IA32Movb) \
V(IA32Movsxwl) \
V(IA32Movzxwl) \
V(IA32Movw) \
V(IA32Movl) \
V(IA32Movss) \
V(IA32Movsd) \
V(IA32Movdqu) \
V(IA32Movlps) \
V(IA32Movhps) \
V(IA32BitcastFI) \
V(IA32BitcastIF) \
V(IA32Lea) \
V(IA32Push) \
V(IA32Poke) \
V(IA32Peek) \
V(IA32F64x2Splat) \
V(F64x2ExtractLane) \
V(F64x2ReplaceLane) \
V(IA32F64x2Sqrt) \
V(IA32F64x2Add) \
V(IA32F64x2Sub) \
V(IA32F64x2Mul) \
V(IA32F64x2Div) \
V(IA32F64x2Min) \
V(IA32F64x2Max) \
V(IA32F64x2Eq) \
V(IA32F64x2Ne) \
V(IA32F64x2Lt) \
V(IA32F64x2Le) \
V(IA32F64x2Pmin) \
V(IA32F64x2Pmax) \
V(IA32F64x2Round) \
V(IA32F64x2ConvertLowI32x4S) \
V(IA32F64x2ConvertLowI32x4U) \
V(IA32F64x2PromoteLowF32x4) \
V(IA32I64x2SplatI32Pair) \
V(IA32I64x2ReplaceLaneI32Pair) \
V(IA32I64x2Abs) \
V(IA32I64x2Neg) \
V(IA32I64x2Shl) \
V(IA32I64x2ShrS) \
V(IA32I64x2Add) \
V(IA32I64x2Sub) \
V(IA32I64x2Mul) \
V(IA32I64x2ShrU) \
V(IA32I64x2BitMask) \
V(IA32I64x2Eq) \
V(IA32I64x2Ne) \
V(IA32I64x2GtS) \
V(IA32I64x2GeS) \
V(IA32I64x2ExtMulLowI32x4S) \
V(IA32I64x2ExtMulHighI32x4S) \
V(IA32I64x2ExtMulLowI32x4U) \
V(IA32I64x2ExtMulHighI32x4U) \
V(IA32I64x2SConvertI32x4Low) \
V(IA32I64x2SConvertI32x4High) \
V(IA32I64x2UConvertI32x4Low) \
V(IA32I64x2UConvertI32x4High) \
V(IA32F32x4Splat) \
V(IA32F32x4ExtractLane) \
V(IA32Insertps) \
V(IA32F32x4SConvertI32x4) \
V(IA32F32x4UConvertI32x4) \
V(IA32F32x4Sqrt) \
V(IA32F32x4RecipApprox) \
V(IA32F32x4RecipSqrtApprox) \
V(IA32F32x4Add) \
V(IA32F32x4Sub) \
V(IA32F32x4Mul) \
V(IA32F32x4Div) \
V(IA32F32x4Min) \
V(IA32F32x4Max) \
V(IA32F32x4Eq) \
V(IA32F32x4Ne) \
V(IA32F32x4Lt) \
V(IA32F32x4Le) \
V(IA32F32x4Pmin) \
V(IA32F32x4Pmax) \
V(IA32F32x4Round) \
V(IA32F32x4DemoteF64x2Zero) \
V(IA32I32x4Splat) \
V(IA32I32x4ExtractLane) \
V(IA32I32x4SConvertF32x4) \
V(IA32I32x4SConvertI16x8Low) \
V(IA32I32x4SConvertI16x8High) \
V(IA32I32x4Neg) \
V(IA32I32x4Shl) \
V(IA32I32x4ShrS) \
V(IA32I32x4Add) \
V(IA32I32x4Sub) \
V(IA32I32x4Mul) \
V(IA32I32x4MinS) \
V(IA32I32x4MaxS) \
V(IA32I32x4Eq) \
V(IA32I32x4Ne) \
V(IA32I32x4GtS) \
V(IA32I32x4GeS) \
V(SSEI32x4UConvertF32x4) \
V(AVXI32x4UConvertF32x4) \
V(IA32I32x4UConvertI16x8Low) \
V(IA32I32x4UConvertI16x8High) \
V(IA32I32x4ShrU) \
V(IA32I32x4MinU) \
V(IA32I32x4MaxU) \
V(SSEI32x4GtU) \
V(AVXI32x4GtU) \
V(SSEI32x4GeU) \
V(AVXI32x4GeU) \
V(IA32I32x4Abs) \
V(IA32I32x4BitMask) \
V(IA32I32x4DotI16x8S) \
V(IA32I32x4ExtMulLowI16x8S) \
V(IA32I32x4ExtMulHighI16x8S) \
V(IA32I32x4ExtMulLowI16x8U) \
V(IA32I32x4ExtMulHighI16x8U) \
V(IA32I32x4ExtAddPairwiseI16x8S) \
V(IA32I32x4ExtAddPairwiseI16x8U) \
V(IA32I32x4TruncSatF64x2SZero) \
V(IA32I32x4TruncSatF64x2UZero) \
V(IA32I16x8Splat) \
V(IA32I16x8ExtractLaneS) \
V(IA32I16x8SConvertI8x16Low) \
V(IA32I16x8SConvertI8x16High) \
V(IA32I16x8Neg) \
V(IA32I16x8Shl) \
V(IA32I16x8ShrS) \
V(IA32I16x8SConvertI32x4) \
V(IA32I16x8Add) \
V(IA32I16x8AddSatS) \
V(IA32I16x8Sub) \
V(IA32I16x8SubSatS) \
V(IA32I16x8Mul) \
V(IA32I16x8MinS) \
V(IA32I16x8MaxS) \
V(IA32I16x8Eq) \
V(SSEI16x8Ne) \
V(AVXI16x8Ne) \
V(IA32I16x8GtS) \
V(SSEI16x8GeS) \
V(AVXI16x8GeS) \
V(IA32I16x8UConvertI8x16Low) \
V(IA32I16x8UConvertI8x16High) \
V(IA32I16x8ShrU) \
V(IA32I16x8UConvertI32x4) \
V(IA32I16x8AddSatU) \
V(IA32I16x8SubSatU) \
V(IA32I16x8MinU) \
V(IA32I16x8MaxU) \
V(SSEI16x8GtU) \
V(AVXI16x8GtU) \
V(SSEI16x8GeU) \
V(AVXI16x8GeU) \
V(IA32I16x8RoundingAverageU) \
V(IA32I16x8Abs) \
V(IA32I16x8BitMask) \
V(IA32I16x8ExtMulLowI8x16S) \
V(IA32I16x8ExtMulHighI8x16S) \
V(IA32I16x8ExtMulLowI8x16U) \
V(IA32I16x8ExtMulHighI8x16U) \
V(IA32I16x8ExtAddPairwiseI8x16S) \
V(IA32I16x8ExtAddPairwiseI8x16U) \
V(IA32I16x8Q15MulRSatS) \
V(IA32I8x16Splat) \
V(IA32I8x16ExtractLaneS) \
V(IA32Pinsrb) \
V(IA32Pinsrw) \
V(IA32Pinsrd) \
V(IA32Pextrb) \
V(IA32Pextrw) \
V(IA32S128Store32Lane) \
V(IA32I8x16SConvertI16x8) \
V(IA32I8x16Neg) \
V(IA32I8x16Shl) \
V(IA32I8x16ShrS) \
V(IA32I8x16Add) \
V(IA32I8x16AddSatS) \
V(IA32I8x16Sub) \
V(IA32I8x16SubSatS) \
V(IA32I8x16MinS) \
V(IA32I8x16MaxS) \
V(IA32I8x16Eq) \
V(SSEI8x16Ne) \
V(AVXI8x16Ne) \
V(IA32I8x16GtS) \
V(SSEI8x16GeS) \
V(AVXI8x16GeS) \
V(IA32I8x16UConvertI16x8) \
V(IA32I8x16AddSatU) \
V(IA32I8x16SubSatU) \
V(IA32I8x16ShrU) \
V(IA32I8x16MinU) \
V(IA32I8x16MaxU) \
V(SSEI8x16GtU) \
V(AVXI8x16GtU) \
V(SSEI8x16GeU) \
V(AVXI8x16GeU) \
V(IA32I8x16RoundingAverageU) \
V(IA32I8x16Abs) \
V(IA32I8x16BitMask) \
V(IA32I8x16Popcnt) \
V(IA32S128Const) \
V(IA32S128Zero) \
V(IA32S128AllOnes) \
V(IA32S128Not) \
V(IA32S128And) \
V(IA32S128Or) \
V(IA32S128Xor) \
V(IA32S128Select) \
V(IA32S128AndNot) \
V(IA32I8x16Swizzle) \
V(IA32I8x16Shuffle) \
V(IA32S128Load8Splat) \
V(IA32S128Load16Splat) \
V(IA32S128Load32Splat) \
V(IA32S128Load64Splat) \
V(IA32S128Load8x8S) \
V(IA32S128Load8x8U) \
V(IA32S128Load16x4S) \
V(IA32S128Load16x4U) \
V(IA32S128Load32x2S) \
V(IA32S128Load32x2U) \
V(IA32S32x4Rotate) \
V(IA32S32x4Swizzle) \
V(IA32S32x4Shuffle) \
V(IA32S16x8Blend) \
V(IA32S16x8HalfShuffle1) \
V(IA32S16x8HalfShuffle2) \
V(IA32S8x16Alignr) \
V(IA32S16x8Dup) \
V(IA32S8x16Dup) \
V(SSES16x8UnzipHigh) \
V(AVXS16x8UnzipHigh) \
V(SSES16x8UnzipLow) \
V(AVXS16x8UnzipLow) \
V(SSES8x16UnzipHigh) \
V(AVXS8x16UnzipHigh) \
V(SSES8x16UnzipLow) \
V(AVXS8x16UnzipLow) \
V(IA32S64x2UnpackHigh) \
V(IA32S32x4UnpackHigh) \
V(IA32S16x8UnpackHigh) \
V(IA32S8x16UnpackHigh) \
V(IA32S64x2UnpackLow) \
V(IA32S32x4UnpackLow) \
V(IA32S16x8UnpackLow) \
V(IA32S8x16UnpackLow) \
V(SSES8x16TransposeLow) \
V(AVXS8x16TransposeLow) \
V(SSES8x16TransposeHigh) \
V(AVXS8x16TransposeHigh) \
V(SSES8x8Reverse) \
V(AVXS8x8Reverse) \
V(SSES8x4Reverse) \
V(AVXS8x4Reverse) \
V(SSES8x2Reverse) \
V(AVXS8x2Reverse) \
V(IA32S128AnyTrue) \
V(IA32I64x2AllTrue) \
V(IA32I32x4AllTrue) \
V(IA32I16x8AllTrue) \
V(IA32I8x16AllTrue) \
V(IA32Word32AtomicPairLoad) \
V(IA32Word32ReleasePairStore) \
V(IA32Word32SeqCstPairStore) \
V(IA32Word32AtomicPairAdd) \
V(IA32Word32AtomicPairSub) \
V(IA32Word32AtomicPairAnd) \
V(IA32Word32AtomicPairOr) \
V(IA32Word32AtomicPairXor) \
V(IA32Word32AtomicPairExchange) \
// Opcodes that support a MemoryAccessMode.
#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
#define TARGET_ARCH_OPCODE_LIST(V) \
TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
V(IA32Add) \
V(IA32And) \
V(IA32Cmp) \
V(IA32Cmp16) \
V(IA32Cmp8) \
V(IA32Test) \
V(IA32Test16) \
V(IA32Test8) \
V(IA32Or) \
V(IA32Xor) \
V(IA32Sub) \
V(IA32Imul) \
V(IA32ImulHigh) \
V(IA32UmulHigh) \
V(IA32Idiv) \
V(IA32Udiv) \
V(IA32Not) \
V(IA32Neg) \
V(IA32Shl) \
V(IA32Shr) \
V(IA32Sar) \
V(IA32AddPair) \
V(IA32SubPair) \
V(IA32MulPair) \
V(IA32ShlPair) \
V(IA32ShrPair) \
V(IA32SarPair) \
V(IA32Rol) \
V(IA32Ror) \
V(IA32Lzcnt) \
V(IA32Tzcnt) \
V(IA32Popcnt) \
V(IA32Bswap) \
V(IA32MFence) \
V(IA32LFence) \
V(IA32Float32Cmp) \
V(IA32Float32Sqrt) \
V(IA32Float32Round) \
V(IA32Float64Cmp) \
V(IA32Float64Mod) \
V(IA32Float32Max) \
V(IA32Float64Max) \
V(IA32Float32Min) \
V(IA32Float64Min) \
V(IA32Float64Sqrt) \
V(IA32Float64Round) \
V(IA32Float32ToFloat64) \
V(IA32Float64ToFloat32) \
V(IA32Float32ToInt32) \
V(IA32Float32ToUint32) \
V(IA32Float64ToInt32) \
V(IA32Float64ToUint32) \
V(SSEInt32ToFloat32) \
V(IA32Uint32ToFloat32) \
V(SSEInt32ToFloat64) \
V(IA32Uint32ToFloat64) \
V(IA32Float64ExtractLowWord32) \
V(IA32Float64ExtractHighWord32) \
V(IA32Float64InsertLowWord32) \
V(IA32Float64InsertHighWord32) \
V(IA32Float64LoadLowWord32) \
V(IA32Float64SilenceNaN) \
V(Float32Add) \
V(Float32Sub) \
V(Float64Add) \
V(Float64Sub) \
V(Float32Mul) \
V(Float32Div) \
V(Float64Mul) \
V(Float64Div) \
V(Float64Abs) \
V(Float64Neg) \
V(Float32Abs) \
V(Float32Neg) \
V(IA32Movsxbl) \
V(IA32Movzxbl) \
V(IA32Movb) \
V(IA32Movsxwl) \
V(IA32Movzxwl) \
V(IA32Movw) \
V(IA32Movl) \
V(IA32Movss) \
V(IA32Movsd) \
V(IA32Movdqu) \
V(IA32Movlps) \
V(IA32Movhps) \
V(IA32BitcastFI) \
V(IA32BitcastIF) \
V(IA32Lea) \
V(IA32Push) \
V(IA32Poke) \
V(IA32Peek) \
V(IA32F64x2Splat) \
V(F64x2ExtractLane) \
V(F64x2ReplaceLane) \
V(IA32F64x2Sqrt) \
V(IA32F64x2Add) \
V(IA32F64x2Sub) \
V(IA32F64x2Mul) \
V(IA32F64x2Div) \
V(IA32F64x2Min) \
V(IA32F64x2Max) \
V(IA32F64x2Eq) \
V(IA32F64x2Ne) \
V(IA32F64x2Lt) \
V(IA32F64x2Le) \
V(IA32F64x2Pmin) \
V(IA32F64x2Pmax) \
V(IA32F64x2Round) \
V(IA32F64x2ConvertLowI32x4S) \
V(IA32F64x2ConvertLowI32x4U) \
V(IA32F64x2PromoteLowF32x4) \
V(IA32I64x2SplatI32Pair) \
V(IA32I64x2ReplaceLaneI32Pair) \
V(IA32I64x2Abs) \
V(IA32I64x2Neg) \
V(IA32I64x2Shl) \
V(IA32I64x2ShrS) \
V(IA32I64x2Add) \
V(IA32I64x2Sub) \
V(IA32I64x2Mul) \
V(IA32I64x2ShrU) \
V(IA32I64x2BitMask) \
V(IA32I64x2Eq) \
V(IA32I64x2Ne) \
V(IA32I64x2GtS) \
V(IA32I64x2GeS) \
V(IA32I64x2ExtMulLowI32x4S) \
V(IA32I64x2ExtMulHighI32x4S) \
V(IA32I64x2ExtMulLowI32x4U) \
V(IA32I64x2ExtMulHighI32x4U) \
V(IA32I64x2SConvertI32x4Low) \
V(IA32I64x2SConvertI32x4High) \
V(IA32I64x2UConvertI32x4Low) \
V(IA32I64x2UConvertI32x4High) \
V(IA32F32x4Splat) \
V(IA32F32x4ExtractLane) \
V(IA32Insertps) \
V(IA32F32x4SConvertI32x4) \
V(IA32F32x4UConvertI32x4) \
V(IA32F32x4Sqrt) \
V(IA32F32x4RecipApprox) \
V(IA32F32x4RecipSqrtApprox) \
V(IA32F32x4Add) \
V(IA32F32x4Sub) \
V(IA32F32x4Mul) \
V(IA32F32x4Div) \
V(IA32F32x4Min) \
V(IA32F32x4Max) \
V(IA32F32x4Eq) \
V(IA32F32x4Ne) \
V(IA32F32x4Lt) \
V(IA32F32x4Le) \
V(IA32F32x4Pmin) \
V(IA32F32x4Pmax) \
V(IA32F32x4Round) \
V(IA32F32x4DemoteF64x2Zero) \
V(IA32I32x4Splat) \
V(IA32I32x4ExtractLane) \
V(IA32I32x4SConvertF32x4) \
V(IA32I32x4SConvertI16x8Low) \
V(IA32I32x4SConvertI16x8High) \
V(IA32I32x4Neg) \
V(IA32I32x4Shl) \
V(IA32I32x4ShrS) \
V(IA32I32x4Add) \
V(IA32I32x4Sub) \
V(IA32I32x4Mul) \
V(IA32I32x4MinS) \
V(IA32I32x4MaxS) \
V(IA32I32x4Eq) \
V(IA32I32x4Ne) \
V(IA32I32x4GtS) \
V(IA32I32x4GeS) \
V(SSEI32x4UConvertF32x4) \
V(AVXI32x4UConvertF32x4) \
V(IA32I32x4UConvertI16x8Low) \
V(IA32I32x4UConvertI16x8High) \
V(IA32I32x4ShrU) \
V(IA32I32x4MinU) \
V(IA32I32x4MaxU) \
V(SSEI32x4GtU) \
V(AVXI32x4GtU) \
V(SSEI32x4GeU) \
V(AVXI32x4GeU) \
V(IA32I32x4Abs) \
V(IA32I32x4BitMask) \
V(IA32I32x4DotI16x8S) \
V(IA32I32x4ExtMulLowI16x8S) \
V(IA32I32x4ExtMulHighI16x8S) \
V(IA32I32x4ExtMulLowI16x8U) \
V(IA32I32x4ExtMulHighI16x8U) \
V(IA32I32x4ExtAddPairwiseI16x8S) \
V(IA32I32x4ExtAddPairwiseI16x8U) \
V(IA32I32x4TruncSatF64x2SZero) \
V(IA32I32x4TruncSatF64x2UZero) \
V(IA32I16x8Splat) \
V(IA32I16x8ExtractLaneS) \
V(IA32I16x8SConvertI8x16Low) \
V(IA32I16x8SConvertI8x16High) \
V(IA32I16x8Neg) \
V(IA32I16x8Shl) \
V(IA32I16x8ShrS) \
V(IA32I16x8SConvertI32x4) \
V(IA32I16x8Add) \
V(IA32I16x8AddSatS) \
V(IA32I16x8Sub) \
V(IA32I16x8SubSatS) \
V(IA32I16x8Mul) \
V(IA32I16x8MinS) \
V(IA32I16x8MaxS) \
V(IA32I16x8Eq) \
V(SSEI16x8Ne) \
V(AVXI16x8Ne) \
V(IA32I16x8GtS) \
V(SSEI16x8GeS) \
V(AVXI16x8GeS) \
V(IA32I16x8UConvertI8x16Low) \
V(IA32I16x8UConvertI8x16High) \
V(IA32I16x8ShrU) \
V(IA32I16x8UConvertI32x4) \
V(IA32I16x8AddSatU) \
V(IA32I16x8SubSatU) \
V(IA32I16x8MinU) \
V(IA32I16x8MaxU) \
V(SSEI16x8GtU) \
V(AVXI16x8GtU) \
V(SSEI16x8GeU) \
V(AVXI16x8GeU) \
V(IA32I16x8RoundingAverageU) \
V(IA32I16x8Abs) \
V(IA32I16x8BitMask) \
V(IA32I16x8ExtMulLowI8x16S) \
V(IA32I16x8ExtMulHighI8x16S) \
V(IA32I16x8ExtMulLowI8x16U) \
V(IA32I16x8ExtMulHighI8x16U) \
V(IA32I16x8ExtAddPairwiseI8x16S) \
V(IA32I16x8ExtAddPairwiseI8x16U) \
V(IA32I16x8Q15MulRSatS) \
V(IA32I8x16Splat) \
V(IA32I8x16ExtractLaneS) \
V(IA32Pinsrb) \
V(IA32Pinsrw) \
V(IA32Pinsrd) \
V(IA32Pextrb) \
V(IA32Pextrw) \
V(IA32S128Store32Lane) \
V(IA32I8x16SConvertI16x8) \
V(IA32I8x16Neg) \
V(IA32I8x16Shl) \
V(IA32I8x16ShrS) \
V(IA32I8x16Add) \
V(IA32I8x16AddSatS) \
V(IA32I8x16Sub) \
V(IA32I8x16SubSatS) \
V(IA32I8x16MinS) \
V(IA32I8x16MaxS) \
V(IA32I8x16Eq) \
V(SSEI8x16Ne) \
V(AVXI8x16Ne) \
V(IA32I8x16GtS) \
V(SSEI8x16GeS) \
V(AVXI8x16GeS) \
V(IA32I8x16UConvertI16x8) \
V(IA32I8x16AddSatU) \
V(IA32I8x16SubSatU) \
V(IA32I8x16ShrU) \
V(IA32I8x16MinU) \
V(IA32I8x16MaxU) \
V(SSEI8x16GtU) \
V(AVXI8x16GtU) \
V(SSEI8x16GeU) \
V(AVXI8x16GeU) \
V(IA32I8x16RoundingAverageU) \
V(IA32I8x16Abs) \
V(IA32I8x16BitMask) \
V(IA32I8x16Popcnt) \
V(IA32S128Const) \
V(IA32S128Zero) \
V(IA32S128AllOnes) \
V(IA32S128Not) \
V(IA32S128And) \
V(IA32S128Or) \
V(IA32S128Xor) \
V(IA32S128Select) \
V(IA32S128AndNot) \
V(IA32I8x16Swizzle) \
V(IA32I8x16Shuffle) \
V(IA32S128Load8Splat) \
V(IA32S128Load16Splat) \
V(IA32S128Load32Splat) \
V(IA32S128Load64Splat) \
V(IA32S128Load8x8S) \
V(IA32S128Load8x8U) \
V(IA32S128Load16x4S) \
V(IA32S128Load16x4U) \
V(IA32S128Load32x2S) \
V(IA32S128Load32x2U) \
V(IA32S32x4Rotate) \
V(IA32S32x4Swizzle) \
V(IA32S32x4Shuffle) \
V(IA32S16x8Blend) \
V(IA32S16x8HalfShuffle1) \
V(IA32S16x8HalfShuffle2) \
V(IA32S8x16Alignr) \
V(IA32S16x8Dup) \
V(IA32S8x16Dup) \
V(SSES16x8UnzipHigh) \
V(AVXS16x8UnzipHigh) \
V(SSES16x8UnzipLow) \
V(AVXS16x8UnzipLow) \
V(SSES8x16UnzipHigh) \
V(AVXS8x16UnzipHigh) \
V(SSES8x16UnzipLow) \
V(AVXS8x16UnzipLow) \
V(IA32S64x2UnpackHigh) \
V(IA32S32x4UnpackHigh) \
V(IA32S16x8UnpackHigh) \
V(IA32S8x16UnpackHigh) \
V(IA32S64x2UnpackLow) \
V(IA32S32x4UnpackLow) \
V(IA32S16x8UnpackLow) \
V(IA32S8x16UnpackLow) \
V(SSES8x16TransposeLow) \
V(AVXS8x16TransposeLow) \
V(SSES8x16TransposeHigh) \
V(AVXS8x16TransposeHigh) \
V(SSES8x8Reverse) \
V(AVXS8x8Reverse) \
V(SSES8x4Reverse) \
V(AVXS8x4Reverse) \
V(SSES8x2Reverse) \
V(AVXS8x2Reverse) \
V(IA32S128AnyTrue) \
V(IA32I64x2AllTrue) \
V(IA32I32x4AllTrue) \
V(IA32I16x8AllTrue) \
V(IA32I8x16AllTrue) \
V(IA32Word32AtomicPairLoad) \
V(IA32Word32ReleasePairStore) \
V(IA32Word32SeqCstPairStore) \
V(IA32Word32AtomicPairAdd) \
V(IA32Word32AtomicPairSub) \
V(IA32Word32AtomicPairAnd) \
V(IA32Word32AtomicPairOr) \
V(IA32Word32AtomicPairXor) \
V(IA32Word32AtomicPairExchange) \
V(IA32Word32AtomicPairCompareExchange)
// Addressing modes represent the "shape" of inputs to an instruction.
@@ -296,23 +296,50 @@ static_assert(
"All addressing modes must fit in the 5-bit AddressingModeField.");
using FlagsModeField = base::BitField<FlagsMode, 14, 3>;
using FlagsConditionField = base::BitField<FlagsCondition, 17, 5>;
using DeoptImmedArgsCountField = base::BitField<int, 22, 2>;
using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
using MiscField = base::BitField<int, 22, 10>;
// {MiscField} is used for a variety of things, depending on the opcode.
// TODO(turbofan): There should be an abstraction that ensures safe encoding and
// decoding. {HasMemoryAccessMode} and its uses are a small step in that
// direction.
// LaneSizeField and AccessModeField are helper types to encode/decode a lane
// size, an access mode, or both inside the overlapping MiscField.
using LaneSizeField = base::BitField<int, 22, 8>;
using AccessModeField = base::BitField<MemoryAccessMode, 30, 2>;
// TODO(turbofan): {HasMemoryAccessMode} is currently only used to guard
// decoding (in CodeGenerator and InstructionScheduler). Encoding (in
// InstructionSelector) is not yet guarded. There are in fact instructions for
// which InstructionSelector does set a MemoryAccessMode but CodeGenerator
// doesn't care to consume it (e.g. kArm64LdrDecompressTaggedSigned). This is
// scary. {HasMemoryAccessMode} does not include these instructions, so they can
// be easily found by guarding encoding.
inline bool HasMemoryAccessMode(ArchOpcode opcode) {
switch (opcode) {
#define CASE(Name) \
case k##Name: \
return true;
TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(CASE)
#undef CASE
default:
return false;
}
}
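// Rough usage sketch (assumed, not part of this header): the instruction
// selector encodes the access mode into the same InstructionCode word, and
// consumers check HasMemoryAccessMode() before decoding it back out:
//
//   InstructionCode code = kArm64Ldr;
//   code |= AccessModeField::encode(kMemoryAccessProtected);
//   ...
//   if (HasMemoryAccessMode(ArchOpcodeField::decode(code))) {
//     MemoryAccessMode mode = AccessModeField::decode(code);
//   }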
using DeoptImmedArgsCountField = base::BitField<int, 22, 2>;
using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
// AtomicWidthField overlaps with MiscField and is used for the various Atomic
// opcodes. Only used on 64bit architectures. All atomic instructions on 32bit
// architectures are assumed to be 32bit wide.
using AtomicWidthField = base::BitField<AtomicWidth, 22, 2>;
// AtomicMemoryOrderField overlaps with MiscField and is used for the various
// Atomic opcodes. This field is not used on all architectures. It is used on
// architectures where the codegen for kSeqCst and kAcqRel differ only by
// emitting fences.
using AtomicMemoryOrderField = base::BitField<AtomicMemoryOrder, 24, 2>;
using AtomicStoreRecordWriteModeField = base::BitField<RecordWriteMode, 26, 4>;
using MiscField = base::BitField<int, 22, 10>;
// This static assertion serves as an early warning if we are about to exhaust
// the available opcode space. If we are about to exhaust it, we should start
@@ -167,7 +167,7 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
last_side_effect_instr_->AddSuccessor(new_node);
}
pending_loads_.push_back(new_node);
} else if (instr->IsDeoptimizeCall() || instr->IsTrap()) {
} else if (instr->IsDeoptimizeCall() || CanTrap(instr)) {
// Ensure that deopts or traps are not reordered with respect to
// side-effect instructions.
if (last_side_effect_instr_ != nullptr) {
@@ -176,7 +176,7 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
}
// Update last deoptimization or trap point.
if (instr->IsDeoptimizeCall() || instr->IsTrap()) {
if (instr->IsDeoptimizeCall() || CanTrap(instr)) {
last_deopt_or_trap_ = new_node;
}
@@ -169,6 +169,12 @@ class InstructionScheduler final : public ZoneObject {
return (GetInstructionFlags(instr) & kIsLoadOperation) != 0;
}
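// An instruction "can trap" if it is an explicit trap instruction or a memory
// access in protected mode (protected accesses rely on the trap handler for
// their bounds checks rather than on explicit checks).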
bool CanTrap(const Instruction* instr) const {
return instr->IsTrap() ||
(instr->HasMemoryAccessMode() &&
instr->memory_access_mode() == kMemoryAccessProtected);
}
// The scheduler will not move the following instructions before the last
// deopt/trap check:
// * loads (this is conservative)
@@ -184,7 +190,7 @@
// trap point we encountered.
bool DependsOnDeoptOrTrap(const Instruction* instr) const {
return MayNeedDeoptOrTrapCheck(instr) || instr->IsDeoptimizeCall() ||
instr->IsTrap() || HasSideEffect(instr) || IsLoadOperation(instr);
CanTrap(instr) || HasSideEffect(instr) || IsLoadOperation(instr);
}
// Identify nops used as a definition point for live-in registers at
@@ -882,6 +882,13 @@ class V8_EXPORT_PRIVATE Instruction final {
return FlagsConditionField::decode(opcode());
}
int misc() const { return MiscField::decode(opcode()); }
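// Whether this instruction's architecture-specific opcode reserves bits for a
// MemoryAccessMode (see TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST);
// only then may memory_access_mode() be called.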
bool HasMemoryAccessMode() const {
return compiler::HasMemoryAccessMode(arch_opcode());
}
MemoryAccessMode memory_access_mode() const {
DCHECK(HasMemoryAccessMode());
return AccessModeField::decode(opcode());
}
static Instruction* New(Zone* zone, InstructionCode opcode) {
return New(zone, opcode, 0, nullptr, 0, nullptr, 0, nullptr);
@@ -693,7 +693,7 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
int pc) {
const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
const MemoryAccessMode access_mode = instr->memory_access_mode();
if (access_mode == kMemoryAccessProtected) {
zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr);
}
@@ -703,7 +703,7 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr, int pc) {
DCHECK_NE(kMemoryAccessProtected, AccessModeField::decode(opcode));
DCHECK_NE(kMemoryAccessProtected, instr->memory_access_mode());
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -11,389 +11,394 @@ namespace compiler {
// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(X64Add) \
V(X64Add32) \
V(X64And) \
V(X64And32) \
V(X64Cmp) \
V(X64Cmp32) \
V(X64Cmp16) \
V(X64Cmp8) \
V(X64Test) \
V(X64Test32) \
V(X64Test16) \
V(X64Test8) \
V(X64Or) \
V(X64Or32) \
V(X64Xor) \
V(X64Xor32) \
V(X64Sub) \
V(X64Sub32) \
V(X64Imul) \
V(X64Imul32) \
V(X64ImulHigh32) \
V(X64UmulHigh32) \
V(X64Idiv) \
V(X64Idiv32) \
V(X64Udiv) \
V(X64Udiv32) \
V(X64Not) \
V(X64Not32) \
V(X64Neg) \
V(X64Neg32) \
V(X64Shl) \
V(X64Shl32) \
V(X64Shr) \
V(X64Shr32) \
V(X64Sar) \
V(X64Sar32) \
V(X64Rol) \
V(X64Rol32) \
V(X64Ror) \
V(X64Ror32) \
V(X64Lzcnt) \
V(X64Lzcnt32) \
V(X64Tzcnt) \
V(X64Tzcnt32) \
V(X64Popcnt) \
V(X64Popcnt32) \
V(X64Bswap) \
V(X64Bswap32) \
V(X64MFence) \
V(X64LFence) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
V(SSEFloat32Sub) \
V(SSEFloat32Mul) \
V(SSEFloat32Div) \
V(SSEFloat32Sqrt) \
V(SSEFloat32ToFloat64) \
V(SSEFloat32ToInt32) \
V(SSEFloat32ToUint32) \
V(SSEFloat32Round) \
V(SSEFloat64Cmp) \
V(SSEFloat64Add) \
V(SSEFloat64Sub) \
V(SSEFloat64Mul) \
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
V(SSEFloat64Sqrt) \
V(SSEFloat64Round) \
V(SSEFloat32Max) \
V(SSEFloat64Max) \
V(SSEFloat32Min) \
V(SSEFloat64Min) \
V(SSEFloat64ToFloat32) \
V(SSEFloat64ToInt32) \
V(SSEFloat64ToUint32) \
V(SSEFloat32ToInt64) \
V(SSEFloat64ToInt64) \
V(SSEFloat32ToUint64) \
V(SSEFloat64ToUint64) \
V(SSEInt32ToFloat64) \
V(SSEInt32ToFloat32) \
V(SSEInt64ToFloat32) \
V(SSEInt64ToFloat64) \
V(SSEUint64ToFloat32) \
V(SSEUint64ToFloat64) \
V(SSEUint32ToFloat64) \
V(SSEUint32ToFloat32) \
V(SSEFloat64ExtractLowWord32) \
V(SSEFloat64ExtractHighWord32) \
V(SSEFloat64InsertLowWord32) \
V(SSEFloat64InsertHighWord32) \
V(SSEFloat64LoadLowWord32) \
V(SSEFloat64SilenceNaN) \
V(AVXFloat32Cmp) \
V(AVXFloat32Add) \
V(AVXFloat32Sub) \
V(AVXFloat32Mul) \
V(AVXFloat32Div) \
V(AVXFloat64Cmp) \
V(AVXFloat64Add) \
V(AVXFloat64Sub) \
V(AVXFloat64Mul) \
V(AVXFloat64Div) \
V(X64Float64Abs) \
V(X64Float64Neg) \
V(X64Float32Abs) \
V(X64Float32Neg) \
V(X64Movsxbl) \
V(X64Movzxbl) \
V(X64Movsxbq) \
V(X64Movzxbq) \
V(X64Movb) \
V(X64Movsxwl) \
V(X64Movzxwl) \
V(X64Movsxwq) \
V(X64Movzxwq) \
V(X64Movw) \
V(X64Movl) \
V(X64Movsxlq) \
V(X64MovqDecompressTaggedSigned) \
V(X64MovqDecompressTaggedPointer) \
V(X64MovqDecompressAnyTagged) \
V(X64MovqCompressTagged) \
V(X64Movq) \
V(X64Movsd) \
V(X64Movss) \
V(X64Movdqu) \
V(X64BitcastFI) \
V(X64BitcastDL) \
V(X64BitcastIF) \
V(X64BitcastLD) \
V(X64Lea32) \
V(X64Lea) \
V(X64Dec32) \
V(X64Inc32) \
V(X64Push) \
V(X64Poke) \
V(X64Peek) \
V(X64F64x2Splat) \
V(X64F64x2ExtractLane) \
V(X64F64x2ReplaceLane) \
V(X64F64x2Abs) \
V(X64F64x2Neg) \
V(X64F64x2Sqrt) \
V(X64F64x2Add) \
V(X64F64x2Sub) \
V(X64F64x2Mul) \
V(X64F64x2Div) \
V(X64F64x2Min) \
V(X64F64x2Max) \
V(X64F64x2Eq) \
V(X64F64x2Ne) \
V(X64F64x2Lt) \
V(X64F64x2Le) \
V(X64F64x2Qfma) \
V(X64F64x2Qfms) \
V(X64F64x2Pmin) \
V(X64F64x2Pmax) \
V(X64F64x2Round) \
V(X64F64x2ConvertLowI32x4S) \
V(X64F64x2ConvertLowI32x4U) \
V(X64F64x2PromoteLowF32x4) \
V(X64F32x4Splat) \
V(X64F32x4ExtractLane) \
V(X64F32x4ReplaceLane) \
V(X64F32x4SConvertI32x4) \
V(X64F32x4UConvertI32x4) \
V(X64F32x4Abs) \
V(X64F32x4Neg) \
V(X64F32x4Sqrt) \
V(X64F32x4RecipApprox) \
V(X64F32x4RecipSqrtApprox) \
V(X64F32x4Add) \
V(X64F32x4Sub) \
V(X64F32x4Mul) \
V(X64F32x4Div) \
V(X64F32x4Min) \
V(X64F32x4Max) \
V(X64F32x4Eq) \
V(X64F32x4Ne) \
V(X64F32x4Lt) \
V(X64F32x4Le) \
V(X64F32x4Qfma) \
V(X64F32x4Qfms) \
V(X64F32x4Pmin) \
V(X64F32x4Pmax) \
V(X64F32x4Round) \
V(X64F32x4DemoteF64x2Zero) \
V(X64I64x2Splat) \
V(X64I64x2ExtractLane) \
V(X64I64x2Abs) \
V(X64I64x2Neg) \
V(X64I64x2BitMask) \
V(X64I64x2Shl) \
V(X64I64x2ShrS) \
V(X64I64x2Add) \
V(X64I64x2Sub) \
V(X64I64x2Mul) \
V(X64I64x2Eq) \
V(X64I64x2GtS) \
V(X64I64x2GeS) \
V(X64I64x2Ne) \
V(X64I64x2ShrU) \
V(X64I64x2ExtMulLowI32x4S) \
V(X64I64x2ExtMulHighI32x4S) \
V(X64I64x2ExtMulLowI32x4U) \
V(X64I64x2ExtMulHighI32x4U) \
V(X64I64x2SConvertI32x4Low) \
V(X64I64x2SConvertI32x4High) \
V(X64I64x2UConvertI32x4Low) \
V(X64I64x2UConvertI32x4High) \
V(X64I32x4Splat) \
V(X64I32x4ExtractLane) \
V(X64I32x4SConvertF32x4) \
V(X64I32x4SConvertI16x8Low) \
V(X64I32x4SConvertI16x8High) \
V(X64I32x4Neg) \
V(X64I32x4Shl) \
V(X64I32x4ShrS) \
V(X64I32x4Add) \
V(X64I32x4Sub) \
V(X64I32x4Mul) \
V(X64I32x4MinS) \
V(X64I32x4MaxS) \
V(X64I32x4Eq) \
V(X64I32x4Ne) \
V(X64I32x4GtS) \
V(X64I32x4GeS) \
V(X64I32x4UConvertF32x4) \
V(X64I32x4UConvertI16x8Low) \
V(X64I32x4UConvertI16x8High) \
V(X64I32x4ShrU) \
V(X64I32x4MinU) \
V(X64I32x4MaxU) \
V(X64I32x4GtU) \
V(X64I32x4GeU) \
V(X64I32x4Abs) \
V(X64I32x4BitMask) \
V(X64I32x4DotI16x8S) \
V(X64I32x4ExtMulLowI16x8S) \
V(X64I32x4ExtMulHighI16x8S) \
V(X64I32x4ExtMulLowI16x8U) \
V(X64I32x4ExtMulHighI16x8U) \
V(X64I32x4ExtAddPairwiseI16x8S) \
V(X64I32x4ExtAddPairwiseI16x8U) \
V(X64I32x4TruncSatF64x2SZero) \
V(X64I32x4TruncSatF64x2UZero) \
V(X64I16x8Splat) \
V(X64I16x8ExtractLaneS) \
V(X64I16x8SConvertI8x16Low) \
V(X64I16x8SConvertI8x16High) \
V(X64I16x8Neg) \
V(X64I16x8Shl) \
V(X64I16x8ShrS) \
V(X64I16x8SConvertI32x4) \
V(X64I16x8Add) \
V(X64I16x8AddSatS) \
V(X64I16x8Sub) \
V(X64I16x8SubSatS) \
V(X64I16x8Mul) \
V(X64I16x8MinS) \
V(X64I16x8MaxS) \
V(X64I16x8Eq) \
V(X64I16x8Ne) \
V(X64I16x8GtS) \
V(X64I16x8GeS) \
V(X64I16x8UConvertI8x16Low) \
V(X64I16x8UConvertI8x16High) \
V(X64I16x8ShrU) \
V(X64I16x8UConvertI32x4) \
V(X64I16x8AddSatU) \
V(X64I16x8SubSatU) \
V(X64I16x8MinU) \
V(X64I16x8MaxU) \
V(X64I16x8GtU) \
V(X64I16x8GeU) \
V(X64I16x8RoundingAverageU) \
V(X64I16x8Abs) \
V(X64I16x8BitMask) \
V(X64I16x8ExtMulLowI8x16S) \
V(X64I16x8ExtMulHighI8x16S) \
V(X64I16x8ExtMulLowI8x16U) \
V(X64I16x8ExtMulHighI8x16U) \
V(X64I16x8ExtAddPairwiseI8x16S) \
V(X64I16x8ExtAddPairwiseI8x16U) \
V(X64I16x8Q15MulRSatS) \
V(X64I8x16Splat) \
V(X64I8x16ExtractLaneS) \
V(X64Pinsrb) \
V(X64Pinsrw) \
V(X64Pinsrd) \
V(X64Pinsrq) \
V(X64Pextrb) \
V(X64Pextrw) \
V(X64I8x16SConvertI16x8) \
V(X64I8x16Neg) \
V(X64I8x16Shl) \
V(X64I8x16ShrS) \
V(X64I8x16Add) \
V(X64I8x16AddSatS) \
V(X64I8x16Sub) \
V(X64I8x16SubSatS) \
V(X64I8x16MinS) \
V(X64I8x16MaxS) \
V(X64I8x16Eq) \
V(X64I8x16Ne) \
V(X64I8x16GtS) \
V(X64I8x16GeS) \
V(X64I8x16UConvertI16x8) \
V(X64I8x16AddSatU) \
V(X64I8x16SubSatU) \
V(X64I8x16ShrU) \
V(X64I8x16MinU) \
V(X64I8x16MaxU) \
V(X64I8x16GtU) \
V(X64I8x16GeU) \
V(X64I8x16RoundingAverageU) \
V(X64I8x16Abs) \
V(X64I8x16BitMask) \
V(X64S128Const) \
V(X64S128Zero) \
V(X64S128AllOnes) \
V(X64S128Not) \
V(X64S128And) \
V(X64S128Or) \
V(X64S128Xor) \
V(X64S128Select) \
V(X64S128AndNot) \
V(X64I8x16Swizzle) \
V(X64I8x16Shuffle) \
V(X64I8x16Popcnt) \
V(X64S128Load8Splat) \
V(X64S128Load16Splat) \
V(X64S128Load32Splat) \
V(X64S128Load64Splat) \
V(X64S128Load8x8S) \
V(X64S128Load8x8U) \
V(X64S128Load16x4S) \
V(X64S128Load16x4U) \
V(X64S128Load32x2S) \
V(X64S128Load32x2U) \
V(X64S128Store32Lane) \
V(X64S128Store64Lane) \
V(X64Shufps) \
V(X64S32x4Rotate) \
V(X64S32x4Swizzle) \
V(X64S32x4Shuffle) \
V(X64S16x8Blend) \
V(X64S16x8HalfShuffle1) \
V(X64S16x8HalfShuffle2) \
V(X64S8x16Alignr) \
V(X64S16x8Dup) \
V(X64S8x16Dup) \
V(X64S16x8UnzipHigh) \
V(X64S16x8UnzipLow) \
V(X64S8x16UnzipHigh) \
V(X64S8x16UnzipLow) \
V(X64S64x2UnpackHigh) \
V(X64S32x4UnpackHigh) \
V(X64S16x8UnpackHigh) \
V(X64S8x16UnpackHigh) \
V(X64S64x2UnpackLow) \
V(X64S32x4UnpackLow) \
V(X64S16x8UnpackLow) \
V(X64S8x16UnpackLow) \
V(X64S8x16TransposeLow) \
V(X64S8x16TransposeHigh) \
V(X64S8x8Reverse) \
V(X64S8x4Reverse) \
V(X64S8x2Reverse) \
V(X64V128AnyTrue) \
V(X64I64x2AllTrue) \
V(X64I32x4AllTrue) \
V(X64I16x8AllTrue) \
V(X64I8x16AllTrue) \
V(X64Word64AtomicAddUint64) \
V(X64Word64AtomicSubUint64) \
V(X64Word64AtomicAndUint64) \
V(X64Word64AtomicOrUint64) \
V(X64Word64AtomicXorUint64) \
V(X64Word64AtomicStoreWord64) \
V(X64Word64AtomicExchangeUint64) \
// Opcodes that support a MemoryAccessMode.
#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
V(X64F64x2PromoteLowF32x4) \
V(X64Movb) \
V(X64Movdqu) \
V(X64Movl) \
V(X64Movq) \
V(X64Movsd) \
V(X64Movss) \
V(X64Movsxbl) \
V(X64Movsxbq) \
V(X64Movsxlq) \
V(X64Movsxwl) \
V(X64Movsxwq) \
V(X64Movw) \
V(X64Movzxbl) \
V(X64Movzxbq) \
V(X64Movzxwl) \
V(X64Movzxwq) \
V(X64Pextrb) \
V(X64Pextrw) \
V(X64Pinsrb) \
V(X64Pinsrd) \
V(X64Pinsrq) \
V(X64Pinsrw) \
V(X64S128Load16Splat) \
V(X64S128Load16x4S) \
V(X64S128Load16x4U) \
V(X64S128Load32Splat) \
V(X64S128Load32x2S) \
V(X64S128Load32x2U) \
V(X64S128Load64Splat) \
V(X64S128Load8Splat) \
V(X64S128Load8x8S) \
V(X64S128Load8x8U) \
V(X64S128Store32Lane) \
V(X64S128Store64Lane)
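// Editor's sketch (illustrative only, not part of this CL): splitting the
// opcode list as above follows the usual X-macro pattern, so a predicate
// over opcodes can be generated mechanically from the sublist. All names
// below (Sample*, SampleSupportsMemoryAccessMode) are invented for this
// example and do not refer to actual V8 identifiers.
#define SAMPLE_MEMORY_ACCESS_OPCODE_LIST(V) \
  V(Load)                                   \
  V(Store)
#define SAMPLE_OPCODE_LIST(V)         \
  SAMPLE_MEMORY_ACCESS_OPCODE_LIST(V) \
  V(Add)

enum SampleOpcode {
#define DECLARE_SAMPLE(Name) kSample##Name,
  SAMPLE_OPCODE_LIST(DECLARE_SAMPLE)
#undef DECLARE_SAMPLE
};

// True exactly for opcodes that come from the memory-access sublist.
constexpr bool SampleSupportsMemoryAccessMode(SampleOpcode opcode) {
  switch (opcode) {
#define SAMPLE_CASE(Name) case kSample##Name:
    SAMPLE_MEMORY_ACCESS_OPCODE_LIST(SAMPLE_CASE)
#undef SAMPLE_CASE
    return true;
    default:
      return false;
  }
}
#undef SAMPLE_OPCODE_LIST
#undef SAMPLE_MEMORY_ACCESS_OPCODE_LIST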
#define TARGET_ARCH_OPCODE_LIST(V) \
TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
V(X64Add) \
V(X64Add32) \
V(X64And) \
V(X64And32) \
V(X64Cmp) \
V(X64Cmp32) \
V(X64Cmp16) \
V(X64Cmp8) \
V(X64Test) \
V(X64Test32) \
V(X64Test16) \
V(X64Test8) \
V(X64Or) \
V(X64Or32) \
V(X64Xor) \
V(X64Xor32) \
V(X64Sub) \
V(X64Sub32) \
V(X64Imul) \
V(X64Imul32) \
V(X64ImulHigh32) \
V(X64UmulHigh32) \
V(X64Idiv) \
V(X64Idiv32) \
V(X64Udiv) \
V(X64Udiv32) \
V(X64Not) \
V(X64Not32) \
V(X64Neg) \
V(X64Neg32) \
V(X64Shl) \
V(X64Shl32) \
V(X64Shr) \
V(X64Shr32) \
V(X64Sar) \
V(X64Sar32) \
V(X64Rol) \
V(X64Rol32) \
V(X64Ror) \
V(X64Ror32) \
V(X64Lzcnt) \
V(X64Lzcnt32) \
V(X64Tzcnt) \
V(X64Tzcnt32) \
V(X64Popcnt) \
V(X64Popcnt32) \
V(X64Bswap) \
V(X64Bswap32) \
V(X64MFence) \
V(X64LFence) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
V(SSEFloat32Sub) \
V(SSEFloat32Mul) \
V(SSEFloat32Div) \
V(SSEFloat32Sqrt) \
V(SSEFloat32ToFloat64) \
V(SSEFloat32ToInt32) \
V(SSEFloat32ToUint32) \
V(SSEFloat32Round) \
V(SSEFloat64Cmp) \
V(SSEFloat64Add) \
V(SSEFloat64Sub) \
V(SSEFloat64Mul) \
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
V(SSEFloat64Sqrt) \
V(SSEFloat64Round) \
V(SSEFloat32Max) \
V(SSEFloat64Max) \
V(SSEFloat32Min) \
V(SSEFloat64Min) \
V(SSEFloat64ToFloat32) \
V(SSEFloat64ToInt32) \
V(SSEFloat64ToUint32) \
V(SSEFloat32ToInt64) \
V(SSEFloat64ToInt64) \
V(SSEFloat32ToUint64) \
V(SSEFloat64ToUint64) \
V(SSEInt32ToFloat64) \
V(SSEInt32ToFloat32) \
V(SSEInt64ToFloat32) \
V(SSEInt64ToFloat64) \
V(SSEUint64ToFloat32) \
V(SSEUint64ToFloat64) \
V(SSEUint32ToFloat64) \
V(SSEUint32ToFloat32) \
V(SSEFloat64ExtractLowWord32) \
V(SSEFloat64ExtractHighWord32) \
V(SSEFloat64InsertLowWord32) \
V(SSEFloat64InsertHighWord32) \
V(SSEFloat64LoadLowWord32) \
V(SSEFloat64SilenceNaN) \
V(AVXFloat32Cmp) \
V(AVXFloat32Add) \
V(AVXFloat32Sub) \
V(AVXFloat32Mul) \
V(AVXFloat32Div) \
V(AVXFloat64Cmp) \
V(AVXFloat64Add) \
V(AVXFloat64Sub) \
V(AVXFloat64Mul) \
V(AVXFloat64Div) \
V(X64Float64Abs) \
V(X64Float64Neg) \
V(X64Float32Abs) \
V(X64Float32Neg) \
V(X64MovqDecompressTaggedSigned) \
V(X64MovqDecompressTaggedPointer) \
V(X64MovqDecompressAnyTagged) \
V(X64MovqCompressTagged) \
V(X64BitcastFI) \
V(X64BitcastDL) \
V(X64BitcastIF) \
V(X64BitcastLD) \
V(X64Lea32) \
V(X64Lea) \
V(X64Dec32) \
V(X64Inc32) \
V(X64Push) \
V(X64Poke) \
V(X64Peek) \
V(X64F64x2Splat) \
V(X64F64x2ExtractLane) \
V(X64F64x2ReplaceLane) \
V(X64F64x2Abs) \
V(X64F64x2Neg) \
V(X64F64x2Sqrt) \
V(X64F64x2Add) \
V(X64F64x2Sub) \
V(X64F64x2Mul) \
V(X64F64x2Div) \
V(X64F64x2Min) \
V(X64F64x2Max) \
V(X64F64x2Eq) \
V(X64F64x2Ne) \
V(X64F64x2Lt) \
V(X64F64x2Le) \
V(X64F64x2Qfma) \
V(X64F64x2Qfms) \
V(X64F64x2Pmin) \
V(X64F64x2Pmax) \
V(X64F64x2Round) \
V(X64F64x2ConvertLowI32x4S) \
V(X64F64x2ConvertLowI32x4U) \
V(X64F32x4Splat) \
V(X64F32x4ExtractLane) \
V(X64F32x4ReplaceLane) \
V(X64F32x4SConvertI32x4) \
V(X64F32x4UConvertI32x4) \
V(X64F32x4Abs) \
V(X64F32x4Neg) \
V(X64F32x4Sqrt) \
V(X64F32x4RecipApprox) \
V(X64F32x4RecipSqrtApprox) \
V(X64F32x4Add) \
V(X64F32x4Sub) \
V(X64F32x4Mul) \
V(X64F32x4Div) \
V(X64F32x4Min) \
V(X64F32x4Max) \
V(X64F32x4Eq) \
V(X64F32x4Ne) \
V(X64F32x4Lt) \
V(X64F32x4Le) \
V(X64F32x4Qfma) \
V(X64F32x4Qfms) \
V(X64F32x4Pmin) \
V(X64F32x4Pmax) \
V(X64F32x4Round) \
V(X64F32x4DemoteF64x2Zero) \
V(X64I64x2Splat) \
V(X64I64x2ExtractLane) \
V(X64I64x2Abs) \
V(X64I64x2Neg) \
V(X64I64x2BitMask) \
V(X64I64x2Shl) \
V(X64I64x2ShrS) \
V(X64I64x2Add) \
V(X64I64x2Sub) \
V(X64I64x2Mul) \
V(X64I64x2Eq) \
V(X64I64x2GtS) \
V(X64I64x2GeS) \
V(X64I64x2Ne) \
V(X64I64x2ShrU) \
V(X64I64x2ExtMulLowI32x4S) \
V(X64I64x2ExtMulHighI32x4S) \
V(X64I64x2ExtMulLowI32x4U) \
V(X64I64x2ExtMulHighI32x4U) \
V(X64I64x2SConvertI32x4Low) \
V(X64I64x2SConvertI32x4High) \
V(X64I64x2UConvertI32x4Low) \
V(X64I64x2UConvertI32x4High) \
V(X64I32x4Splat) \
V(X64I32x4ExtractLane) \
V(X64I32x4SConvertF32x4) \
V(X64I32x4SConvertI16x8Low) \
V(X64I32x4SConvertI16x8High) \
V(X64I32x4Neg) \
V(X64I32x4Shl) \
V(X64I32x4ShrS) \
V(X64I32x4Add) \
V(X64I32x4Sub) \
V(X64I32x4Mul) \
V(X64I32x4MinS) \
V(X64I32x4MaxS) \
V(X64I32x4Eq) \
V(X64I32x4Ne) \
V(X64I32x4GtS) \
V(X64I32x4GeS) \
V(X64I32x4UConvertF32x4) \
V(X64I32x4UConvertI16x8Low) \
V(X64I32x4UConvertI16x8High) \
V(X64I32x4ShrU) \
V(X64I32x4MinU) \
V(X64I32x4MaxU) \
V(X64I32x4GtU) \
V(X64I32x4GeU) \
V(X64I32x4Abs) \
V(X64I32x4BitMask) \
V(X64I32x4DotI16x8S) \
V(X64I32x4ExtMulLowI16x8S) \
V(X64I32x4ExtMulHighI16x8S) \
V(X64I32x4ExtMulLowI16x8U) \
V(X64I32x4ExtMulHighI16x8U) \
V(X64I32x4ExtAddPairwiseI16x8S) \
V(X64I32x4ExtAddPairwiseI16x8U) \
V(X64I32x4TruncSatF64x2SZero) \
V(X64I32x4TruncSatF64x2UZero) \
V(X64I16x8Splat) \
V(X64I16x8ExtractLaneS) \
V(X64I16x8SConvertI8x16Low) \
V(X64I16x8SConvertI8x16High) \
V(X64I16x8Neg) \
V(X64I16x8Shl) \
V(X64I16x8ShrS) \
V(X64I16x8SConvertI32x4) \
V(X64I16x8Add) \
V(X64I16x8AddSatS) \
V(X64I16x8Sub) \
V(X64I16x8SubSatS) \
V(X64I16x8Mul) \
V(X64I16x8MinS) \
V(X64I16x8MaxS) \
V(X64I16x8Eq) \
V(X64I16x8Ne) \
V(X64I16x8GtS) \
V(X64I16x8GeS) \
V(X64I16x8UConvertI8x16Low) \
V(X64I16x8UConvertI8x16High) \
V(X64I16x8ShrU) \
V(X64I16x8UConvertI32x4) \
V(X64I16x8AddSatU) \
V(X64I16x8SubSatU) \
V(X64I16x8MinU) \
V(X64I16x8MaxU) \
V(X64I16x8GtU) \
V(X64I16x8GeU) \
V(X64I16x8RoundingAverageU) \
V(X64I16x8Abs) \
V(X64I16x8BitMask) \
V(X64I16x8ExtMulLowI8x16S) \
V(X64I16x8ExtMulHighI8x16S) \
V(X64I16x8ExtMulLowI8x16U) \
V(X64I16x8ExtMulHighI8x16U) \
V(X64I16x8ExtAddPairwiseI8x16S) \
V(X64I16x8ExtAddPairwiseI8x16U) \
V(X64I16x8Q15MulRSatS) \
V(X64I8x16Splat) \
V(X64I8x16ExtractLaneS) \
V(X64I8x16SConvertI16x8) \
V(X64I8x16Neg) \
V(X64I8x16Shl) \
V(X64I8x16ShrS) \
V(X64I8x16Add) \
V(X64I8x16AddSatS) \
V(X64I8x16Sub) \
V(X64I8x16SubSatS) \
V(X64I8x16MinS) \
V(X64I8x16MaxS) \
V(X64I8x16Eq) \
V(X64I8x16Ne) \
V(X64I8x16GtS) \
V(X64I8x16GeS) \
V(X64I8x16UConvertI16x8) \
V(X64I8x16AddSatU) \
V(X64I8x16SubSatU) \
V(X64I8x16ShrU) \
V(X64I8x16MinU) \
V(X64I8x16MaxU) \
V(X64I8x16GtU) \
V(X64I8x16GeU) \
V(X64I8x16RoundingAverageU) \
V(X64I8x16Abs) \
V(X64I8x16BitMask) \
V(X64S128Const) \
V(X64S128Zero) \
V(X64S128AllOnes) \
V(X64S128Not) \
V(X64S128And) \
V(X64S128Or) \
V(X64S128Xor) \
V(X64S128Select) \
V(X64S128AndNot) \
V(X64I8x16Swizzle) \
V(X64I8x16Shuffle) \
V(X64I8x16Popcnt) \
V(X64Shufps) \
V(X64S32x4Rotate) \
V(X64S32x4Swizzle) \
V(X64S32x4Shuffle) \
V(X64S16x8Blend) \
V(X64S16x8HalfShuffle1) \
V(X64S16x8HalfShuffle2) \
V(X64S8x16Alignr) \
V(X64S16x8Dup) \
V(X64S8x16Dup) \
V(X64S16x8UnzipHigh) \
V(X64S16x8UnzipLow) \
V(X64S8x16UnzipHigh) \
V(X64S8x16UnzipLow) \
V(X64S64x2UnpackHigh) \
V(X64S32x4UnpackHigh) \
V(X64S16x8UnpackHigh) \
V(X64S8x16UnpackHigh) \
V(X64S64x2UnpackLow) \
V(X64S32x4UnpackLow) \
V(X64S16x8UnpackLow) \
V(X64S8x16UnpackLow) \
V(X64S8x16TransposeLow) \
V(X64S8x16TransposeHigh) \
V(X64S8x8Reverse) \
V(X64S8x4Reverse) \
V(X64S8x2Reverse) \
V(X64V128AnyTrue) \
V(X64I64x2AllTrue) \
V(X64I32x4AllTrue) \
V(X64I16x8AllTrue) \
V(X64I8x16AllTrue) \
V(X64Word64AtomicAddUint64) \
V(X64Word64AtomicSubUint64) \
V(X64Word64AtomicAndUint64) \
V(X64Word64AtomicOrUint64) \
V(X64Word64AtomicXorUint64) \
V(X64Word64AtomicStoreWord64) \
V(X64Word64AtomicExchangeUint64) \
V(X64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
......
......@@ -1472,13 +1472,6 @@
'regress/regress-779407': [SKIP],
}], # variant == experimental_regexp
##############################################################################
['variant == instruction_scheduling or variant == stress_instruction_scheduling', {
# BUG(12018): These tests currently fail with --turbo-instruction-scheduling.
'regress/wasm/regress-1231950': [SKIP],
'regress/wasm/regress-1242300': [SKIP],
}], # variant == instruction_scheduling or variant == stress_instruction_scheduling
################################################################################
['single_generation', {
# These tests rely on allocation site tracking which only works in the young generation.
......