// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
#error This header must be included via macro-assembler.h
#endif

#ifndef V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_

#include <vector>

#include "src/base/bits.h"
#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
#include "src/objects/tagged-index.h"

// Simulator specific helpers.
#if USE_SIMULATOR
#if DEBUG
#define ASM_LOCATION(message) __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#define ASM_LOCATION_IN_ASSEMBLER(message) \
  Debug("LOCATION: " message, __LINE__, NO_PARAM)
#else
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif
#else
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif

namespace v8 {
namespace internal {

#define LS_MACRO_LIST(V)                                     \
  V(Ldrb, Register&, rt, LDRB_w)                             \
  V(Strb, Register&, rt, STRB_w)                             \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
  V(Ldrh, Register&, rt, LDRH_w)                             \
  V(Strh, Register&, rt, STRH_w)                             \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
  V(Ldrsw, Register&, rt, LDRSW_x)
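
// For illustration, each V(...) entry above is expanded by DECLARE_FUNCTION
// (defined later in this header) into a load/store declaration. For example,
// V(Ldrb, Register&, rt, LDRB_w) yields:
//
//   inline void Ldrb(const Register& rt, const MemOperand& addr);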

#define LSPAIR_MACRO_LIST(V)                             \
  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2))  \
  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)

#define LDA_STL_MACRO_LIST(V) \
  V(Ldarb, ldarb)             \
  V(Ldarh, ldarh)             \
  V(Ldar, ldar)               \
  V(Ldaxrb, ldaxrb)           \
  V(Ldaxrh, ldaxrh)           \
  V(Ldaxr, ldaxr)             \
  V(Stlrb, stlrb)             \
  V(Stlrh, stlrh)             \
  V(Stlr, stlr)

#define STLX_MACRO_LIST(V) \
  V(Stlxrb, stlxrb)        \
  V(Stlxrh, stlxrh)        \
  V(Stlxr, stlxr)

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);
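
// For example, a sketch of loading an object's map field (the registers here
// are arbitrary; FieldMemOperand compensates for the heap-object pointer tag,
// so callers pass the plain field offset):
//
//   __ Ldr(x0, FieldMemOperand(x1, HeapObject::kMapOffset));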

// ----------------------------------------------------------------------------
// MacroAssembler

enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of those; the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq,
  integer_ne = ne,
  integer_hs = hs,
  integer_lo = lo,
  integer_mi = mi,
  integer_pl = pl,
  integer_vs = vs,
  integer_vc = vc,
  integer_hi = hi,
  integer_ls = ls,
  integer_ge = ge,
  integer_lt = lt,
  integer_gt = gt,
  integer_le = le,
  integer_al = al,
  integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always,
  never,
  // cbz and cbnz
  reg_zero,
  reg_not_zero,
  // tbz and tbnz
  reg_bit_clear,
  reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear
};

inline BranchType InvertBranchType(BranchType type) {
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    return static_cast<BranchType>(
        NegateCondition(static_cast<Condition>(type)));
  } else {
    return static_cast<BranchType>(type ^ 1);
  }
}

enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };

// The macro assembler supports moving automatically pre-shifted immediates for
// arithmetic and logical instructions, and then applying a post shift in the
// instruction to undo the modification, in order to reduce the code emitted for
// an operation. For example:
//
//  Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
//
// This optimisation can be only partially applied when the stack pointer is an
// operand or destination, so this enumeration is used to control the shift.
enum PreShiftImmMode {
  kNoShift,          // Don't pre-shift.
  kLimitShiftForSP,  // Limit pre-shift for add/sub extend use.
  kAnyShift          // Allow any pre-shift.
};

// TODO(victorgomes): Move definition to macro-assembler.h, once all other
// platforms are updated.
enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };

class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 public:
  using TurboAssemblerBase::TurboAssemblerBase;

#if DEBUG
  void set_allow_macro_instructions(bool value) {
    allow_macro_instructions_ = value;
  }
  bool allow_macro_instructions() const { return allow_macro_instructions_; }
#endif

  // We should not use near calls or jumps for calls to external references,
  // since the code spaces are not guaranteed to be close to each other.
  bool CanUseNearCallOrJump(RelocInfo::Mode rmode) {
    return rmode != RelocInfo::EXTERNAL_REFERENCE;
  }

  static bool IsNearCallOffset(int64_t offset);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
    // Out-of-line constant pool not implemented on arm64.
    UNREACHABLE();
  }
  void LeaveFrame(StackFrame::Type type);

  inline void InitializeRootRegister();

  void Mov(const Register& rd, const Operand& operand,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
  void Mov(const Register& rd, uint64_t imm);
  void Mov(const VRegister& vd, int vd_index, const VRegister& vn,
           int vn_index) {
    DCHECK(allow_macro_instructions());
    mov(vd, vd_index, vn, vn_index);
  }
  void Mov(const Register& rd, Smi smi);
  void Mov(const VRegister& vd, const VRegister& vn, int index) {
    DCHECK(allow_macro_instructions());
    mov(vd, vn, index);
  }
  void Mov(const VRegister& vd, int vd_index, const Register& rn) {
    DCHECK(allow_macro_instructions());
    mov(vd, vd_index, rn);
  }
  void Mov(const Register& rd, const VRegister& vn, int vn_index) {
    DCHECK(allow_macro_instructions());
    mov(rd, vn, vn_index);
  }

  // These are required for compatibility with architecture independent code.
  // Remove if not needed.
  void Move(Register dst, Smi src);
  void Move(Register dst, MemOperand src);
  void Move(Register dst, Register src);

  // Move src0 to dst0 and src1 to dst1, handling possible overlaps.
  void MovePair(Register dst0, Register src0, Register dst1, Register src1);

  // Register swap. Note that the register operands should be distinct.
  void Swap(Register lhs, Register rhs);
  void Swap(VRegister lhs, VRegister rhs);

// NEON by element instructions.
#define NEON_BYELEMENT_MACRO_LIST(V) \
  V(fmla, Fmla)                      \
  V(fmls, Fmls)                      \
  V(fmul, Fmul)                      \
  V(fmulx, Fmulx)                    \
  V(mul, Mul)                        \
  V(mla, Mla)                        \
  V(mls, Mls)                        \
  V(sqdmulh, Sqdmulh)                \
  V(sqrdmulh, Sqrdmulh)              \
  V(sqdmull, Sqdmull)                \
  V(sqdmull2, Sqdmull2)              \
  V(sqdmlal, Sqdmlal)                \
  V(sqdmlal2, Sqdmlal2)              \
  V(sqdmlsl, Sqdmlsl)                \
  V(sqdmlsl2, Sqdmlsl2)              \
  V(smull, Smull)                    \
  V(smull2, Smull2)                  \
  V(smlal, Smlal)                    \
  V(smlal2, Smlal2)                  \
  V(smlsl, Smlsl)                    \
  V(smlsl2, Smlsl2)                  \
  V(umull, Umull)                    \
  V(umull2, Umull2)                  \
  V(umlal, Umlal)                    \
  V(umlal2, Umlal2)                  \
  V(umlsl, Umlsl)                    \
  V(umlsl2, Umlsl2)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                   \
  void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm, \
            int vm_index) {                                                \
    DCHECK(allow_macro_instructions());                                    \
    ASM(vd, vn, vm, vm_index);                                             \
  }
  NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

// NEON 2 vector register instructions.
#define NEON_2VREG_MACRO_LIST(V) \
  V(abs, Abs)                    \
  V(addp, Addp)                  \
  V(addv, Addv)                  \
  V(cls, Cls)                    \
  V(clz, Clz)                    \
  V(cnt, Cnt)                    \
  V(faddp, Faddp)                \
  V(fcvtas, Fcvtas)              \
  V(fcvtau, Fcvtau)              \
  V(fcvtl, Fcvtl)                \
  V(fcvtms, Fcvtms)              \
  V(fcvtmu, Fcvtmu)              \
  V(fcvtn, Fcvtn)                \
  V(fcvtns, Fcvtns)              \
  V(fcvtnu, Fcvtnu)              \
  V(fcvtps, Fcvtps)              \
  V(fcvtpu, Fcvtpu)              \
  V(fmaxnmp, Fmaxnmp)            \
  V(fmaxnmv, Fmaxnmv)            \
  V(fmaxp, Fmaxp)                \
  V(fmaxv, Fmaxv)                \
  V(fminnmp, Fminnmp)            \
  V(fminnmv, Fminnmv)            \
  V(fminp, Fminp)                \
  V(fminv, Fminv)                \
  V(fneg, Fneg)                  \
  V(frecpe, Frecpe)              \
  V(frecpx, Frecpx)              \
  V(frinta, Frinta)              \
  V(frinti, Frinti)              \
  V(frintm, Frintm)              \
  V(frintn, Frintn)              \
  V(frintp, Frintp)              \
  V(frintx, Frintx)              \
  V(frintz, Frintz)              \
  V(frsqrte, Frsqrte)            \
  V(fsqrt, Fsqrt)                \
  V(mov, Mov)                    \
  V(mvn, Mvn)                    \
  V(neg, Neg)                    \
  V(not_, Not)                   \
  V(rbit, Rbit)                  \
  V(rev16, Rev16)                \
  V(rev32, Rev32)                \
  V(rev64, Rev64)                \
  V(sadalp, Sadalp)              \
  V(saddlp, Saddlp)              \
  V(saddlv, Saddlv)              \
  V(smaxv, Smaxv)                \
  V(sminv, Sminv)                \
  V(sqabs, Sqabs)                \
  V(sqneg, Sqneg)                \
  V(sqxtn2, Sqxtn2)              \
  V(sqxtn, Sqxtn)                \
  V(sqxtun2, Sqxtun2)            \
  V(sqxtun, Sqxtun)              \
  V(suqadd, Suqadd)              \
  V(sxtl2, Sxtl2)                \
  V(sxtl, Sxtl)                  \
  V(uadalp, Uadalp)              \
  V(uaddlp, Uaddlp)              \
  V(uaddlv, Uaddlv)              \
  V(umaxv, Umaxv)                \
  V(uminv, Uminv)                \
  V(uqxtn2, Uqxtn2)              \
  V(uqxtn, Uqxtn)                \
  V(urecpe, Urecpe)              \
  V(ursqrte, Ursqrte)            \
  V(usqadd, Usqadd)              \
  V(uxtl2, Uxtl2)                \
  V(uxtl, Uxtl)                  \
  V(xtn2, Xtn2)                  \
  V(xtn, Xtn)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                \
  void MASM(const VRegister& vd, const VRegister& vn) { \
    DCHECK(allow_macro_instructions());                 \
    ASM(vd, vn);                                        \
  }
  NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
#undef NEON_2VREG_MACRO_LIST

// NEON 2 vector register with immediate instructions.
#define NEON_2VREG_FPIMM_MACRO_LIST(V) \
  V(fcmeq, Fcmeq)                      \
  V(fcmge, Fcmge)                      \
  V(fcmgt, Fcmgt)                      \
  V(fcmle, Fcmle)                      \
  V(fcmlt, Fcmlt)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                            \
  void MASM(const VRegister& vd, const VRegister& vn, double imm) { \
    DCHECK(allow_macro_instructions());                             \
    ASM(vd, vn, imm);                                               \
  }
  NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

// NEON 3 vector register instructions.
#define NEON_3VREG_MACRO_LIST(V) \
  V(add, Add)                    \
  V(addhn2, Addhn2)              \
  V(addhn, Addhn)                \
  V(addp, Addp)                  \
  V(and_, And)                   \
  V(bic, Bic)                    \
  V(bif, Bif)                    \
  V(bit, Bit)                    \
  V(bsl, Bsl)                    \
  V(cmeq, Cmeq)                  \
  V(cmge, Cmge)                  \
  V(cmgt, Cmgt)                  \
  V(cmhi, Cmhi)                  \
  V(cmhs, Cmhs)                  \
  V(cmtst, Cmtst)                \
  V(eor, Eor)                    \
  V(fabd, Fabd)                  \
  V(facge, Facge)                \
  V(facgt, Facgt)                \
  V(faddp, Faddp)                \
  V(fcmeq, Fcmeq)                \
  V(fcmge, Fcmge)                \
  V(fcmgt, Fcmgt)                \
  V(fmaxnmp, Fmaxnmp)            \
  V(fmaxp, Fmaxp)                \
  V(fminnmp, Fminnmp)            \
  V(fminp, Fminp)                \
  V(fmla, Fmla)                  \
  V(fmls, Fmls)                  \
  V(fmulx, Fmulx)                \
  V(fnmul, Fnmul)                \
  V(frecps, Frecps)              \
  V(frsqrts, Frsqrts)            \
  V(mla, Mla)                    \
  V(mls, Mls)                    \
  V(mul, Mul)                    \
  V(orn, Orn)                    \
  V(orr, Orr)                    \
  V(pmull2, Pmull2)              \
  V(pmull, Pmull)                \
  V(pmul, Pmul)                  \
  V(raddhn2, Raddhn2)            \
  V(raddhn, Raddhn)              \
  V(rsubhn2, Rsubhn2)            \
  V(rsubhn, Rsubhn)              \
  V(sabal2, Sabal2)              \
  V(sabal, Sabal)                \
  V(saba, Saba)                  \
  V(sabdl2, Sabdl2)              \
  V(sabdl, Sabdl)                \
  V(sabd, Sabd)                  \
  V(saddl2, Saddl2)              \
  V(saddl, Saddl)                \
  V(saddw2, Saddw2)              \
  V(saddw, Saddw)                \
  V(shadd, Shadd)                \
  V(shsub, Shsub)                \
  V(smaxp, Smaxp)                \
  V(smax, Smax)                  \
  V(sminp, Sminp)                \
  V(smin, Smin)                  \
  V(smlal2, Smlal2)              \
  V(smlal, Smlal)                \
  V(smlsl2, Smlsl2)              \
  V(smlsl, Smlsl)                \
  V(smull2, Smull2)              \
  V(smull, Smull)                \
  V(sqadd, Sqadd)                \
  V(sqdmlal2, Sqdmlal2)          \
  V(sqdmlal, Sqdmlal)            \
  V(sqdmlsl2, Sqdmlsl2)          \
  V(sqdmlsl, Sqdmlsl)            \
  V(sqdmulh, Sqdmulh)            \
  V(sqdmull2, Sqdmull2)          \
  V(sqdmull, Sqdmull)            \
  V(sqrdmulh, Sqrdmulh)          \
  V(sqrshl, Sqrshl)              \
  V(sqshl, Sqshl)                \
  V(sqsub, Sqsub)                \
  V(srhadd, Srhadd)              \
  V(srshl, Srshl)                \
  V(sshl, Sshl)                  \
  V(ssubl2, Ssubl2)              \
  V(ssubl, Ssubl)                \
  V(ssubw2, Ssubw2)              \
  V(ssubw, Ssubw)                \
  V(subhn2, Subhn2)              \
  V(subhn, Subhn)                \
  V(sub, Sub)                    \
  V(trn1, Trn1)                  \
  V(trn2, Trn2)                  \
  V(uabal2, Uabal2)              \
  V(uabal, Uabal)                \
  V(uaba, Uaba)                  \
  V(uabdl2, Uabdl2)              \
  V(uabdl, Uabdl)                \
  V(uabd, Uabd)                  \
  V(uaddl2, Uaddl2)              \
  V(uaddl, Uaddl)                \
  V(uaddw2, Uaddw2)              \
  V(uaddw, Uaddw)                \
  V(uhadd, Uhadd)                \
  V(uhsub, Uhsub)                \
  V(umaxp, Umaxp)                \
  V(umax, Umax)                  \
  V(uminp, Uminp)                \
  V(umin, Umin)                  \
  V(umlal2, Umlal2)              \
  V(umlal, Umlal)                \
  V(umlsl2, Umlsl2)              \
  V(umlsl, Umlsl)                \
  V(umull2, Umull2)              \
  V(umull, Umull)                \
  V(uqadd, Uqadd)                \
  V(uqrshl, Uqrshl)              \
  V(uqshl, Uqshl)                \
  V(uqsub, Uqsub)                \
  V(urhadd, Urhadd)              \
  V(urshl, Urshl)                \
  V(ushl, Ushl)                  \
  V(usubl2, Usubl2)              \
  V(usubl, Usubl)                \
  V(usubw2, Usubw2)              \
  V(usubw, Usubw)                \
  V(uzp1, Uzp1)                  \
  V(uzp2, Uzp2)                  \
  V(zip1, Zip1)                  \
  V(zip2, Zip2)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                     \
  void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \
    DCHECK(allow_macro_instructions());                                      \
    ASM(vd, vn, vm);                                                         \
  }
  NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

  void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) {
    DCHECK(allow_macro_instructions());
    bic(vd, imm8, left_shift);
  }

  // This is required for compatibility in architecture independent code.
  inline void jmp(Label* L);

  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
  inline void B(Label* label);
  inline void B(Condition cond, Label* label);
  void B(Label* label, Condition cond);

  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void Tbz(const Register& rt, unsigned bit_pos, Label* label);

  void Cbnz(const Register& rt, Label* label);
  void Cbz(const Register& rt, Label* label);

  void Pacibsp() {
    DCHECK(allow_macro_instructions_);
    pacibsp();
  }
  void Autibsp() {
    DCHECK(allow_macro_instructions_);
    autibsp();
  }

  // The 1716 pac and aut instructions encourage people to use x16 and x17
  // directly, perhaps without realising that this is forbidden. For example:
  //
  //     UseScratchRegisterScope temps(&masm);
  //     Register temp = temps.AcquireX();  // temp will be x16
  //     __ Mov(x17, ptr);
  //     __ Mov(x16, modifier);  // Will overwrite temp!
  //     __ Pacib1716();
  //
  // To work around this issue, you must exclude x16 and x17 from the scratch
  // register list. You may need to replace them with other registers:
  //
  //     UseScratchRegisterScope temps(&masm);
  //     temps.Exclude(x16, x17);
  //     temps.Include(x10, x11);
  //     __ Mov(x17, ptr);
  //     __ Mov(x16, modifier);
  //     __ Pacib1716();
  void Pacib1716() {
    DCHECK(allow_macro_instructions_);
    DCHECK(!TmpList()->IncludesAliasOf(x16));
    DCHECK(!TmpList()->IncludesAliasOf(x17));
    pacib1716();
  }
  void Autib1716() {
    DCHECK(allow_macro_instructions_);
    DCHECK(!TmpList()->IncludesAliasOf(x16));
    DCHECK(!TmpList()->IncludesAliasOf(x17));
    autib1716();
  }

  inline void Dmb(BarrierDomain domain, BarrierType type);
  inline void Dsb(BarrierDomain domain, BarrierType type);
  inline void Isb();
  inline void Csdb();

  inline void SmiUntag(Register dst, Register src);
  inline void SmiUntag(Register dst, const MemOperand& src);
  inline void SmiUntag(Register smi);

  inline void SmiToInt32(Register smi);

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, AbortReason reason);

  // Like Assert(), but without condition.
  // Use --debug_code to enable.
  void AssertUnreachable(AbortReason reason);

  void AssertSmi(Register object,
                 AbortReason reason = AbortReason::kOperandIsNotASmi);

  // Like Assert(), but always enabled.
  void Check(Condition cond, AbortReason reason);

  inline void Debug(const char* message, uint32_t code, Instr params = BREAK);

  void Trap();
  void DebugBreak();

  // Print a message to stderr and abort execution.
  void Abort(AbortReason reason);

  // Like printf, but print at run-time from generated code.
  //
  // The caller must ensure that arguments for floating-point placeholders
  // (such as %e, %f or %g) are VRegisters, and that arguments for integer
  // placeholders are Registers.
  //
  // Format placeholders that refer to more than one argument, or to a specific
  // argument, are not supported. This includes formats like "%1$d" or "%.*d".
  //
  // This function automatically preserves caller-saved registers so that
  // calling code can use Printf at any point without having to worry about
  // corruption. The preservation mechanism generates a lot of code. If this is
  // a problem, preserve the important registers manually and then call
  // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
  // implicitly preserved.
  void Printf(const char* format, CPURegister arg0 = NoCPUReg,
              CPURegister arg1 = NoCPUReg, CPURegister arg2 = NoCPUReg,
              CPURegister arg3 = NoCPUReg);

  // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
  //
  // The return code from the system printf call will be returned in x0.
  void PrintfNoPreserve(const char* format, const CPURegister& arg0 = NoCPUReg,
                        const CPURegister& arg1 = NoCPUReg,
                        const CPURegister& arg2 = NoCPUReg,
                        const CPURegister& arg3 = NoCPUReg);
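
  // An illustrative use of Printf (a sketch; the registers and format string
  // are arbitrary):
  //
  //   __ Mov(x0, 42);
  //   __ Fmov(d0, 1.5);
  //   __ Printf("x0: %" PRId64 ", d0: %f\n", x0, d0);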

  // Remaining instructions are simple pass-through calls to the assembler.
  inline void Asr(const Register& rd, const Register& rn, unsigned shift);
  inline void Asr(const Register& rd, const Register& rn, const Register& rm);

  // Try to move an immediate into the destination register in a single
  // instruction. Returns true for success, and updates the contents of dst.
  // Returns false, otherwise.
  bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);

  inline void Bind(Label* label,
                   BranchTargetIdentifier id = BranchTargetIdentifier::kNone);

  // Control-flow integrity:

  // Define a function entrypoint.
  inline void CodeEntry();
  // Define an exception handler.
  inline void ExceptionHandler();
  // Define an exception handler and bind a label.
  inline void BindExceptionHandler(Label* label);

  // Control-flow integrity:

  // Define a jump (BR) target.
  inline void JumpTarget();
  // Define a jump (BR) target and bind a label.
  inline void BindJumpTarget(Label* label);
  // Define a call (BLR) target. The target also allows tail calls (via BR)
  // when the target is x16 or x17.
  inline void CallTarget();
  // Define a jump/call target.
  inline void JumpOrCallTarget();
  // Define a jump/call target and bind a label.
  inline void BindJumpOrCallTarget(Label* label);

  static unsigned CountSetHalfWords(uint64_t imm, unsigned reg_size);

  CPURegList* TmpList() { return &tmp_list_; }
  CPURegList* FPTmpList() { return &fptmp_list_; }

  static CPURegList DefaultTmpList();
  static CPURegList DefaultFPTmpList();

  // Move macros.
  inline void Mvn(const Register& rd, uint64_t imm);
  void Mvn(const Register& rd, const Operand& operand);
  static bool IsImmMovn(uint64_t imm, unsigned reg_size);
  static bool IsImmMovz(uint64_t imm, unsigned reg_size);

  void LogicalMacro(const Register& rd, const Register& rn,
                    const Operand& operand, LogicalOp op);
  void AddSubMacro(const Register& rd, const Register& rn,
                   const Operand& operand, FlagsUpdate S, AddSubOp op);
  inline void Orr(const Register& rd, const Register& rn,
                  const Operand& operand);
  void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) {
    DCHECK(allow_macro_instructions());
    orr(vd, imm8, left_shift);
  }
  inline void Orn(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Eor(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Eon(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void And(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Ands(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Tst(const Register& rn, const Operand& operand);
  inline void Bic(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Blr(const Register& xn);
  inline void Cmp(const Register& rn, const Operand& operand);
  inline void CmpTagged(const Register& rn, const Operand& operand);
  inline void Subs(const Register& rd, const Register& rn,
                   const Operand& operand);
  void Csel(const Register& rd, const Register& rn, const Operand& operand,
            Condition cond);
  inline void Fcsel(const VRegister& fd, const VRegister& fn,
                    const VRegister& fm, Condition cond);

  // Emits a runtime assert that the stack pointer is aligned.
  void AssertSpAligned();

  // Copy slot_count stack slots from the stack offset specified by src to
  // the stack offset specified by dst. The offsets and count are expressed in
  // slot-sized units. Offset dst must be less than src, or the gap between
  // them must be greater than or equal to slot_count, otherwise the result is
  // unpredictable. The function may corrupt its register arguments. The
  // registers must not alias each other.
  void CopySlots(int dst, Register src, Register slot_count);
  void CopySlots(Register dst, Register src, Register slot_count);

  // Copy count double words from the address in register src to the address
  // in register dst. There are three modes for this function:
  // 1) Address dst must be less than src, or the gap between them must be
  //    greater than or equal to count double words, otherwise the result is
  //    unpredictable. This is the default mode.
  // 2) Address src must be less than dst, or the gap between them must be
  //    greater than or equal to count double words, otherwise the result is
  //    unpredictable. In this mode, src and dst specify the last (highest)
  //    address of the regions to copy from and to.
  // 3) The same as mode 1, but the words are copied in the reversed order.
  // The case where src == dst is not supported.
  // The function may corrupt its register arguments. The registers must not
  // alias each other.
  enum CopyDoubleWordsMode {
    kDstLessThanSrc,
    kSrcLessThanDst,
    kDstLessThanSrcAndReverse
  };
  void CopyDoubleWords(Register dst, Register src, Register count,
                       CopyDoubleWordsMode mode = kDstLessThanSrc);
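
  // A minimal sketch of the default mode (the register choices are arbitrary):
  //
  //   __ SlotAddress(x5, 0);           // dst: address of stack slot 0.
  //   __ SlotAddress(x6, 2);           // src: address of stack slot 2.
  //   __ Mov(x7, 8);                   // Copy eight double words.
  //   __ CopyDoubleWords(x5, x6, x7);  // dst < src, so the default mode is ok.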

  // Calculate the address of a double word-sized slot at slot_offset from the
  // stack pointer, and write it to dst. Positive slot_offsets are at addresses
  // greater than sp, with slot zero at sp.
  void SlotAddress(Register dst, int slot_offset);
  void SlotAddress(Register dst, Register slot_offset);

  // Load a literal from the inline constant pool.
  inline void Ldr(const CPURegister& rt, const Operand& imm);

  // Claim or drop stack space.
  //
  // On Windows, Claim will write a value every 4k, as is required by the stack
  // expansion mechanism.
  //
  // The stack pointer must be aligned to 16 bytes and the size claimed or
  // dropped must be a multiple of 16 bytes.
  //
  // Note that unit_size must be specified in bytes. For variants which take a
  // Register count, the unit size must be a power of two.
  inline void Claim(int64_t count, uint64_t unit_size = kXRegSize);
  inline void Claim(const Register& count, uint64_t unit_size = kXRegSize);
  inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
  inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);
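
  // For example (a sketch; the counts are arbitrary):
  //
  //   __ Claim(2);  // Reserve 2 * kXRegSize = 16 bytes; sp stays aligned.
  //   ...
  //   __ Drop(2);   // Release the same 16 bytes.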

  // Drop 'count' arguments from the stack, rounded up to a multiple of two,
  // without actually accessing memory.
  // We assume the size of the arguments is the pointer size.
  // An optional mode argument is passed, which can indicate we need to
  // explicitly add the receiver to the count.
  enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
  inline void DropArguments(const Register& count,
                            ArgumentsCountMode mode = kCountIncludesReceiver);
  inline void DropArguments(int64_t count,
                            ArgumentsCountMode mode = kCountIncludesReceiver);

  // Drop 'count' slots from stack, rounded up to a multiple of two, without
  // actually accessing memory.
  inline void DropSlots(int64_t count);

  // Push a single argument, with padding, to the stack.
  inline void PushArgument(const Register& arg);

  // Add and sub macros.
  inline void Add(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Adds(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Sub(const Register& rd, const Register& rn,
                  const Operand& operand);

  // Abort execution if argument is not a positive or zero integer, enabled via
  // --debug-code.
  void AssertPositiveOrZero(Register value);

#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
  inline void FN(const REGTYPE REG, const MemOperand& addr);
  LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  // Push or pop up to 4 registers of the same width to or from the stack.
  //
  // If an argument register is 'NoReg', all further arguments are also assumed
  // to be 'NoReg', and are thus not pushed or popped.
  //
  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
  // to "Push(a); Push(b);".
  //
  // It is valid to push the same register more than once, and there is no
  // restriction on the order in which registers are specified.
  //
  // It is not valid to pop into the same register more than once in one
  // operation, not even into the zero register.
  //
  // The stack pointer must be aligned to 16 bytes on entry and the total size
  // of the specified registers must also be a multiple of 16 bytes.
  //
  // Other than the registers passed into Pop, the stack pointer, (possibly)
  // the system stack pointer and (possibly) the link register, these methods
  // do not modify any other registers.
  //
  // Some of the methods take an optional LoadLRMode or StoreLRMode template
  // argument, which specifies whether we need to sign the link register at the
  // start of the operation, or authenticate it at the end of the operation,
  // when control flow integrity measures are enabled.
  // When the mode is kDontLoadLR or kDontStoreLR, LR must not be passed as an
  // argument to the operation.
  enum LoadLRMode { kAuthLR, kDontLoadLR };
  enum StoreLRMode { kSignLR, kDontStoreLR };
  template <StoreLRMode lr_mode = kDontStoreLR>
  void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
            const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
  void Push(const CPURegister& src0, const CPURegister& src1,
            const CPURegister& src2, const CPURegister& src3,
            const CPURegister& src4, const CPURegister& src5 = NoReg,
            const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
  template <LoadLRMode lr_mode = kDontLoadLR>
  void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
           const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1,
           const CPURegister& dst2, const CPURegister& dst3,
           const CPURegister& dst4, const CPURegister& dst5 = NoReg,
           const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
  template <StoreLRMode lr_mode = kDontStoreLR>
  void Push(const Register& src0, const VRegister& src1);
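
  // For example (a sketch; the registers are arbitrary):
  //
  //   __ Push(x0, x1, x2, x3);  // Equivalent to Push(x0); Push(x1); ...
  //   ...
  //   __ Pop(x3, x2, x1, x0);   // Restores the four registers.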

  void MaybeSaveRegisters(RegList registers);
  void MaybeRestoreRegisters(RegList registers);

  void CallEphemeronKeyBarrier(Register object, Operand offset,
                               SaveFPRegsMode fp_mode);

  void CallRecordWriteStubSaveRegisters(
      Register object, Operand offset,
      RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
      StubCallMode mode = StubCallMode::kCallBuiltinPointer);
  void CallRecordWriteStub(
      Register object, Register slot_address,
      RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
      StubCallMode mode = StubCallMode::kCallBuiltinPointer);

  // For a given |object| and |offset|:
  //   - Move |object| to |dst_object|.
  //   - Compute the address of the slot pointed to by |offset| in |object| and
  //     write it to |dst_slot|.
  // This method makes sure |object| and |offset| are allowed to overlap with
  // the destination registers.
  void MoveObjectAndSlot(Register dst_object, Register dst_slot,
                         Register object, Operand offset);

  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses (as in the A32 push
  // and pop instructions).
  //
  // (Push|Pop)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
  // kSRegSizeInBits are supported.
  //
  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
  //
  // The methods take an optional LoadLRMode or StoreLRMode template argument.
  // When control flow integrity measures are enabled and the link register is
  // included in 'registers', passing kSignLR to PushCPURegList will sign the
  // link register before pushing the list, and passing kAuthLR to
  // PopCPURegList will authenticate it after popping the list.
  template <StoreLRMode lr_mode = kDontStoreLR>
  void PushCPURegList(CPURegList registers);
  template <LoadLRMode lr_mode = kDontLoadLR>
  void PopCPURegList(CPURegList registers);
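
  // A sketch of a matched save and restore (the register choice and the
  // CPURegList construction are illustrative assumptions):
  //
  //   CPURegList saved(x19, x20, x21, x22);  // 32 bytes, a multiple of 16.
  //   __ PushCPURegList(saved);
  //   ...
  //   __ PopCPURegList(saved);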

  // Calculate how much stack space (in bytes) is required to store caller
  // registers excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion) const;

  // Push caller saved registers on the stack, and return the number of bytes
  // stack pointer is adjusted.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);

  // Restore caller saved registers from the stack, and return the number of
  // bytes stack pointer is adjusted.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);

  // Move an immediate into register dst, and return an Operand object for use
  // with a subsequent instruction that accepts a shift. The value moved into
  // dst is not necessarily equal to imm; it may have had a shifting operation
  // applied to it that will be subsequently undone by the shift applied in the
  // Operand.
  Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm,
                                    PreShiftImmMode mode);

  void CheckPageFlag(const Register& object, int mask, Condition cc,
                     Label* condition_met);

  // Compare a register with an operand, and branch to label depending on the
  // condition. May corrupt the status flags.
  inline void CompareAndBranch(const Register& lhs, const Operand& rhs,
                               Condition cond, Label* label);
  inline void CompareTaggedAndBranch(const Register& lhs, const Operand& rhs,
                                     Condition cond, Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ANY of
  // those bits are set. May corrupt the status flags.
  inline void TestAndBranchIfAnySet(const Register& reg,
                                    const uint64_t bit_pattern, Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ALL of
  // those bits are clear (i.e. not set). May corrupt the status flags.
  inline void TestAndBranchIfAllClear(const Register& reg,
                                      const uint64_t bit_pattern, Label* label);

  inline void Brk(int code);

  inline void JumpIfSmi(Register value, Label* smi_label,
                        Label* not_smi_label = nullptr);

  inline void JumpIfEqual(Register x, int32_t y, Label* dest);
  inline void JumpIfLessThan(Register x, int32_t y, Label* dest);

  void LoadMap(Register dst, Register object);

  inline void Fmov(VRegister fd, VRegister fn);
  inline void Fmov(VRegister fd, Register rn);
  // Provide explicit double and float interfaces for FP immediate moves, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of fd. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  inline void Fmov(VRegister fd, double imm);
  inline void Fmov(VRegister fd, float imm);
  // Provide a template to allow other types to be converted automatically.
  template <typename T>
  void Fmov(VRegister fd, T imm) {
    DCHECK(allow_macro_instructions());
    Fmov(fd, static_cast<double>(imm));
  }
  inline void Fmov(Register rd, VRegister fn);

  void Movi(const VRegister& vd, uint64_t imm, Shift shift = LSL,
            int shift_amount = 0);
  void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);

  void LoadFromConstantsTable(Register destination, int constant_index) final;
  void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
  void LoadRootRelative(Register destination, int32_t offset) final;

  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(const ExternalReference& reference);

  void Call(Register target);
  void Call(Address target, RelocInfo::Mode rmode);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
  void Call(ExternalReference target);

  // Generate an indirect call (for when a direct call's range is not adequate).
  void IndirectCall(Address target, RelocInfo::Mode rmode);

  // Load the builtin given by the Smi in |builtin| into the same
  // register.
  void LoadEntryFromBuiltinIndex(Register builtin);
  void LoadEntryFromBuiltin(Builtin builtin, Register destination);
  MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
  void CallBuiltinByIndex(Register builtin);
  void CallBuiltin(Builtin builtin);
  void TailCallBuiltin(Builtin builtin);

  void LoadCodeObjectEntry(Register destination, Register code_object);
  void CallCodeObject(Register code_object);
  void JumpCodeObject(Register code_object,
                      JumpMode jump_mode = JumpMode::kJump);

  // Load code entry point from the CodeDataContainer object.
  void LoadCodeDataContainerEntry(Register destination,
                                  Register code_data_container_object);
  // Load code entry point from the CodeDataContainer object and compute
  // Code object pointer out of it. Must not be used for CodeDataContainers
  // corresponding to builtins, because their entry points values point to
  // the embedded instruction stream in .text section.
  void LoadCodeDataContainerCodeNonBuiltin(Register destination,
                                           Register code_data_container_object);
  void CallCodeDataContainerObject(Register code_data_container_object);
  void JumpCodeDataContainerObject(Register code_data_container_object,
                                   JumpMode jump_mode = JumpMode::kJump);

  // Helper functions that dispatch either to Call/JumpCodeObject or to
  // Call/JumpCodeDataContainerObject.
  void LoadCodeTEntry(Register destination, Register code);
  void CallCodeTObject(Register code);
  void JumpCodeTObject(Register code, JumpMode jump_mode = JumpMode::kJump);

  // Generates an instruction sequence s.t. the return address points to the
  // instruction following the call.
  // The return address on the stack is used by frame iteration.
  void StoreReturnAddressAndCall(Register target);

  void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
                             DeoptimizeKind kind, Label* ret,
                             Label* jump_deoptimization_entry_label);

  // Calls a C function.
  // The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_reg_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DoubleRegister double_input, StubCallMode stub_mode,
                         LinkRegisterStatus lr_status);

  inline void Mul(const Register& rd, const Register& rn, const Register& rm);

  inline void Fcvtzs(const Register& rd, const VRegister& fn);
  void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    fcvtzs(vd, vn, fbits);
  }

  void Fjcvtzs(const Register& rd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    DCHECK(!rd.IsZero());
    fjcvtzs(rd, vn);
  }

  inline void Fcvtzu(const Register& rd, const VRegister& fn);
  void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    fcvtzu(vd, vn, fbits);
  }

  inline void Madd(const Register& rd, const Register& rn, const Register& rm,
                   const Register& ra);
  inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
  inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Msub(const Register& rd, const Register& rn, const Register& rm,
                   const Register& ra);

  inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
  inline void Umull(const Register& rd, const Register& rn, const Register& rm);
  inline void Smull(const Register& rd, const Register& rn, const Register& rm);

  inline void Sxtb(const Register& rd, const Register& rn);
  inline void Sxth(const Register& rd, const Register& rn);
  inline void Sxtw(const Register& rd, const Register& rn);
  inline void Ubfiz(const Register& rd, const Register& rn, unsigned lsb,
                    unsigned width);
  inline void Ubfx(const Register& rd, const Register& rn, unsigned lsb,
                   unsigned width);
  inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
  inline void Ror(const Register& rd, const Register& rs, unsigned shift);
  inline void Ror(const Register& rd, const Register& rn, const Register& rm);
  inline void Cmn(const Register& rn, const Operand& operand);
  inline void Fadd(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fcmp(const VRegister& fn, const VRegister& fm);
  inline void Fcmp(const VRegister& fn, double value);
  inline void Fabs(const VRegister& fd, const VRegister& fn);
  inline void Fmul(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fsub(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fdiv(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fmax(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fmin(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Rbit(const Register& rd, const Register& rn);
  inline void Rev(const Register& rd, const Register& rn);

  enum AdrHint {
    // The target must be within the immediate range of adr.
    kAdrNear,
    // The target may be outside of the immediate range of adr. Additional
    // instructions may be emitted.
    kAdrFar
  };
  void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);

  // Add/sub with carry macros.
  inline void Adc(const Register& rd, const Register& rn,
                  const Operand& operand);

  // Conditional macros.
  inline void Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv,
                   Condition cond);
  inline void CcmpTagged(const Register& rn, const Operand& operand,
                         StatusFlags nzcv, Condition cond);

  inline void Clz(const Register& rd, const Register& rn);

  // Poke 'src' onto the stack. The offset is in bytes. The stack pointer must
  // be 16 byte aligned.
  // When the optional template argument is kSignLR and control flow integrity
  // measures are enabled, we sign the link register before poking it onto the
  // stack. 'src' must be lr in this case.
  template <StoreLRMode lr_mode = kDontStoreLR>
  void Poke(const CPURegister& src, const Operand& offset);

  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
  // The stack pointer must be aligned to 16 bytes.
  // When the optional template argument is kAuthLR and control flow integrity
  // measures are enabled, we authenticate the link register after peeking the
  // value. 'dst' must be lr in this case.
  template <LoadLRMode lr_mode = kDontLoadLR>
  void Peek(const CPURegister& dst, const Operand& offset);

  // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
  // with 'src2' at a higher address than 'src1'. The offset is in bytes. The
  // stack pointer must be 16 byte aligned.
  void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);

  inline void Sbfx(const Register& rd, const Register& rn, unsigned lsb,
                   unsigned width);

  inline void Bfi(const Register& rd, const Register& rn, unsigned lsb,
                  unsigned width);

  inline void Scvtf(const VRegister& fd, const Register& rn,
                    unsigned fbits = 0);
  void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    scvtf(vd, vn, fbits);
  }
  inline void Ucvtf(const VRegister& fd, const Register& rn,
                    unsigned fbits = 0);
  void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    ucvtf(vd, vn, fbits);
  }

  void AssertFPCRState(Register fpcr = NoReg);
  void CanonicalizeNaN(const VRegister& dst, const VRegister& src);
  void CanonicalizeNaN(const VRegister& reg) { CanonicalizeNaN(reg, reg); }

  inline void CmovX(const Register& rd, const Register& rn, Condition cond);
  inline void Cset(const Register& rd, Condition cond);
  inline void Csetm(const Register& rd, Condition cond);
  inline void Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv,
                    Condition cond);
  inline void Csinc(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);

  inline void Fcvt(const VRegister& fd, const VRegister& fn);

  int ActivationFrameAlignment();

  void Ins(const VRegister& vd, int vd_index, const VRegister& vn,
           int vn_index) {
    DCHECK(allow_macro_instructions());
    ins(vd, vd_index, vn, vn_index);
  }
  void Ins(const VRegister& vd, int vd_index, const Register& rn) {
    DCHECK(allow_macro_instructions());
    ins(vd, vd_index, rn);
  }

  inline void Bl(Label* label);
  inline void Br(const Register& xn);

  inline void Uxtb(const Register& rd, const Register& rn);
  inline void Uxth(const Register& rd, const Register& rn);
  inline void Uxtw(const Register& rd, const Register& rn);

  void Dup(const VRegister& vd, const VRegister& vn, int index) {
    DCHECK(allow_macro_instructions());
    dup(vd, vn, index);
  }
  void Dup(const VRegister& vd, const Register& rn) {
    DCHECK(allow_macro_instructions());
    dup(vd, rn);
  }

#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
  inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
  LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  void St1(const VRegister& vt, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, vt2, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, vt2, vt3, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, vt2, vt3, vt4, dst);
  }
  void St1(const VRegister& vt, int lane, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, lane, dst);
  }

#define NEON_2VREG_SHIFT_MACRO_LIST(V) \
  V(rshrn, Rshrn)                      \
  V(rshrn2, Rshrn2)                    \
  V(shl, Shl)                          \
  V(shll, Shll)                        \
  V(shll2, Shll2)                      \
  V(shrn, Shrn)                        \
  V(shrn2, Shrn2)                      \
  V(sli, Sli)                          \
  V(sqrshrn, Sqrshrn)                  \
  V(sqrshrn2, Sqrshrn2)                \
  V(sqrshrun, Sqrshrun)                \
  V(sqrshrun2, Sqrshrun2)              \
  V(sqshl, Sqshl)                      \
  V(sqshlu, Sqshlu)                    \
  V(sqshrn, Sqshrn)                    \
  V(sqshrn2, Sqshrn2)                  \
  V(sqshrun, Sqshrun)                  \
  V(sqshrun2, Sqshrun2)                \
  V(sri, Sri)                          \
  V(srshr, Srshr)                      \
  V(srsra, Srsra)                      \
  V(sshll, Sshll)                      \
  V(sshll2, Sshll2)                    \
  V(sshr, Sshr)                        \
  V(ssra, Ssra)                        \
  V(uqrshrn, Uqrshrn)                  \
  V(uqrshrn2, Uqrshrn2)                \
  V(uqshl, Uqshl)                      \
  V(uqshrn, Uqshrn)                    \
  V(uqshrn2, Uqshrn2)                  \
  V(urshr, Urshr)                      \
  V(ursra, Ursra)                      \
  V(ushll, Ushll)                      \
  V(ushll2, Ushll2)                    \
  V(ushr, Ushr)                        \
  V(usra, Usra)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                           \
  void MASM(const VRegister& vd, const VRegister& vn, int shift) { \
    DCHECK(allow_macro_instructions());                            \
    ASM(vd, vn, shift);                                            \
  }
  NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

  void Umov(const Register& rd, const VRegister& vn, int vn_index) {
    DCHECK(allow_macro_instructions());
    umov(rd, vn, vn_index);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vm);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vn2, vm);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vn2, vn3, vm);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vn4, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vn2, vn3, vn4, vm);
  }
  void Ext(const VRegister& vd, const VRegister& vn, const VRegister& vm,
           int index) {
    DCHECK(allow_macro_instructions());
    ext(vd, vn, vm, index);
  }

  void Smov(const Register& rd, const VRegister& vn, int vn_index) {
    DCHECK(allow_macro_instructions());
    smov(rd, vn, vn_index);
  }

// Load-acquire/store-release macros.
#define DECLARE_FUNCTION(FN, OP) \
  inline void FN(const Register& rt, const Register& rn);
  LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  // Load an object from the root table.
  void LoadRoot(Register destination, RootIndex index) final;
  void PushRoot(RootIndex index);

  inline void Ret(const Register& xn = lr);

  // Perform a conversion from a double to a signed int64. If the input fits in
  // the range of the 64-bit result, execution branches to done. Otherwise,
  // execution falls through, and the sign of the result can be used to
  // determine if overflow was towards positive or negative infinity.
  //
  // On successful conversion, the least significant 32 bits of the result are
  // equivalent to the ECMA-262 operation "ToInt32".
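  //
  // A usage sketch (hypothetical caller code, register names are illustrative):
  //   Label done;
  //   TryConvertDoubleToInt64(x0, d0, &done);
  //   // Fall-through: the conversion overflowed; the sign of x0 tells which way.
  //   Bind(&done);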
  void TryConvertDoubleToInt64(Register result, DoubleRegister input,
                               Label* done);

  inline void Mrs(const Register& rt, SystemRegister sysreg);
  inline void Msr(SystemRegister sysreg, const Register& rt);

  // Prologue claims an extra slot due to arm64's alignment constraints.
  static constexpr int kExtraSlotClaimedByPrologue = 1;
  // Generates function prologue code.
  void Prologue();

  void Cmgt(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmgt(vd, vn, imm);
  }
  void Cmge(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmge(vd, vn, imm);
  }
  void Cmeq(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmeq(vd, vn, imm);
  }
  void Cmlt(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmlt(vd, vn, imm);
  }
  void Cmle(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmle(vd, vn, imm);
  }

  inline void Neg(const Register& rd, const Operand& operand);
  inline void Negs(const Register& rd, const Operand& operand);

  // Compute rd = abs(rm).
  // This function clobbers the condition flags. On output the overflow flag is
  // set iff the negation overflowed.
  //
  // If rm is the minimum representable value, the result is not representable.
  // Handlers for each case can be specified using the relevant labels.
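  //
  // A usage sketch (illustrative only):
  //   Label not_representable;
  //   Abs(x0, x1, &not_representable);
  //   // x0 == abs(x1) here.
  //   Bind(&not_representable);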
  void Abs(const Register& rd, const Register& rm,
           Label* is_not_representable = nullptr,
           Label* is_representable = nullptr);

  inline void Cls(const Register& rd, const Register& rn);
  inline void Cneg(const Register& rd, const Register& rn, Condition cond);
  inline void Rev16(const Register& rd, const Register& rn);
  inline void Rev32(const Register& rd, const Register& rn);
  inline void Fcvtns(const Register& rd, const VRegister& fn);
  inline void Fcvtnu(const Register& rd, const VRegister& fn);
  inline void Fcvtms(const Register& rd, const VRegister& fn);
  inline void Fcvtmu(const Register& rd, const VRegister& fn);
  inline void Fcvtas(const Register& rd, const VRegister& fn);
  inline void Fcvtau(const Register& rd, const VRegister& fn);

  // Compute the start of the generated instruction stream from the current PC.
  // This is an alternative to embedding the {CodeObject} handle as a reference.
  void ComputeCodeStartAddress(const Register& rd);

  // ---------------------------------------------------------------------------
  // Pointer compression Support

  // Loads a field containing a HeapObject and decompresses it if pointer
  // compression is enabled.
  void LoadTaggedPointerField(const Register& destination,
                              const MemOperand& field_operand);
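  //
  // A hedged sketch (assuming 'object' holds a tagged HeapObject):
  //   LoadTaggedPointerField(map, FieldMemOperand(object, HeapObject::kMapOffset));
  // This is a plain load of the field when pointer compression is disabled and
  // additionally decompresses the value when it is enabled.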

  // Loads a field containing any tagged value and decompresses it if necessary.
  void LoadAnyTaggedField(const Register& destination,
                          const MemOperand& field_operand);

  // Loads a field containing a tagged signed value and decompresses it if
  // necessary.
  void LoadTaggedSignedField(const Register& destination,
                             const MemOperand& field_operand);

  // Loads a field containing a smi value and untags it.
  void SmiUntagField(Register dst, const MemOperand& src);

  // Compresses and stores a tagged value to the given on-heap location.
  void StoreTaggedField(const Register& value,
                        const MemOperand& dst_field_operand);

  void AtomicStoreTaggedField(const Register& value, const Register& dst_base,
                              const Register& dst_index, const Register& temp);

  void DecompressTaggedSigned(const Register& destination,
                              const MemOperand& field_operand);
  void DecompressTaggedPointer(const Register& destination,
                               const MemOperand& field_operand);
  void DecompressTaggedPointer(const Register& destination,
                               const Register& source);
  void DecompressAnyTagged(const Register& destination,
                           const MemOperand& field_operand);

  void AtomicDecompressTaggedSigned(const Register& destination,
                                    const Register& base, const Register& index,
                                    const Register& temp);
  void AtomicDecompressTaggedPointer(const Register& destination,
                                     const Register& base,
                                     const Register& index,
                                     const Register& temp);
  void AtomicDecompressAnyTagged(const Register& destination,
                                 const Register& base, const Register& index,
                                 const Register& temp);

  // Restore FP and LR from the values stored in the current frame. This will
  // authenticate the LR when pointer authentication is enabled.
  void RestoreFPAndLR();

#if V8_ENABLE_WEBASSEMBLY
  void StoreReturnAddressInWasmExitFrame(Label* return_location);
#endif  // V8_ENABLE_WEBASSEMBLY

  // Wasm helpers. These instructions don't have a direct lowering to native
  // instructions. These helpers allow us to define the optimal code sequence
  // once and use it in both TurboFan and Liftoff.
  void PopcntHelper(Register dst, Register src);
  void I64x2BitMask(Register dst, VRegister src);
  void I64x2AllTrue(Register dst, VRegister src);

  // ---------------------------------------------------------------------------
  // V8 Heap sandbox support

  // Transform a CagedPointer from/to its encoded form, which is used when the
  // pointer is stored on the heap and ensures that the pointer will always
  // point into the virtual memory cage.
  void EncodeCagedPointer(const Register& value);
  void DecodeCagedPointer(const Register& value);

  // Load and decode a CagedPointer from the heap.
  void LoadCagedPointerField(const Register& destination,
                             const MemOperand& field_operand);
  // Encode and store a CagedPointer to the heap.
  void StoreCagedPointerField(const Register& value,
                              const MemOperand& dst_field_operand);

  // Loads a field containing an off-heap pointer and does the necessary
  // decoding if the V8 heap sandbox is enabled.
  void LoadExternalPointerField(Register destination, MemOperand field_operand,
                                ExternalPointerTag tag,
                                Register isolate_root = Register::no_reg());

 protected:
  // The actual Push and Pop implementations. These don't generate any code
  // other than that required for the push or pop. This allows
  // (Push|Pop)CPURegList to bundle together run-time assertions for a large
  // block of registers.
  //
  // Note that size is per register, and is specified in bytes.
  void PushHelper(int count, int size, const CPURegister& src0,
                  const CPURegister& src1, const CPURegister& src2,
                  const CPURegister& src3);
  void PopHelper(int count, int size, const CPURegister& dst0,
                 const CPURegister& dst1, const CPURegister& dst2,
                 const CPURegister& dst3);

  void ConditionalCompareMacro(const Register& rn, const Operand& operand,
                               StatusFlags nzcv, Condition cond,
                               ConditionalCompareOp op);

  void AddSubWithCarryMacro(const Register& rd, const Register& rn,
                            const Operand& operand, FlagsUpdate S,
                            AddSubWithCarryOp op);

  // Call Printf. On a native build, a simple call will be generated, but if the
  // simulator is being used then a suitable pseudo-instruction is used. The
  // arguments and stack must be prepared by the caller as for a normal AAPCS64
  // call to 'printf'.
  //
  // The 'args' argument should point to an array of variable arguments in their
  // proper PCS registers (and in calling order). The argument registers can
  // have mixed types. The format string (x0) should not be included.
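  //
  // A hedged sketch (the format string in x0 is prepared separately):
  //   CPURegister printf_args[] = {x1, d0};
  //   CallPrintf(2, printf_args);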
  void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);

 private:
#if DEBUG
  // Tell whether any of the macro instructions can be used. When false, the
  // MacroAssembler will assert if a method which can emit a variable number
  // of instructions is called.
  bool allow_macro_instructions_ = true;
#endif

  // Scratch registers available for use by the MacroAssembler.
  CPURegList tmp_list_ = DefaultTmpList();
  CPURegList fptmp_list_ = DefaultFPTmpList();

  // Helps resolve branching to labels potentially out of range.
  // If the label is not bound, it registers the information necessary to later
  // be able to emit a veneer for this branch if necessary.
  // If the label is bound, it returns true if the label (or the previous link
  // in the label chain) is out of range. In that case the caller is responsible
  // for generating appropriate code.
  // Otherwise it returns false.
  // This function also checks whether veneers need to be emitted.
  bool NeedExtraInstructionsOrRegisterBranch(Label* label,
                                             ImmBranchType branch_type);

  void Movi16bitHelper(const VRegister& vd, uint64_t imm);
  void Movi32bitHelper(const VRegister& vd, uint64_t imm);
  void Movi64bitHelper(const VRegister& vd, uint64_t imm);

  void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
                      LoadStoreOp op);

  void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
                          const MemOperand& addr, LoadStorePairOp op);

  int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
                                byte* pc);

  void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
};

class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 public:
  using TurboAssembler::TurboAssembler;

  // Instruction set functions ------------------------------------------------
  // Logical macros.
  inline void Bics(const Register& rd, const Register& rn,
                   const Operand& operand);

  inline void Adcs(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Sbc(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Sbcs(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Ngc(const Register& rd, const Operand& operand);
  inline void Ngcs(const Register& rd, const Operand& operand);

  inline void Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv,
                   Condition cond);

#define DECLARE_FUNCTION(FN, OP) \
  inline void FN(const Register& rs, const Register& rt, const Register& rn);
  STLX_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  // Branch type inversion relies on these relations.
  STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
                (reg_bit_clear == (reg_bit_set ^ 1)) &&
                (always == (never ^ 1)));

  inline void Bfxil(const Register& rd, const Register& rn, unsigned lsb,
                    unsigned width);
  inline void Cinc(const Register& rd, const Register& rn, Condition cond);
  inline void Cinv(const Register& rd, const Register& rn, Condition cond);
  inline void CzeroX(const Register& rd, Condition cond);
  inline void Csinv(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);
  inline void Csneg(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);
  inline void Extr(const Register& rd, const Register& rn, const Register& rm,
                   unsigned lsb);
  void Fcvtl(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtl(vd, vn);
  }
  void Fcvtl2(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtl2(vd, vn);
  }
  void Fcvtn(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtn(vd, vn);
  }
  void Fcvtn2(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtn2(vd, vn);
  }
  void Fcvtxn(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtxn(vd, vn);
  }
  void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtxn2(vd, vn);
  }
  inline void Fmadd(const VRegister& fd, const VRegister& fn,
                    const VRegister& fm, const VRegister& fa);
  inline void Fmaxnm(const VRegister& fd, const VRegister& fn,
                     const VRegister& fm);
  inline void Fminnm(const VRegister& fd, const VRegister& fn,
                     const VRegister& fm);
  inline void Fmsub(const VRegister& fd, const VRegister& fn,
                    const VRegister& fm, const VRegister& fa);
  inline void Fnmadd(const VRegister& fd, const VRegister& fn,
                     const VRegister& fm, const VRegister& fa);
  inline void Fnmsub(const VRegister& fd, const VRegister& fn,
                     const VRegister& fm, const VRegister& fa);
  inline void Hint(SystemHint code);
  inline void Hlt(int code);
  inline void Ldnp(const CPURegister& rt, const CPURegister& rt2,
                   const MemOperand& src);
  inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
  inline void Nop() { nop(); }
  void Mvni(const VRegister& vd, const int imm8, Shift shift = LSL,
            const int shift_amount = 0) {
    DCHECK(allow_macro_instructions());
    mvni(vd, imm8, shift, shift_amount);
  }
  inline void Rev(const Register& rd, const Register& rn);
  inline void Sbfiz(const Register& rd, const Register& rn, unsigned lsb,
                    unsigned width);
  inline void Smaddl(const Register& rd, const Register& rn, const Register& rm,
                     const Register& ra);
  inline void Smsubl(const Register& rd, const Register& rn, const Register& rm,
                     const Register& ra);
  inline void Smulh(const Register& rd, const Register& rn, const Register& rm);
  inline void Stnp(const CPURegister& rt, const CPURegister& rt2,
                   const MemOperand& dst);
  inline void Umaddl(const Register& rd, const Register& rn, const Register& rm,
                     const Register& ra);
  inline void Umsubl(const Register& rd, const Register& rn, const Register& rm,
                     const Register& ra);

  void Ld1(const VRegister& vt, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, src);
  }
  void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, vt2, src);
  }
  void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, vt2, vt3, src);
  }
  void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, vt2, vt3, vt4, src);
  }
  void Ld1(const VRegister& vt, int lane, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, lane, src);
  }
  void Ld1r(const VRegister& vt, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1r(vt, src);
  }
  void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld2(vt, vt2, src);
  }
  void Ld2(const VRegister& vt, const VRegister& vt2, int lane,
           const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld2(vt, vt2, lane, src);
  }
  void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld2r(vt, vt2, src);
  }
  void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld3(vt, vt2, vt3, src);
  }
  void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           int lane, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld3(vt, vt2, vt3, lane, src);
  }
  void Ld3r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld3r(vt, vt2, vt3, src);
  }
  void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld4(vt, vt2, vt3, vt4, src);
  }
  void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, int lane, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld4(vt, vt2, vt3, vt4, lane, src);
  }
  void Ld4r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const VRegister& vt4, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld4r(vt, vt2, vt3, vt4, src);
  }
  void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st2(vt, vt2, dst);
  }
  void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st3(vt, vt2, vt3, dst);
  }
  void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st4(vt, vt2, vt3, vt4, dst);
  }
  void St2(const VRegister& vt, const VRegister& vt2, int lane,
           const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st2(vt, vt2, lane, dst);
  }
  void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           int lane, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st3(vt, vt2, vt3, lane, dst);
  }
  void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, int lane, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st4(vt, vt2, vt3, vt4, lane, dst);
  }
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbx(vd, vn, vm);
  }
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbx(vd, vn, vn2, vm);
  }
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbx(vd, vn, vn2, vn3, vm);
  }
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vn4, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbx(vd, vn, vn2, vn3, vn4, vm);
  }

  // For the 'lr_mode' template argument of the following methods, see
  // PushCPURegList/PopCPURegList.
  template <StoreLRMode lr_mode = kDontStoreLR>
  inline void PushSizeRegList(
      RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PushCPURegList<lr_mode>(CPURegList(type, reg_size, registers));
  }
  template <LoadLRMode lr_mode = kDontLoadLR>
  inline void PopSizeRegList(
      RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PopCPURegList<lr_mode>(CPURegList(type, reg_size, registers));
  }
  template <StoreLRMode lr_mode = kDontStoreLR>
  inline void PushXRegList(RegList regs) {
    PushSizeRegList<lr_mode>(regs, kXRegSizeInBits);
  }
  template <LoadLRMode lr_mode = kDontLoadLR>
  inline void PopXRegList(RegList regs) {
    PopSizeRegList<lr_mode>(regs, kXRegSizeInBits);
  }
  inline void PushWRegList(RegList regs) {
    PushSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PopWRegList(RegList regs) {
    PopSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PushQRegList(RegList regs) {
    PushSizeRegList(regs, kQRegSizeInBits, CPURegister::kVRegister);
  }
  inline void PopQRegList(RegList regs) {
    PopSizeRegList(regs, kQRegSizeInBits, CPURegister::kVRegister);
  }
  inline void PushDRegList(RegList regs) {
    PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
  }
  inline void PopDRegList(RegList regs) {
    PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
  }
  inline void PushSRegList(RegList regs) {
    PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
  }
  inline void PopSRegList(RegList regs) {
    PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
  }

  // Push the specified register 'count' times.
  void PushMultipleTimes(CPURegister src, Register count);

  // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
  // values peeked will be adjacent, with the value in 'dst2' being from a
  // higher address than 'dst1'. The offset is in bytes. The stack pointer must
  // be aligned to 16 bytes.
  void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);

  // Preserve the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are pushed before lower-numbered registers, and
  // thus get higher addresses.
  // Floating-point registers are pushed before general-purpose registers, and
  // thus get higher addresses.
  //
  // When control flow integrity measures are enabled, this method signs the
  // link register before pushing it.
  //
  // Note that registers are not checked for invalid values. Use this method
  // only if you know that the GC won't try to examine the values on the stack.
  void PushCalleeSavedRegisters();

  // Restore the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are popped after lower-numbered registers, and
  // thus come from higher addresses.
  // Floating-point registers are popped after general-purpose registers, and
  // thus come from higher addresses.
  //
  // When control flow integrity measures are enabled, this method
  // authenticates the link register after popping it.
  void PopCalleeSavedRegisters();

  // Helpers ------------------------------------------------------------------

  template <typename Field>
  void DecodeField(Register dst, Register src) {
    static const int shift = Field::kShift;
    static const int setbits = CountSetBits(Field::kMask, 32);
    Ubfx(dst, src, shift, setbits);
  }

  template <typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
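  // Usage sketch: 'Field' is any BitField-style class exposing kShift and
  // kMask (e.g. a hypothetical MyFieldBits); DecodeField<MyFieldBits>(x0, x1)
  // then extracts that bit field from x1 into x0 with a single Ubfx.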

  Operand ReceiverOperand(const Register arg_count);

  // ---- SMI and Number Utilities ----

  inline void SmiTag(Register dst, Register src);
  inline void SmiTag(Register smi);

  inline void JumpIfNotSmi(Register value, Label* not_smi_label);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object,
                    AbortReason reason = AbortReason::kOperandIsASmi);

  // Abort execution if argument is not a CodeT, enabled via --debug-code.
  void AssertCodeT(Register object);

  // Abort execution if argument is not a Constructor, enabled via --debug-code.
  void AssertConstructor(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a callable JSFunction, enabled via
  // --debug-code.
  void AssertCallableFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject (or subclass),
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object);

  // ---- Calling / Jumping helpers ----

  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  void TailCallRuntime(Runtime::FunctionId fid);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               bool builtin_exit_frame = false);

  // Generates a trampoline to jump to the off-heap instruction stream.
  void JumpToOffHeapInstructionStream(Address entry);

  // Registers used through the invocation chain are hard-coded.
  // We force passing the parameters to ensure the contracts are correctly
  // honoured by the caller.
  // 'function' must be x1.
  // 'actual' must use an immediate or x0.
  // 'expected' must use an immediate or x2.
  // 'call_kind' must be x5.
  void InvokePrologue(Register expected_parameter_count,
                      Register actual_parameter_count, Label* done,
                      InvokeType type);

  // On function call, call into the debugger.
  void CallDebugOnFunctionCall(Register fun, Register new_target,
                               Register expected_parameter_count,
                               Register actual_parameter_count);
  void InvokeFunctionCode(Register function, Register new_target,
                          Register expected_parameter_count,
                          Register actual_parameter_count, InvokeType type);
  // Invoke the JavaScript function in the given register.
  // Changes the current context to the context in the function before invoking.
  void InvokeFunctionWithNewTarget(Register function, Register new_target,
                                   Register actual_parameter_count,
                                   InvokeType type);
  void InvokeFunction(Register function, Register expected_parameter_count,
                      Register actual_parameter_count, InvokeType type);

  // ---- Code generation helpers ----

  // ---------------------------------------------------------------------------
  // Support functions.

  // Compare object type for heap object.  heap_object contains a non-Smi
  // whose object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register).  It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  void CompareObjectType(Register heap_object, Register map, Register type_reg,
                         InstanceType type);

  // Compare object type for heap object, and branch if equal (or not).
  // heap_object contains a non-Smi whose object type should be compared with
  // the given type.  This both sets the flags and leaves the object type in
  // the type_reg register. It leaves the map in the map register (unless the
  // type_reg and map register are the same register).  It leaves the heap
  // object in the heap_object register unless the heap_object register is the
  // same register as one of the other registers.
  void JumpIfObjectType(Register object, Register map, Register type_reg,
                        InstanceType type, Label* if_cond_pass,
                        Condition cond = eq);
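  //
  // A typical use (sketch only; registers are illustrative):
  //   Label not_a_js_function;
  //   JumpIfObjectType(x1, x2, x3, JS_FUNCTION_TYPE, &not_a_js_function, ne);
  //   // Here x1 is a JSFunction, x2 holds its map and x3 its instance type.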

  // Compare instance type in a map.  map contains a valid map object whose
  // object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map, Register type_reg, InstanceType type);

  // Compare instance type ranges for a map (lower_limit and higher_limit
  // inclusive).
  //
  // Always use unsigned comparisons: ls for a positive result.
  void CompareInstanceTypeRange(Register map, Register type_reg,
                                InstanceType lower_limit,
                                InstanceType higher_limit);

  // Load the elements kind field from a map, and return it in the result
  // register.
  void LoadElementsKindFromMap(Register result, Register map);

  // Compare the object in a register to a value from the root list.
  void CompareRoot(const Register& obj, RootIndex index);

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(const Register& obj, RootIndex index, Label* if_equal);

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(const Register& obj, RootIndex index, Label* if_not_equal);

  // Checks if value is in range [lower_limit, higher_limit] using a single
  // comparison.
  void JumpIfIsInRange(const Register& value, unsigned lower_limit,
                       unsigned higher_limit, Label* on_in_range);

  // ---------------------------------------------------------------------------
  // Frames.

  void ExitFramePreserveFPRegs();
  void ExitFrameRestoreFPRegs();

  // Enter exit frame. Exit frames are used when calling C code from generated
  // (JavaScript) code.
  //
  // The only registers modified by this function are the provided scratch
  // register, the frame pointer and the stack pointer.
  //
  // The 'extra_space' argument can be used to allocate some space in the exit
  // frame that will be ignored by the GC. This space will be reserved in the
  // bottom of the frame immediately above the return address slot.
  //
  // Set up a stack frame and registers as follows:
  //         fp[8]: CallerPC (lr)
  //   fp -> fp[0]: CallerFP (old fp)
  //         fp[-8]: SPOffset (new sp)
  //         fp[-16]: CodeObject()
  //         fp[-16 - fp-size]: Saved doubles, if save_doubles is true.
  //         sp[8]: Memory reserved for the caller if extra_space != 0.
  //                 Alignment padding, if necessary.
  //   sp -> sp[0]: Space reserved for the return address.
  //
  // This function also stores the new frame information in the top frame, so
  // that the new frame becomes the current frame.
  void EnterExitFrame(bool save_doubles, const Register& scratch,
                      int extra_space = 0,
                      StackFrame::Type frame_type = StackFrame::EXIT);

  // Leave the current exit frame, after a C function has returned to generated
  // (JavaScript) code.
  //
  // This effectively unwinds the operation of EnterExitFrame:
  //  * Preserved doubles are restored (if restore_doubles is true).
  //  * The frame information is removed from the top frame.
  //  * The exit frame is dropped.
  void LeaveExitFrame(bool save_doubles, const Register& scratch,
                      const Register& scratch2);
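  //
  // The two calls are expected to bracket a C call, roughly (sketch only;
  // registers and arguments are illustrative):
  //   EnterExitFrame(false, x10, 0);
  //   // ... set up arguments and call the C function ...
  //   LeaveExitFrame(false, x10, x11);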

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst);

  // ---------------------------------------------------------------------------
  // In-place weak references.
  void LoadWeakValue(Register out, Register in, Label* target_if_cleared);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2) {
    if (!FLAG_native_code_counters) return;
    EmitIncrementCounter(counter, value, scratch1, scratch2);
  }
  void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
                            Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2) {
    if (!FLAG_native_code_counters) return;
    EmitIncrementCounter(counter, -value, scratch1, scratch2);
  }

  // ---------------------------------------------------------------------------
  // Stack limit utilities
  void LoadStackLimit(Register destination, StackLimitKind kind);
  void StackOverflowCheck(Register num_args, Label* stack_overflow);

  // ---------------------------------------------------------------------------
  // Garbage collector support (GC).

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object, int offset, Register value, LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
      SmiCheck smi_check = SmiCheck::kInline);
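  //
  // Sketch of a typical call (illustrative only), emitted after storing 'value'
  // into the field at 'offset' of 'object':
  //   RecordWriteField(object, offset, value, kLRHasNotBeenSaved,
  //                    SaveFPRegsMode::kIgnore);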

  // For a given |object| notify the garbage collector that the slot at |offset|
  // has been written. |value| is the object being stored.
  void RecordWrite(
      Register object, Operand offset, Register value,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
      SmiCheck smi_check = SmiCheck::kInline);

  // ---------------------------------------------------------------------------
  // Debugging.

  void LoadNativeContextSlot(Register dst, int index);

  DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};

// Use this scope when you need a one-to-one mapping between methods and
// instructions. This scope prevents the MacroAssembler from being called and
// literal pools from being emitted. It also asserts the number of instructions
// emitted is what you specified when creating the scope.
class V8_NODISCARD InstructionAccurateScope {
 public:
  explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
      : tasm_(tasm),
        block_pool_(tasm, count * kInstrSize)
#ifdef DEBUG
        ,
        size_(count * kInstrSize)
#endif
  {
    tasm_->CheckVeneerPool(false, true, count * kInstrSize);
    tasm_->StartBlockVeneerPool();
#ifdef DEBUG
    if (count != 0) {
      tasm_->bind(&start_);
    }
    previous_allow_macro_instructions_ = tasm_->allow_macro_instructions();
    tasm_->set_allow_macro_instructions(false);
#endif
  }

  ~InstructionAccurateScope() {
    tasm_->EndBlockVeneerPool();
#ifdef DEBUG
    if (start_.is_bound()) {
      DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_);
    }
    tasm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
#endif
  }

 private:
  TurboAssembler* tasm_;
  TurboAssembler::BlockConstPoolScope block_pool_;
#ifdef DEBUG
  size_t size_;
  Label start_;
  bool previous_allow_macro_instructions_;
#endif
};
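
// A usage sketch (illustrative; 'tasm' is assumed to be a TurboAssembler*):
//   {
//     InstructionAccurateScope scope(tasm, 2);
//     tasm->ldr(x0, MemOperand(x1));
//     tasm->add(x0, x0, Operand(1));
//   }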

// This scope utility allows scratch registers to be managed safely. The
// TurboAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
// When the scope ends, the MacroAssembler's lists will be restored to their
// original state, even if the lists were modified by some other means. Note
// that this scope can be nested, but the destructors need to run in the
// opposite order to the constructors. We do not have assertions for this.
class V8_NODISCARD UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(TurboAssembler* tasm)
      : available_(tasm->TmpList()),
        availablefp_(tasm->FPTmpList()),
        old_available_(available_->list()),
        old_availablefp_(availablefp_->list()) {
    DCHECK_EQ(available_->type(), CPURegister::kRegister);
    DCHECK_EQ(availablefp_->type(), CPURegister::kVRegister);
  }

  V8_EXPORT_PRIVATE ~UseScratchRegisterScope();

  // Take a register from the appropriate temps list. It will be returned
  // automatically when the scope ends.
  Register AcquireW() { return AcquireNextAvailable(available_).W(); }
  Register AcquireX() { return AcquireNextAvailable(available_).X(); }
  VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
  VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
  VRegister AcquireQ() { return AcquireNextAvailable(availablefp_).Q(); }
  VRegister AcquireV(VectorFormat format) {
    return VRegister::Create(AcquireNextAvailable(availablefp_).code(), format);
  }

  Register AcquireSameSizeAs(const Register& reg);
  V8_EXPORT_PRIVATE VRegister AcquireSameSizeAs(const VRegister& reg);

  void Include(const CPURegList& list) { available_->Combine(list); }
  void Exclude(const CPURegList& list) {
#if DEBUG
    CPURegList copy(list);
    while (!copy.IsEmpty()) {
      const CPURegister& reg = copy.PopHighestIndex();
      DCHECK(available_->IncludesAliasOf(reg));
    }
#endif
    available_->Remove(list);
  }
  void Include(const Register& reg1, const Register& reg2 = NoReg) {
    CPURegList list(reg1, reg2);
    Include(list);
  }
  void Exclude(const Register& reg1, const Register& reg2 = NoReg) {
    CPURegList list(reg1, reg2);
    Exclude(list);
  }

 private:
  V8_EXPORT_PRIVATE static CPURegister AcquireNextAvailable(
      CPURegList* available);

  // Available scratch registers.
  CPURegList* available_;    // kRegister
  CPURegList* availablefp_;  // kVRegister

  // The state of the available lists at the start of this scope.
  RegList old_available_;    // kRegister
  RegList old_availablefp_;  // kVRegister
};
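
// A usage sketch (illustrative; 'tasm' is assumed to be a TurboAssembler*):
//   {
//     UseScratchRegisterScope temps(tasm);
//     Register scratch = temps.AcquireX();
//     // ... use 'scratch'; it is handed back when 'temps' goes out of scope.
//   }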

}  // namespace internal
}  // namespace v8

#define ACCESS_MASM(masm) masm->

#endif  // V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_