macro-assembler-arm64.cc
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"

#include "src/arm64/frames-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"

namespace v8 {
namespace internal {

// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
#define __


MacroAssembler::MacroAssembler(Isolate* arg_isolate, byte* buffer,
                               unsigned buffer_size,
                               CodeObjectRequired create_code_object)
    : Assembler(arg_isolate, buffer, buffer_size),
      generating_stub_(false),
#if DEBUG
      allow_macro_instructions_(true),
#endif
      has_frame_(false),
      use_real_aborts_(true),
      sp_(jssp),
      tmp_list_(DefaultTmpList()),
      fptmp_list_(DefaultFPTmpList()) {
  if (create_code_object == CodeObjectRequired::kYes) {
    code_object_ =
        Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
  }
}


CPURegList MacroAssembler::DefaultTmpList() {
  return CPURegList(ip0, ip1);
}


CPURegList MacroAssembler::DefaultFPTmpList() {
  return CPURegList(fp_scratch1, fp_scratch2);
}


void MacroAssembler::LogicalMacro(const Register& rd,
                                  const Register& rn,
                                  const Operand& operand,
                                  LogicalOp op) {
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation(this)) {
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    Logical(rd, rn, temp, op);

  } else if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    unsigned reg_size = rd.SizeInBits();

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = ~immediate;
    }

    // Ignore the top 32 bits of an immediate if we're moving to a W register.
    if (rd.Is32Bits()) {
      // Check that the top 32 bits are consistent.
      DCHECK(((immediate >> kWRegSizeInBits) == 0) ||
             ((immediate >> kWRegSizeInBits) == -1));
      immediate &= kWRegMask;
    }

    DCHECK(rd.Is64Bits() || is_uint32(immediate));

    // Special cases for all set or all clear immediates.
    if (immediate == 0) {
      switch (op) {
        case AND:
          Mov(rd, 0);
          return;
        case ORR:  // Fall through.
        case EOR:
          Mov(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          UNREACHABLE();
      }
    } else if ((rd.Is64Bits() && (immediate == -1L)) ||
               (rd.Is32Bits() && (immediate == 0xffffffffL))) {
      switch (op) {
        case AND:
          Mov(rd, rn);
          return;
        case ORR:
          Mov(rd, immediate);
          return;
        case EOR:
          Mvn(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          UNREACHABLE();
      }
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // Immediate can't be encoded: synthesize using move immediate.
      Register temp = temps.AcquireSameSizeAs(rn);
      Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
      if (rd.Is(csp)) {
        // If rd is the stack pointer we cannot use it as the destination
        // register so we use the temp register as an intermediate again.
        Logical(temp, rn, imm_operand, op);
        Mov(csp, temp);
        AssertStackConsistency();
      } else {
        Logical(rd, rn, imm_operand, op);
      }
    }

  } else if (operand.IsExtendedRegister()) {
    DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
    // Add/sub extended supports shift <= 4. We want to support exactly the
    // same modes here.
    DCHECK(operand.shift_amount() <= 4);
    DCHECK(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    Logical(rd, rn, temp, op);

  } else {
    // The operand can be encoded in the instruction.
    DCHECK(operand.IsShiftedRegister());
    Logical(rd, rn, operand, op);
  }
}


void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
  DCHECK(allow_macro_instructions_);
  DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
  DCHECK(!rd.IsZero());

  // TODO(all) extend to support more immediates.
  //
  // Immediates on AArch64 can be produced using an initial value, and zero to
  // three move keep operations.
  //
  // Initial values can be generated with:
  //  1. 64-bit move zero (movz).
  //  2. 32-bit move inverted (movn).
  //  3. 64-bit move inverted.
  //  4. 32-bit orr immediate.
  //  5. 64-bit orr immediate.
  // Move-keep may then be used to modify each of the 16-bit half-words.
  //
  // The code below supports all five initial value generators, and
  // applying move-keep operations to move-zero and move-inverted initial
  // values.
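  //
  // For example (illustrative only), 0x0000cafe00005678 can be generated as
  //   movz x0, #0x5678               // Set halfword 0; clear the rest.
  //   movk x0, #0xcafe, lsl #32      // Insert halfword 2.
  // whereas 0xffffcafeffff5678 is cheaper to build from the inverted value:
  //   movn x0, #0xa987               // x0 = ~0x000000000000a987.
  //   movk x0, #0xcafe, lsl #32      // Insert halfword 2.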

  // Try to move the immediate in one instruction, and if that fails, switch to
  // using multiple instructions.
  if (!TryOneInstrMoveImmediate(rd, imm)) {
    unsigned reg_size = rd.SizeInBits();

    // Generic immediate case. Imm will be represented by
    //   [imm3, imm2, imm1, imm0], where each imm is 16 bits.
    // A move-zero or move-inverted is generated for the first non-zero or
    // non-0xffff immX, and a move-keep for subsequent non-zero immX.

    uint64_t ignored_halfword = 0;
    bool invert_move = false;
    // If the number of 0xffff halfwords is greater than the number of 0x0000
    // halfwords, it's more efficient to use move-inverted.
    if (CountClearHalfWords(~imm, reg_size) >
        CountClearHalfWords(imm, reg_size)) {
      ignored_halfword = 0xffffL;
      invert_move = true;
    }

    // Mov instructions can't move immediate values into the stack pointer, so
    // set up a temporary register, if needed.
    UseScratchRegisterScope temps(this);
    Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;

    // Iterate through the halfwords. Use movn/movz for the first non-ignored
    // halfword, and movk for subsequent halfwords.
    DCHECK((reg_size % 16) == 0);
    bool first_mov_done = false;
    for (int i = 0; i < (rd.SizeInBits() / 16); i++) {
      uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
      if (imm16 != ignored_halfword) {
        if (!first_mov_done) {
          if (invert_move) {
            movn(temp, (~imm16) & 0xffffL, 16 * i);
          } else {
            movz(temp, imm16, 16 * i);
          }
          first_mov_done = true;
        } else {
          // Construct a wider constant.
          movk(temp, imm16, 16 * i);
        }
      }
    }
    DCHECK(first_mov_done);

    // Move the temporary if the original destination register was the stack
    // pointer.
    if (rd.IsSP()) {
      mov(rd, temp);
      AssertStackConsistency();
    }
  }
}


void MacroAssembler::Mov(const Register& rd,
                         const Operand& operand,
                         DiscardMoveMode discard_mode) {
  DCHECK(allow_macro_instructions_);
  DCHECK(!rd.IsZero());

  // Provide a swap register for instructions that need to write into the
  // system stack pointer (and can't do this inherently).
  UseScratchRegisterScope temps(this);
  Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;

  if (operand.NeedsRelocation(this)) {
    Ldr(dst, operand.immediate());

  } else if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(dst, operand.ImmediateValue());

  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Emit a shift instruction if moving a shifted register. This operation
    // could also be achieved using an orr instruction (like orn used by Mvn),
    // but using a shift instruction makes the disassembly clearer.
    EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());

  } else if (operand.IsExtendedRegister()) {
    // Emit an extend instruction if moving an extended register. This handles
    // extend with post-shift operations, too.
    EmitExtendShift(dst, operand.reg(), operand.extend(),
                    operand.shift_amount());

  } else {
    // Otherwise, emit a register move only if the registers are distinct, or
    // if they are not X registers.
    //
    // Note that mov(w0, w0) is not a no-op because it clears the top word of
    // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
    // registers is not required to clear the top word of the X register. In
    // this case, the instruction is discarded.
    //
    // If csp is an operand, add #0 is emitted, otherwise, orr #0.
    if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
                                  (discard_mode == kDontDiscardForSameWReg))) {
      Assembler::mov(rd, operand.reg());
    }
    // This case can handle writes into the system stack pointer directly.
    dst = rd;
  }

  // Copy the result to the system stack pointer.
  if (!dst.Is(rd)) {
    DCHECK(rd.IsSP());
    Assembler::mov(rd, dst);
  }
}


void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
  DCHECK(allow_macro_instructions_);

  if (operand.NeedsRelocation(this)) {
    Ldr(rd, operand.immediate());
    mvn(rd, rd);

  } else if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(rd, ~operand.ImmediateValue());

  } else if (operand.IsExtendedRegister()) {
    // Emit two instructions for the extend case. This differs from Mov, as
    // the extend and invert can't be achieved in one instruction.
    EmitExtendShift(rd, operand.reg(), operand.extend(),
                    operand.shift_amount());
    mvn(rd, rd);

  } else {
    mvn(rd, operand);
  }
}


unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  DCHECK((reg_size % 8) == 0);
  int count = 0;
  for (unsigned i = 0; i < (reg_size / 16); i++) {
    if ((imm & 0xffff) == 0) {
      count++;
    }
    imm >>= 16;
  }
  return count;
}


// The movz instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
  DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}


// The movn instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
  return IsImmMovz(~imm, reg_size);
}


void MacroAssembler::ConditionalCompareMacro(const Register& rn,
                                             const Operand& operand,
                                             StatusFlags nzcv,
                                             Condition cond,
                                             ConditionalCompareOp op) {
  DCHECK((cond != al) && (cond != nv));
  if (operand.NeedsRelocation(this)) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    ConditionalCompareMacro(rn, temp, nzcv, cond, op);

  } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
             (operand.IsImmediate() &&
              IsImmConditionalCompare(operand.ImmediateValue()))) {
    // The immediate can be encoded in the instruction, or the operand is an
    // unshifted register: call the assembler.
    ConditionalCompare(rn, operand, nzcv, cond, op);

  } else {
    // The operand isn't directly supported by the instruction: perform the
    // operation on a temporary register.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    ConditionalCompare(rn, temp, nzcv, cond, op);
  }
}


void MacroAssembler::Csel(const Register& rd,
                          const Register& rn,
                          const Operand& operand,
                          Condition cond) {
  DCHECK(allow_macro_instructions_);
  DCHECK(!rd.IsZero());
  DCHECK((cond != al) && (cond != nv));
  if (operand.IsImmediate()) {
    // Immediate argument. Handle special cases of 0, 1 and -1 using zero
    // register.
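    // (csel against the zero register yields 0 when the condition fails,
    // while csinc and csinv yield zr + 1 == 1 and ~zr == -1 respectively, so
    // these three immediates need no temporary register.)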
    int64_t imm = operand.ImmediateValue();
    Register zr = AppropriateZeroRegFor(rn);
    if (imm == 0) {
      csel(rd, rn, zr, cond);
    } else if (imm == 1) {
      csinc(rd, rn, zr, cond);
    } else if (imm == -1) {
      csinv(rd, rn, zr, cond);
    } else {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(rn);
      Mov(temp, imm);
      csel(rd, rn, temp, cond);
    }
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
    // Unshifted register argument.
    csel(rd, rn, operand.reg(), cond);
  } else {
    // All other arguments.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    csel(rd, rn, temp, cond);
  }
}


bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
                                              int64_t imm) {
  unsigned n, imm_s, imm_r;
  int reg_size = dst.SizeInBits();
  if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
    // Immediate can be represented in a move zero instruction. Movz can't write
    // to the stack pointer.
    movz(dst, imm);
    return true;
  } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
    // Immediate can be represented in a move not instruction. Movn can't write
    // to the stack pointer.
    movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
    return true;
  } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
    // Immediate can be represented in a logical orr instruction.
    LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
    return true;
  }
  return false;
}


Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
                                                  int64_t imm) {
  int reg_size = dst.SizeInBits();

  // Encode the immediate in a single move instruction, if possible.
  if (TryOneInstrMoveImmediate(dst, imm)) {
    // The move was successful; nothing to do here.
  } else {
    // Pre-shift the immediate to the least-significant bits of the register.
    int shift_low = CountTrailingZeros(imm, reg_size);
    int64_t imm_low = imm >> shift_low;
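    // For example (illustrative only), 0x1234000 is not encodable directly,
    // but 0x1234000 == 0x48d << 14 and 0x48d fits in a single movz, so the
    // caller can fold the LSL #14 into its shifted-register operand.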

    // Pre-shift the immediate to the most-significant bits of the register. We
    // insert set bits in the least-significant bits, as this creates a
    // different immediate that may be encodable using movn or orr-immediate.
    // If this new immediate is encodable, the set bits will be eliminated by
    // the post shift on the following instruction.
    int shift_high = CountLeadingZeros(imm, reg_size);
    int64_t imm_high = (imm << shift_high) | ((1 << shift_high) - 1);

    if (TryOneInstrMoveImmediate(dst, imm_low)) {
      // The new immediate has been moved into the destination's low bits:
      // return a new leftward-shifting operand.
      return Operand(dst, LSL, shift_low);
    } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
      // The new immediate has been moved into the destination's high bits:
      // return a new rightward-shifting operand.
      return Operand(dst, LSR, shift_high);
    } else {
      // Use the generic move operation to set up the immediate.
      Mov(dst, imm);
    }
  }
  return Operand(dst);
}


void MacroAssembler::AddSubMacro(const Register& rd,
                                 const Register& rn,
                                 const Operand& operand,
                                 FlagsUpdate S,
                                 AddSubOp op) {
  if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
      !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
    // The instruction would be a nop. Avoid generating useless code.
    return;
  }

  if (operand.NeedsRelocation(this)) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    AddSubMacro(rd, rn, temp, S, op);
  } else if ((operand.IsImmediate() &&
              !IsImmAddSub(operand.ImmediateValue()))      ||
             (rn.IsZero() && !operand.IsShiftedRegister()) ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    if (operand.IsImmediate()) {
      Operand imm_operand =
          MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
      AddSub(rd, rn, imm_operand, S, op);
    } else {
      Mov(temp, operand);
      AddSub(rd, rn, temp, S, op);
    }
  } else {
    AddSub(rd, rn, operand, S, op);
  }
}


void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
                                          const Register& rn,
                                          const Operand& operand,
                                          FlagsUpdate S,
                                          AddSubWithCarryOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation(this)) {
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    AddSubWithCarryMacro(rd, rn, temp, S, op);

  } else if (operand.IsImmediate() ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    // Add/sub with carry (immediate or ROR shifted register.)
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    AddSubWithCarry(rd, rn, temp, S, op);

  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Add/sub with carry (shifted register).
    DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
    DCHECK(operand.shift() != ROR);
    DCHECK(is_uintn(operand.shift_amount(),
          rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
                                             : kWRegSizeInBitsLog2));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);

  } else if (operand.IsExtendedRegister()) {
    // Add/sub with carry (extended register).
    DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
    // Add/sub extended supports a shift <= 4. We want to support exactly the
    // same modes.
    DCHECK(operand.shift_amount() <= 4);
    DCHECK(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);

  } else {
    // The addressing mode is directly supported by the instruction.
    AddSubWithCarry(rd, rn, operand, S, op);
  }
}


void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
                                    const MemOperand& addr,
                                    LoadStoreOp op) {
  int64_t offset = addr.offset();
  LSDataSize size = CalcLSDataSize(op);

  // Check if an immediate offset fits in the immediate field of the
  // appropriate instruction. If not, emit two instructions to perform
  // the operation.
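  // For example (illustrative only), Ldr(x0, MemOperand(x1, 0x123456)) cannot
  // be encoded in a single instruction, so the offset is first materialized
  // in a scratch register and a register-offset load is emitted instead.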
  if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
      !IsImmLSUnscaled(offset)) {
    // Immediate offset that can't be encoded using unsigned or unscaled
    // addressing modes.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(addr.base());
    Mov(temp, addr.offset());
    LoadStore(rt, MemOperand(addr.base(), temp), op);
  } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
    // Post-index beyond unscaled addressing range.
    LoadStore(rt, MemOperand(addr.base()), op);
    add(addr.base(), addr.base(), offset);
  } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
    // Pre-index beyond unscaled addressing range.
    add(addr.base(), addr.base(), offset);
    LoadStore(rt, MemOperand(addr.base()), op);
  } else {
    // Encodable in one load/store instruction.
    LoadStore(rt, addr, op);
  }
}

void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
                                        const CPURegister& rt2,
                                        const MemOperand& addr,
                                        LoadStorePairOp op) {
  // TODO(all): Should we support register offset for load-store-pair?
  DCHECK(!addr.IsRegisterOffset());

  int64_t offset = addr.offset();
  LSDataSize size = CalcLSPairDataSize(op);

  // Check if the offset fits in the immediate field of the appropriate
  // instruction. If not, emit two instructions to perform the operation.
  if (IsImmLSPair(offset, size)) {
    // Encodable in one load/store pair instruction.
    LoadStorePair(rt, rt2, addr, op);
  } else {
    Register base = addr.base();
    if (addr.IsImmediateOffset()) {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(base);
      Add(temp, base, offset);
      LoadStorePair(rt, rt2, MemOperand(temp), op);
    } else if (addr.IsPostIndex()) {
      LoadStorePair(rt, rt2, MemOperand(base), op);
      Add(base, base, offset);
    } else {
      DCHECK(addr.IsPreIndex());
      Add(base, base, offset);
      LoadStorePair(rt, rt2, MemOperand(base), op);
    }
  }
}


void MacroAssembler::Load(const Register& rt,
                          const MemOperand& addr,
                          Representation r) {
  DCHECK(!r.IsDouble());

  if (r.IsInteger8()) {
    Ldrsb(rt, addr);
  } else if (r.IsUInteger8()) {
    Ldrb(rt, addr);
  } else if (r.IsInteger16()) {
    Ldrsh(rt, addr);
  } else if (r.IsUInteger16()) {
    Ldrh(rt, addr);
  } else if (r.IsInteger32()) {
    Ldr(rt.W(), addr);
  } else {
    DCHECK(rt.Is64Bits());
    Ldr(rt, addr);
  }
}


void MacroAssembler::Store(const Register& rt,
                           const MemOperand& addr,
                           Representation r) {
  DCHECK(!r.IsDouble());

  if (r.IsInteger8() || r.IsUInteger8()) {
    Strb(rt, addr);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    Strh(rt, addr);
  } else if (r.IsInteger32()) {
    Str(rt.W(), addr);
  } else {
    DCHECK(rt.Is64Bits());
    if (r.IsHeapObject()) {
      AssertNotSmi(rt);
    } else if (r.IsSmi()) {
      AssertSmi(rt);
    }
    Str(rt, addr);
  }
}


bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
    Label *label, ImmBranchType b_type) {
  bool need_longer_range = false;
  // There are two situations in which we care about the offset being out of
  // range:
  //  - The label is bound but too far away.
  //  - The label is not bound but linked, and the previous branch
  //    instruction in the chain is too far away.
  if (label->is_bound() || label->is_linked()) {
    need_longer_range =
      !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
  }
  if (!need_longer_range && !label->is_bound()) {
    int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
    unresolved_branches_.insert(
        std::pair<int, FarBranchInfo>(max_reachable_pc,
                                      FarBranchInfo(pc_offset(), label)));
    // Also maintain the next pool check.
    next_veneer_pool_check_ =
      Min(next_veneer_pool_check_,
          max_reachable_pc - kVeneerDistanceCheckMargin);
  }
  return need_longer_range;
}


void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
  DCHECK(allow_macro_instructions_);
  DCHECK(!rd.IsZero());

  if (hint == kAdrNear) {
    adr(rd, label);
    return;
  }

  DCHECK(hint == kAdrFar);
  if (label->is_bound()) {
    int label_offset = label->pos() - pc_offset();
    if (Instruction::IsValidPCRelOffset(label_offset)) {
      adr(rd, label);
    } else {
      DCHECK(label_offset <= 0);
      int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
      adr(rd, min_adr_offset);
      Add(rd, rd, label_offset - min_adr_offset);
    }
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.AcquireX();

    InstructionAccurateScope scope(
        this, PatchingAssembler::kAdrFarPatchableNInstrs);
    adr(rd, label);
    for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
      nop(ADR_FAR_NOP);
    }
    movz(scratch, 0);
  }
}


void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
  DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
         (bit == -1 || type >= kBranchTypeFirstUsingBit));
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    B(static_cast<Condition>(type), label);
  } else {
    switch (type) {
      case always:        B(label);              break;
      case never:         break;
      case reg_zero:      Cbz(reg, label);       break;
      case reg_not_zero:  Cbnz(reg, label);      break;
      case reg_bit_clear: Tbz(reg, bit, label);  break;
      case reg_bit_set:   Tbnz(reg, bit, label); break;
      default:
        UNREACHABLE();
    }
  }
}


void MacroAssembler::B(Label* label, Condition cond) {
  DCHECK(allow_macro_instructions_);
  DCHECK((cond != al) && (cond != nv));

  Label done;
  bool need_extra_instructions =
    NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);

  if (need_extra_instructions) {
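    // The label is (or may end up) out of range for a single conditional
    // branch (+/-1MB), so jump over an unconditional B, which has a +/-128MB
    // range:
    //   b.<inverted cond> done
    //   b label
    //  done: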
    b(&done, NegateCondition(cond));
    B(label);
  } else {
    b(label, cond);
  }
  bind(&done);
}


void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
    NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);

  if (need_extra_instructions) {
    tbz(rt, bit_pos, &done);
    B(label);
  } else {
    tbnz(rt, bit_pos, label);
  }
  bind(&done);
}


void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
    NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);

  if (need_extra_instructions) {
    tbnz(rt, bit_pos, &done);
    B(label);
  } else {
    tbz(rt, bit_pos, label);
  }
  bind(&done);
}


void MacroAssembler::Cbnz(const Register& rt, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
    NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);

  if (need_extra_instructions) {
    cbz(rt, &done);
    B(label);
  } else {
    cbnz(rt, label);
  }
  bind(&done);
}


void MacroAssembler::Cbz(const Register& rt, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
    NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);

  if (need_extra_instructions) {
    cbnz(rt, &done);
    B(label);
  } else {
    cbz(rt, label);
  }
  bind(&done);
}


// Pseudo-instructions.


void MacroAssembler::Abs(const Register& rd, const Register& rm,
                         Label* is_not_representable,
                         Label* is_representable) {
  DCHECK(allow_macro_instructions_);
  DCHECK(AreSameSizeAndType(rd, rm));

  Cmp(rm, 1);
  Cneg(rd, rm, lt);

  // If the comparison sets the v flag, the input was the smallest value
  // representable by rm, and the mathematical result of abs(rm) is not
  // representable using two's complement.
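  // (For example, if rm holds the most negative representable value, the
  // comparison overflows and sets the V flag, and Cneg wraps back to that
  // same value.)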
  if ((is_not_representable != NULL) && (is_representable != NULL)) {
    B(is_not_representable, vs);
    B(is_representable);
  } else if (is_not_representable != NULL) {
    B(is_not_representable, vs);
  } else if (is_representable != NULL) {
    B(is_representable, vc);
  }
}


// Abstracted stack operations.


void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3) {
  DCHECK(AreSameSizeAndType(src0, src1, src2, src3));

  int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
  int size = src0.SizeInBytes();

  PushPreamble(count, size);
  PushHelper(count, size, src0, src1, src2, src3);
}


void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3,
                          const CPURegister& src4, const CPURegister& src5,
                          const CPURegister& src6, const CPURegister& src7) {
  DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));

  int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
  int size = src0.SizeInBytes();

  PushPreamble(count, size);
  PushHelper(4, size, src0, src1, src2, src3);
  PushHelper(count - 4, size, src4, src5, src6, src7);
}


void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
                         const CPURegister& dst2, const CPURegister& dst3) {
  // It is not valid to pop into the same register more than once in one
  // instruction, not even into the zero register.
  DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
  DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  DCHECK(dst0.IsValid());

  int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
  int size = dst0.SizeInBytes();

  PopHelper(count, size, dst0, dst1, dst2, dst3);
  PopPostamble(count, size);
}


void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
                         const CPURegister& dst2, const CPURegister& dst3,
                         const CPURegister& dst4, const CPURegister& dst5,
                         const CPURegister& dst6, const CPURegister& dst7) {
  // It is not valid to pop into the same register more than once in one
  // instruction, not even into the zero register.
  DCHECK(!AreAliased(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
  DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
  DCHECK(dst0.IsValid());

  int count = 5 + dst5.IsValid() + dst6.IsValid() + dst7.IsValid();
  int size = dst0.SizeInBytes();

  PopHelper(4, size, dst0, dst1, dst2, dst3);
  PopHelper(count - 4, size, dst4, dst5, dst6, dst7);
  PopPostamble(count, size);
}


void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
  int size = src0.SizeInBytes() + src1.SizeInBytes();

  PushPreamble(size);
  // Reserve room for src0 and push src1.
  str(src1, MemOperand(StackPointer(), -size, PreIndex));
  // Fill the gap with src0.
  str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
}


void MacroAssembler::PushPopQueue::PushQueued(
    PreambleDirective preamble_directive) {
  if (queued_.empty()) return;

  if (preamble_directive == WITH_PREAMBLE) {
    masm_->PushPreamble(size_);
  }

  size_t count = queued_.size();
  size_t index = 0;
  while (index < count) {
    // PushHelper can only handle registers with the same size and type, and it
    // can handle only four at a time. Batch them up accordingly.
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
    int batch_index = 0;
    do {
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
                      batch[0], batch[1], batch[2], batch[3]);
  }

  queued_.clear();
}


void MacroAssembler::PushPopQueue::PopQueued() {
  if (queued_.empty()) return;

  size_t count = queued_.size();
  size_t index = 0;
  while (index < count) {
    // PopHelper can only handle registers with the same size and type, and it
    // can handle only four at a time. Batch them up accordingly.
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
    int batch_index = 0;
    do {
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
                     batch[0], batch[1], batch[2], batch[3]);
  }

  masm_->PopPostamble(size_);
  queued_.clear();
}


void MacroAssembler::PushCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  PushPreamble(registers.Count(), size);
  // Push up to four registers at a time because if the current stack pointer is
  // csp and reg_size is 32, registers must be pushed in blocks of four in order
  // to maintain the 16-byte alignment for csp.
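  // (Four W registers are 16 bytes, so csp moves by a multiple of 16 bytes at
  // each step.)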
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& src0 = registers.PopHighestIndex();
    const CPURegister& src1 = registers.PopHighestIndex();
    const CPURegister& src2 = registers.PopHighestIndex();
    const CPURegister& src3 = registers.PopHighestIndex();
    int count = count_before - registers.Count();
    PushHelper(count, size, src0, src1, src2, src3);
  }
}


void MacroAssembler::PopCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  // Pop up to four registers at a time because if the current stack pointer is
  // csp and reg_size is 32, registers must be pushed in blocks of four in
  // order to maintain the 16-byte alignment for csp.
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    const CPURegister& dst2 = registers.PopLowestIndex();
    const CPURegister& dst3 = registers.PopLowestIndex();
    int count = count_before - registers.Count();
    PopHelper(count, size, dst0, dst1, dst2, dst3);
  }
  PopPostamble(registers.Count(), size);
}


void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
  int size = src.SizeInBytes();

  PushPreamble(count, size);

  if (FLAG_optimize_for_size && count > 8) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Label loop;
    __ Mov(temp, count / 2);
    __ Bind(&loop);
    PushHelper(2, size, src, src, NoReg, NoReg);
    __ Subs(temp, temp, 1);
    __ B(ne, &loop);

    count %= 2;
  }

  // Push up to four registers at a time if possible because if the current
  // stack pointer is csp and the register size is 32, registers must be pushed
  // in blocks of four in order to maintain the 16-byte alignment for csp.
  while (count >= 4) {
    PushHelper(4, size, src, src, src, src);
    count -= 4;
  }
  if (count >= 2) {
    PushHelper(2, size, src, src, NoReg, NoReg);
    count -= 2;
  }
  if (count == 1) {
    PushHelper(1, size, src, NoReg, NoReg, NoReg);
    count -= 1;
  }
  DCHECK(count == 0);
}


void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
  PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireSameSizeAs(count);

  if (FLAG_optimize_for_size) {
    Label loop, done;

    Subs(temp, count, 1);
    B(mi, &done);

    // Push all registers individually, to save code size.
    Bind(&loop);
    Subs(temp, temp, 1);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
    B(pl, &loop);

    Bind(&done);
  } else {
    Label loop, leftover2, leftover1, done;

    Subs(temp, count, 4);
    B(mi, &leftover2);

    // Push groups of four first.
    Bind(&loop);
    Subs(temp, temp, 4);
    PushHelper(4, src.SizeInBytes(), src, src, src, src);
    B(pl, &loop);

    // Push groups of two.
    Bind(&leftover2);
    Tbz(count, 1, &leftover1);
    PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);

    // Push the last one (if required).
    Bind(&leftover1);
    Tbz(count, 0, &done);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);

    Bind(&done);
  }
}


void MacroAssembler::PushHelper(int count, int size,
                                const CPURegister& src0,
                                const CPURegister& src1,
                                const CPURegister& src2,
                                const CPURegister& src3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
  DCHECK(size == src0.SizeInBytes());

  // When pushing multiple registers, the store order is chosen such that
  // Push(a, b) is equivalent to Push(a) followed by Push(b).
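  // For example, Push(x0, x1) leaves x1 at the new top of the stack and x0
  // immediately above it, just as two single pushes would.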
  switch (count) {
    case 1:
      DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
      str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
      break;
    case 2:
      DCHECK(src2.IsNone() && src3.IsNone());
      stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
      break;
    case 3:
      DCHECK(src3.IsNone());
      stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
      str(src0, MemOperand(StackPointer(), 2 * size));
      break;
    case 4:
      // Skip over 4 * size, then fill in the gap. This allows four W registers
      // to be pushed using csp, whilst maintaining 16-byte alignment for csp
      // at all times.
      stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
      stp(src1, src0, MemOperand(StackPointer(), 2 * size));
      break;
    default:
      UNREACHABLE();
  }
}


void MacroAssembler::PopHelper(int count, int size,
                               const CPURegister& dst0,
                               const CPURegister& dst1,
                               const CPURegister& dst2,
                               const CPURegister& dst3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  DCHECK(size == dst0.SizeInBytes());

  // When popping multiple registers, the load order is chosen such that
  // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
  switch (count) {
    case 1:
      DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
      ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
      break;
    case 2:
      DCHECK(dst2.IsNone() && dst3.IsNone());
      ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
      break;
    case 3:
      DCHECK(dst3.IsNone());
      ldr(dst2, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
      break;
    case 4:
      // Load the higher addresses first, then load the lower addresses and
      // skip the whole block in the second instruction. This allows four W
      // registers to be popped using csp, whilst maintaining 16-byte alignment
      // for csp at all times.
      ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
      break;
    default:
      UNREACHABLE();
  }
}


void MacroAssembler::PushPreamble(Operand total_size) {
  if (csp.Is(StackPointer())) {
    // If the current stack pointer is csp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    if (total_size.IsImmediate()) {
      DCHECK((total_size.ImmediateValue() % 16) == 0);
    }

    // Don't check access size for non-immediate sizes. It's difficult to do
    // well, and it will be caught by hardware (or the simulator) anyway.
  } else {
    // Even if the current stack pointer is not the system stack pointer (csp),
    // the system stack pointer will still be modified in order to comply with
    // ABI rules about accessing memory below the system stack pointer.
    BumpSystemStackPointer(total_size);
  }
}


void MacroAssembler::PopPostamble(Operand total_size) {
  if (csp.Is(StackPointer())) {
    // If the current stack pointer is csp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    if (total_size.IsImmediate()) {
      DCHECK((total_size.ImmediateValue() % 16) == 0);
    }

    // Don't check access size for non-immediate sizes. It's difficult to do
    // well, and it will be caught by hardware (or the simulator) anyway.
  } else if (emit_debug_code()) {
    // It is safe to leave csp where it is when unwinding the JavaScript stack,
    // but if we keep it matching StackPointer, the simulator can detect memory
    // accesses in the now-free part of the stack.
    SyncSystemStackPointer();
  }
}


void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
  if (offset.IsImmediate()) {
    DCHECK(offset.ImmediateValue() >= 0);
  } else if (emit_debug_code()) {
    Cmp(xzr, offset);
    Check(le, kStackAccessBelowStackPointer);
  }

  Str(src, MemOperand(StackPointer(), offset));
}


void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
  if (offset.IsImmediate()) {
    DCHECK(offset.ImmediateValue() >= 0);
  } else if (emit_debug_code()) {
    Cmp(xzr, offset);
    Check(le, kStackAccessBelowStackPointer);
  }

  Ldr(dst, MemOperand(StackPointer(), offset));
}


void MacroAssembler::PokePair(const CPURegister& src1,
                              const CPURegister& src2,
                              int offset) {
  DCHECK(AreSameSizeAndType(src1, src2));
  DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
  Stp(src1, src2, MemOperand(StackPointer(), offset));
}


void MacroAssembler::PeekPair(const CPURegister& dst1,
                              const CPURegister& dst2,
                              int offset) {
  DCHECK(AreSameSizeAndType(dst1, dst2));
  DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
  Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
}


void MacroAssembler::PushCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is the
  // system stack pointer (csp).
  DCHECK(csp.Is(StackPointer()));

  MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);

  stp(d14, d15, tos);
  stp(d12, d13, tos);
  stp(d10, d11, tos);
  stp(d8, d9, tos);

  stp(x29, x30, tos);
  stp(x27, x28, tos);    // x28 = jssp
  stp(x25, x26, tos);
  stp(x23, x24, tos);
  stp(x21, x22, tos);
  stp(x19, x20, tos);
}


void MacroAssembler::PopCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is the
  // system stack pointer (csp).
  DCHECK(csp.Is(StackPointer()));

  MemOperand tos(csp, 2 * kXRegSize, PostIndex);

  ldp(x19, x20, tos);
  ldp(x21, x22, tos);
  ldp(x23, x24, tos);
  ldp(x25, x26, tos);
  ldp(x27, x28, tos);    // x28 = jssp
  ldp(x29, x30, tos);

  ldp(d8, d9, tos);
  ldp(d10, d11, tos);
  ldp(d12, d13, tos);
  ldp(d14, d15, tos);
}


void MacroAssembler::AssertStackConsistency() {
  // Avoid emitting code when !use_real_abort() since non-real aborts cause too
  // much code to be generated.
  if (emit_debug_code() && use_real_aborts()) {
    if (csp.Is(StackPointer())) {
      // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true.  We
      // can't check the alignment of csp without using a scratch register (or
      // clobbering the flags), but the processor (or simulator) will abort if
      // it is not properly aligned during a load.
      ldr(xzr, MemOperand(csp, 0));
    }
    if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
      Label ok;
      // Check that csp <= StackPointer(), preserving all registers and NZCV.
      sub(StackPointer(), csp, StackPointer());
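      // StackPointer() now holds csp - StackPointer(); repeating the same
      // subtraction restores the original value, which is how the register is
      // recovered below without needing a scratch register.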
      cbz(StackPointer(), &ok);                 // Ok if csp == StackPointer().
      tbnz(StackPointer(), kXSignBit, &ok);     // Ok if csp < StackPointer().

      // Avoid generating AssertStackConsistency checks for the Push in Abort.
      { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
        // Restore StackPointer().
        sub(StackPointer(), csp, StackPointer());
        Abort(kTheCurrentStackPointerIsBelowCsp);
      }

      bind(&ok);
      // Restore StackPointer().
      sub(StackPointer(), csp, StackPointer());
    }
  }
}

void MacroAssembler::AssertCspAligned() {
  if (emit_debug_code() && use_real_aborts()) {
    // TODO(titzer): use a real assert for alignment check?
    UseScratchRegisterScope scope(this);
    Register temp = scope.AcquireX();
    ldr(temp, MemOperand(csp));
  }
}

void MacroAssembler::AssertFPCRState(Register fpcr) {
  if (emit_debug_code()) {
    Label unexpected_mode, done;
    UseScratchRegisterScope temps(this);
    if (fpcr.IsNone()) {
      fpcr = temps.AcquireX();
      Mrs(fpcr, FPCR);
    }

    // Settings left to their default values:
    //   - Assert that flush-to-zero is not set.
    Tbnz(fpcr, FZ_offset, &unexpected_mode);
    //   - Assert that the rounding mode is nearest-with-ties-to-even.
    STATIC_ASSERT(FPTieEven == 0);
    Tst(fpcr, RMode_mask);
    B(eq, &done);

    Bind(&unexpected_mode);
    Abort(kUnexpectedFPCRMode);

    Bind(&done);
  }
}


void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
                                     const FPRegister& src) {
  AssertFPCRState();

  // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
  // become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0
  // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
  Fsub(dst, src, fp_zero);
}


void MacroAssembler::LoadRoot(CPURegister destination,
                              Heap::RootListIndex index) {
  // TODO(jbramley): Most root values are constants, and can be synthesized
  // without a load. Refer to the ARM back end for details.
  Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  Str(source, MemOperand(root, index << kPointerSizeLog2));
}


void MacroAssembler::LoadTrueFalseRoots(Register true_root,
                                        Register false_root) {
  STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
  Ldp(true_root, false_root,
      MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
}


void MacroAssembler::LoadHeapObject(Register result,
                                    Handle<HeapObject> object) {
  Mov(result, Operand(object));
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  Ldrsw(dst, FieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Map::EnumLengthBits::kMask);
}


void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
  EnumLengthUntagged(dst, map);
  SmiTag(dst, dst);
}


void MacroAssembler::LoadAccessor(Register dst, Register holder,
                                  int accessor_index,
                                  AccessorComponent accessor) {
  Ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
  LoadInstanceDescriptors(dst, dst);
  Ldr(dst,
      FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
                                           : AccessorPair::kSetterOffset;
  Ldr(dst, FieldMemOperand(dst, offset));
}


void MacroAssembler::CheckEnumCache(Register object, Register scratch0,
                                    Register scratch1, Register scratch2,
                                    Register scratch3, Register scratch4,
                                    Label* call_runtime) {
  DCHECK(!AreAliased(object, scratch0, scratch1, scratch2, scratch3, scratch4));

  Register empty_fixed_array_value = scratch0;
  Register current_object = scratch1;
  Register null_value = scratch4;

  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;

  Mov(current_object, object);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  Register map = scratch2;
  Register enum_length = scratch3;
  Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));

  EnumLengthUntagged(enum_length, map);
  Cmp(enum_length, kInvalidEnumCacheSentinel);
  B(eq, call_runtime);

  LoadRoot(null_value, Heap::kNullValueRootIndex);
  B(&start);

  Bind(&next);
  Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLengthUntagged(enum_length, map);
  Cbnz(enum_length, call_runtime);

  Bind(&start);

  // Check that there are no elements. Register current_object contains the
  // current JS object we've reached through the prototype chain.
  Label no_elements;
  Ldr(current_object, FieldMemOperand(current_object,
                                      JSObject::kElementsOffset));
  Cmp(current_object, empty_fixed_array_value);
  B(eq, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
  B(ne, call_runtime);

  Bind(&no_elements);
  Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
  Cmp(current_object, null_value);
  B(ne, &next);
}


void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Label* no_memento_found) {
  Label map_check;
  Label top_check;
  ExternalReference new_space_allocation_top_adr =
      ExternalReference::new_space_allocation_top_address(isolate());
  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;

  // Bail out if the object is not in new space.
  JumpIfNotInNewSpace(receiver, no_memento_found);
  Add(scratch1, receiver, kMementoEndOffset);
  // If the object is in new space, we need to check whether it is on the same
  // page as the current top.
  Mov(scratch2, new_space_allocation_top_adr);
  Ldr(scratch2, MemOperand(scratch2));
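  // Two addresses lie on the same page exactly when they agree in all bits
  // above the page offset, so XOR-ing them and testing the bits selected by
  // ~Page::kPageAlignmentMask yields zero only for same-page addresses.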
  Eor(scratch2, scratch1, scratch2);
  Tst(scratch2, ~Page::kPageAlignmentMask);
  B(eq, &top_check);
  // The object is on a different page than allocation top. Bail out if the
  // object sits on the page boundary as no memento can follow and we cannot
  // touch the memory following it.
  Eor(scratch2, scratch1, receiver);
  Tst(scratch2, ~Page::kPageAlignmentMask);
  B(ne, no_memento_found);
  // Continue with the actual map check.
  jmp(&map_check);
  // If top is on the same page as the current object, we need to check whether
  // we are below top.
  bind(&top_check);
  Mov(scratch2, new_space_allocation_top_adr);
  Ldr(scratch2, MemOperand(scratch2));
  Cmp(scratch1, scratch2);
  B(gt, no_memento_found);
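  // If the memento would end above the current allocation top, it cannot
  // have been allocated yet, so there is nothing to check.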
  // Memento map check.
  bind(&map_check);
  Ldr(scratch1, MemOperand(receiver, kMementoMapOffset));
  Cmp(scratch1, Operand(isolate()->factory()->allocation_memento_map()));
}


void MacroAssembler::InNewSpace(Register object,
                                Condition cond,
                                Label* branch) {
  DCHECK(cond == eq || cond == ne);
  UseScratchRegisterScope temps(this);
  CheckPageFlag(object, temps.AcquireSameSizeAs(object),
                MemoryChunk::kIsInNewSpaceMask, cond, branch);
}


void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(eq, reason);
  }
}


void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(ne, reason);
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object, kOperandIsASmiAndNotAName);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, LAST_NAME_TYPE);
    Check(ls, kOperandIsNotAName);
  }
}


void MacroAssembler::AssertFunction(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object, kOperandIsASmiAndNotAFunction);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    CompareObjectType(object, temp, temp, JS_FUNCTION_TYPE);
    Check(eq, kOperandIsNotAFunction);
  }
}


void MacroAssembler::AssertBoundFunction(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object, kOperandIsASmiAndNotABoundFunction);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    CompareObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
    Check(eq, kOperandIsNotABoundFunction);
  }
}

void MacroAssembler::AssertGeneratorObject(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object, kOperandIsASmiAndNotAGeneratorObject);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    CompareObjectType(object, temp, temp, JS_GENERATOR_OBJECT_TYPE);
    Check(eq, kOperandIsNotAGeneratorObject);
  }
}

void MacroAssembler::AssertReceiver(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object, kOperandIsASmiAndNotAReceiver);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    CompareObjectType(object, temp, temp, FIRST_JS_RECEIVER_TYPE);
    Check(hs, kOperandIsNotAReceiver);
  }
}


void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
    Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
    Assert(eq, kExpectedUndefinedOrCell);
    Bind(&done_checking);
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(ne, kOperandIsASmiAndNotAString);
    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
    Check(lo, kOperandIsNotAString);
  }
}


void MacroAssembler::AssertPositiveOrZero(Register value) {
  if (emit_debug_code()) {
    Label done;
    int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
    Tbz(value, sign_bit, &done);
    Abort(kUnexpectedNegativeValue);
    Bind(&done);
  }
}

void MacroAssembler::AssertNotNumber(Register value) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    Tst(value, kSmiTagMask);
    Check(ne, kOperandIsANumber);
    Label done;
    JumpIfNotHeapNumber(value, &done);
    Abort(kOperandIsANumber);
    Bind(&done);
  }
}

void MacroAssembler::AssertNumber(Register value) {
  if (emit_debug_code()) {
    Label done;
    JumpIfSmi(value, &done);
    JumpIfHeapNumber(value, &done);
    Abort(kOperandIsNotANumber);
    Bind(&done);
  }
}

void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}


void MacroAssembler::TailCallStub(CodeStub* stub) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // All arguments must be on the stack before this function is called.
  // x0 holds the return value after the call.

  // Check that the number of arguments matches what the function expects.
  // If f->nargs is -1, the function can accept a variable number of arguments.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // Place the necessary arguments.
  Mov(x0, num_arguments);
  Mov(x1, ExternalReference(f, isolate()));

  CEntryStub stub(isolate(), 1, save_doubles);
  CallStub(&stub);
}


void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  Mov(x0, num_arguments);
  Mov(x1, ext);

  CEntryStub stub(isolate(), 1);
  CallStub(&stub);
}

void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
                                             bool builtin_exit_frame) {
  Mov(x1, builtin);
  CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
                  builtin_exit_frame);
  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}

void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
  const Runtime::Function* function = Runtime::FunctionForId(fid);
  DCHECK_EQ(1, function->result_size);
  if (function->nargs >= 0) {
    // TODO(1236192): Most runtime routines don't need the number of
    // arguments passed in because it is constant. At some point we
    // should remove this need and make the runtime routine entry code
    // smarter.
    Mov(x0, function->nargs);
  }
  JumpToExternalReference(ExternalReference(fid, isolate()));
}


void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  DCHECK(!AreAliased(string, length, scratch1, scratch2));
  LoadRoot(scratch2, map_index);
  SmiTag(scratch1, length);
  Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));

  Mov(scratch2, String::kEmptyHashField);
  Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
  Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
}


int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM64
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one ARM
  // platform for another ARM platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // V8_HOST_ARCH_ARM64
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_ARM64
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_of_reg_args) {
  CallCFunction(function, num_of_reg_args, 0);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_of_reg_args,
                                   int num_of_double_args) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Mov(temp, function);
  CallCFunction(temp, num_of_reg_args, num_of_double_args);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_of_reg_args,
                                   int num_of_double_args) {
  DCHECK(has_frame());
  // We can pass 8 integer arguments in registers. If we need to pass more than
  // that, we'll need to implement support for passing them on the stack.
  DCHECK(num_of_reg_args <= 8);

  // If we're passing doubles, we're limited to the following prototypes
  // (defined by ExternalReference::Type):
  //  BUILTIN_COMPARE_CALL:  int f(double, double)
  //  BUILTIN_FP_FP_CALL:    double f(double, double)
  //  BUILTIN_FP_CALL:       double f(double)
  //  BUILTIN_FP_INT_CALL:   double f(double, int)
  if (num_of_double_args > 0) {
    DCHECK(num_of_reg_args <= 1);
    DCHECK((num_of_double_args + num_of_reg_args) <= 2);
  }


  // If the stack pointer is not csp, we need to derive an aligned csp from the
  // current stack pointer.
  const Register old_stack_pointer = StackPointer();
  if (!csp.Is(old_stack_pointer)) {
    AssertStackConsistency();

    int sp_alignment = ActivationFrameAlignment();
    // The ABI mandates at least 16-byte alignment.
    DCHECK(sp_alignment >= 16);
    DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));

    // The current stack pointer is a callee saved register, and is preserved
    // across the call.
    DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));

    // Align and synchronize the system stack pointer with jssp.
    Bic(csp, old_stack_pointer, sp_alignment - 1);
    SetStackPointer(csp);
  }

  // Call directly. The function called cannot cause a GC, or allow preemption,
  // so the return address in the link register stays correct.
  Call(function);

  if (!csp.Is(old_stack_pointer)) {
    if (emit_debug_code()) {
      // Because the stack pointer must be aligned on a 16-byte boundary, the
      // aligned csp can be up to 12 bytes below the jssp. This is the case
      // where we only pushed one W register on top of an aligned jssp.
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireX();
      DCHECK(ActivationFrameAlignment() == 16);
      Sub(temp, csp, old_stack_pointer);
      // We want temp <= 0 && temp >= -12.
      Cmp(temp, 0);
      Ccmp(temp, -12, NFlag, le);
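      // The Ccmp only performs the comparison with -12 when temp <= 0;
      // otherwise it forces the N flag, so the Check below fails whenever
      // the derived csp is not within [jssp - 12, jssp].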
      Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
    }
    SetStackPointer(old_stack_pointer);
  }
}


void MacroAssembler::Jump(Register target) {
  Br(target);
}


void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
  if (cond == nv) return;
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Label done;
  if (cond != al) B(NegateCondition(cond), &done);
  Mov(temp, Operand(target, rmode));
  Br(temp);
  Bind(&done);
}


void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}


void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  AllowDeferredHandleDereference embedding_raw_address;
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}


void MacroAssembler::Call(Register target) {
  BlockPoolsScope scope(this);
#ifdef DEBUG
  Label start_call;
  Bind(&start_call);
#endif

  Blr(target);

#ifdef DEBUG
  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
#endif
}


void MacroAssembler::Call(Label* target) {
  BlockPoolsScope scope(this);
#ifdef DEBUG
  Label start_call;
  Bind(&start_call);
#endif

  Bl(target);

#ifdef DEBUG
  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
#endif
}


// MacroAssembler::CallSize is sensitive to changes in this function, as it
// requires to know how many instructions are used to branch to the target.
void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
  BlockPoolsScope scope(this);
#ifdef DEBUG
  Label start_call;
  Bind(&start_call);
#endif

  // Addresses always have 64 bits, so we shouldn't encounter NONE32.
  DCHECK(rmode != RelocInfo::NONE32);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();

  if (rmode == RelocInfo::NONE64) {
    // Addresses are 48 bits, so we never need to load the upper 16 bits.
    uint64_t imm = reinterpret_cast<uint64_t>(target);
    // If we don't use ARM tagged addresses, the top 16 bits must be 0.
    DCHECK(((imm >> 48) & 0xffff) == 0);
    movz(temp, (imm >> 0) & 0xffff, 0);
    movk(temp, (imm >> 16) & 0xffff, 16);
    movk(temp, (imm >> 32) & 0xffff, 32);
  } else {
    Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
  }
  Blr(temp);
#ifdef DEBUG
  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
#endif
}


void MacroAssembler::Call(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id) {
#ifdef DEBUG
  Label start_call;
  Bind(&start_call);
#endif

  if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  }

  AllowDeferredHandleDereference embedding_raw_address;
  Call(reinterpret_cast<Address>(code.location()), rmode);

#ifdef DEBUG
  // Check the size of the code generated.
  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
#endif
}


int MacroAssembler::CallSize(Register target) {
  USE(target);
  return kInstructionSize;
}


int MacroAssembler::CallSize(Label* target) {
  USE(target);
  return kInstructionSize;
}


int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
  USE(target);

  // Addresses always have 64 bits, so we shouldn't encounter NONE32.
  DCHECK(rmode != RelocInfo::NONE32);

  if (rmode == RelocInfo::NONE64) {
    return kCallSizeWithoutRelocation;
  } else {
    return kCallSizeWithRelocation;
  }
}


int MacroAssembler::CallSize(Handle<Code> code,
                             RelocInfo::Mode rmode,
                             TypeFeedbackId ast_id) {
  USE(code);
  USE(ast_id);

  // Addresses always have 64 bits, so we shouldn't encounter NONE32.
  DCHECK(rmode != RelocInfo::NONE32);

  if (rmode == RelocInfo::NONE64) {
    return kCallSizeWithoutRelocation;
  } else {
    return kCallSizeWithRelocation;
  }
}


void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
                                      SmiCheckType smi_check_type) {
  Label on_not_heap_number;

  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(object, &on_not_heap_number);
  }

  AssertNotSmi(object);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);

  Bind(&on_not_heap_number);
}


void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Label* on_not_heap_number,
                                         SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(object, on_not_heap_number);
  }

  AssertNotSmi(object);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
}


void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
                                             FPRegister value,
                                             FPRegister scratch_d,
                                             Label* on_successful_conversion,
                                             Label* on_failed_conversion) {
  // Convert to an int and back again, then compare with the original value.
  Fcvtzs(as_int, value);
  Scvtf(scratch_d, as_int);
  Fcmp(value, scratch_d);

  if (on_successful_conversion) {
    B(on_successful_conversion, eq);
  }
  if (on_failed_conversion) {
    B(on_failed_conversion, ne);
  }
}


void MacroAssembler::TestForMinusZero(DoubleRegister input) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  // The bit pattern of floating point -0.0 is the most negative 64-bit
  // integer, so subtracting 1 (cmp) will cause signed overflow.
  Fmov(temp, input);
  Cmp(temp, 1);
}


void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
                                     Label* on_negative_zero) {
  TestForMinusZero(input);
  B(vs, on_negative_zero);
}


void MacroAssembler::JumpIfMinusZero(Register input,
                                     Label* on_negative_zero) {
  DCHECK(input.Is64Bits());
  // Floating point value is in an integer register. Detect -0.0 by subtracting
  // 1 (cmp), which will cause overflow.
  Cmp(input, 1);
  B(vs, on_negative_zero);
}


void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
  // Clamp the value to [0..255].
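  // Comparing the value with its own lowest byte classifies it in one step:
  // e.g. -5 < (-5 & 0xff) == 251 gives 'lt' and saturates to 0, whereas
  // 300 > (300 & 0xff) == 44 gives 'gt' and saturates to 255 below.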
  Cmp(input.W(), Operand(input.W(), UXTB));
  // If input < input & 0xff, it must be < 0, so saturate to 0.
  Csel(output.W(), wzr, input.W(), lt);
  // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255.
  Csel(output.W(), output.W(), 255, le);
}


void MacroAssembler::ClampInt32ToUint8(Register in_out) {
  ClampInt32ToUint8(in_out, in_out);
}


void MacroAssembler::ClampDoubleToUint8(Register output,
                                        DoubleRegister input,
                                        DoubleRegister dbl_scratch) {
  // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
  //   - Inputs lower than 0 (including -infinity) produce 0.
  //   - Inputs higher than 255 (including +infinity) produce 255.
  // Also, it seems that PIXEL types use round-to-nearest rather than
  // round-towards-zero.

  // Squash +infinity before the conversion, since Fcvtnu will normally
  // convert it to 0.
  Fmov(dbl_scratch, 255);
  Fmin(dbl_scratch, dbl_scratch, input);

  // Convert double to unsigned integer. Values less than zero become zero.
  // Values greater than 255 have already been clamped to 255.
  Fcvtnu(output, dbl_scratch);
}


void MacroAssembler::CopyBytes(Register dst,
                               Register src,
                               Register length,
                               Register scratch,
                               CopyHint hint) {
  UseScratchRegisterScope temps(this);
  Register tmp1 = temps.AcquireX();
  Register tmp2 = temps.AcquireX();
  DCHECK(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
  DCHECK(!AreAliased(src, dst, csp));

  if (emit_debug_code()) {
    // Check copy length.
    Cmp(length, 0);
    Assert(ge, kUnexpectedNegativeValue);

    // Check src and dst buffers don't overlap.
    Add(scratch, src, length);  // Calculate end of src buffer.
    Cmp(scratch, dst);
    Add(scratch, dst, length);  // Calculate end of dst buffer.
    Ccmp(scratch, src, ZFlag, gt);
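    // Together, the Cmp/Ccmp pair asserts that either the source buffer ends
    // at or before dst, or the destination buffer ends at or before src. If
    // the first comparison is not 'gt', the flags are forced to 'eq' so the
    // 'le' assertion passes without the second comparison.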
    Assert(le, kCopyBuffersOverlap);
  }

  Label short_copy, short_loop, bulk_loop, done;

  if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
    Register bulk_length = scratch;
    int pair_size = 2 * kXRegSize;
    int pair_mask = pair_size - 1;

    Bic(bulk_length, length, pair_mask);
    Cbz(bulk_length, &short_copy);
    Bind(&bulk_loop);
    Sub(bulk_length, bulk_length, pair_size);
    Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
    Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
    Cbnz(bulk_length, &bulk_loop);

    And(length, length, pair_mask);
  }

  Bind(&short_copy);
  Cbz(length, &done);
  Bind(&short_loop);
  Sub(length, length, 1);
  Ldrb(tmp1, MemOperand(src, 1, PostIndex));
  Strb(tmp1, MemOperand(dst, 1, PostIndex));
  Cbnz(length, &short_loop);


  Bind(&done);
}


void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
                                                Register end_address,
                                                Register filler) {
  DCHECK(!current_address.Is(csp));
  UseScratchRegisterScope temps(this);
  Register distance_in_words = temps.AcquireX();
  Label done;

  // Calculate the distance. If it's <= zero then there's nothing to do.
  Subs(distance_in_words, end_address, current_address);
  B(le, &done);

  // There's at least one field to fill, so do this unconditionally.
  Str(filler, MemOperand(current_address));

  // If the distance is an odd number of words we advance current_address by
  // one word, otherwise the pairs loop will overwrite the field that was
  // stored above.
  And(distance_in_words, distance_in_words, kPointerSize);
  Add(current_address, current_address, distance_in_words);
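  // For example, with a distance of three words the And above leaves
  // kPointerSize, so current_address skips the word already written and the
  // pair loop below fills the remaining two words; an even distance leaves
  // zero and the pair loop simply rewrites the first word.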

  // Store filler to memory in pairs.
  Label loop, entry;
  B(&entry);
  Bind(&loop);
  Stp(filler, filler, MemOperand(current_address, 2 * kPointerSize, PostIndex));
  Bind(&entry);
  Cmp(current_address, end_address);
  B(lo, &loop);

  Bind(&done);
}


void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure, SmiCheckType smi_check) {
  if (smi_check == DO_SMI_CHECK) {
    JumpIfEitherSmi(first, second, failure);
  } else if (emit_debug_code()) {
    DCHECK(smi_check == DONT_DO_SMI_CHECK);
    Label not_smi;
    JumpIfEitherSmi(first, second, NULL, &not_smi);

    // At least one input is a smi, but the flags indicated a smi check wasn't
    // needed.
    Abort(kUnexpectedSmi);

    Bind(&not_smi);
  }

  // Test that both first and second are sequential one-byte strings.
  Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,
                                                 scratch2, failure);
}


void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  DCHECK(!AreAliased(scratch1, second));
  DCHECK(!AreAliased(scratch1, scratch2));
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  And(scratch1, first, kFlatOneByteStringMask);
  And(scratch2, second, kFlatOneByteStringMask);
  Cmp(scratch1, kFlatOneByteStringTag);
  Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
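  // The Ccmp only tests the second instance type when the first one matched
  // the sequential one-byte tag; otherwise the flags are cleared so that the
  // branch below is taken.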
  B(ne, failure);
}


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
                                                              Register scratch,
                                                              Label* failure) {
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  And(scratch, type, kFlatOneByteStringMask);
  Cmp(scratch, kFlatOneByteStringTag);
  B(ne, failure);
}


void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure) {
  DCHECK(!AreAliased(first, second, scratch1, scratch2));
  const int kFlatOneByteStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatOneByteStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  And(scratch1, first, kFlatOneByteStringMask);
  And(scratch2, second, kFlatOneByteStringMask);
  Cmp(scratch1, kFlatOneByteStringTag);
  Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
  B(ne, failure);
}


void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
                                                     Label* not_unique_name) {
  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
  // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
  //   continue
  // } else {
  //   goto not_unique_name
  // }
  Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
  Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
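  // The Ccmp compares against SYMBOL_TYPE only when the internalized-string
  // test above failed; otherwise it forces the Z flag so the branch below
  // falls through.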
  B(ne, not_unique_name);
}

void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
                                        Register caller_args_count_reg,
                                        Register scratch0, Register scratch1) {
#if DEBUG
  if (callee_args_count.is_reg()) {
    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
                       scratch1));
  } else {
    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
  }
#endif

  // Calculate the end of destination area where we will put the arguments
  // after we drop current frame. We add kPointerSize to count the receiver
  // argument which is not included into formal parameters count.
  Register dst_reg = scratch0;
  __ add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
  __ add(dst_reg, dst_reg,
         Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));

  Register src_reg = caller_args_count_reg;
  // Calculate the end of source area. +kPointerSize is for the receiver.
  if (callee_args_count.is_reg()) {
    add(src_reg, jssp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
    add(src_reg, src_reg, Operand(kPointerSize));
  } else {
    add(src_reg, jssp,
        Operand((callee_args_count.immediate() + 1) * kPointerSize));
  }

  if (FLAG_debug_code) {
    __ Cmp(src_reg, dst_reg);
    __ Check(lo, kStackAccessBelowStackPointer);
  }

  // Restore caller's frame pointer and return address now as they will be
  // overwritten by the copying loop.
  __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Now copy callee arguments to the caller frame going backwards to avoid
  // callee arguments corruption (source and destination areas could overlap).

  // Both src_reg and dst_reg are pointing to the word after the one to copy,
  // so they must be pre-decremented in the loop.
  Register tmp_reg = scratch1;
  Label loop, entry;
  __ B(&entry);
  __ bind(&loop);
  __ Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
  __ Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
  __ bind(&entry);
  __ Cmp(jssp, src_reg);
  __ B(ne, &loop);

  // Leave current frame.
  __ Mov(jssp, dst_reg);
  __ SetStackPointer(jssp);
  __ AssertStackConsistency();
}

void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Label* done,
                                    InvokeFlag flag,
                                    bool* definitely_mismatches,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // setup registers according to contract with ArgumentsAdaptorTrampoline:
  //  x0: actual arguments count.
  //  x1: function (passed through to callee).
  //  x2: expected arguments count.

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
  DCHECK(actual.is_immediate() || actual.reg().is(x0));
  DCHECK(expected.is_immediate() || expected.reg().is(x2));

  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    Mov(x0, actual.immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;

    } else {
      if (expected.immediate() ==
          SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        // Set up x2 for the argument adaptor.
        Mov(x2, expected.immediate());
      }
    }

  } else {  // expected is a register.
    Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
                                              : Operand(actual.reg());
    Mov(x0, actual_op);
    // If actual == expected perform a regular invocation.
    Cmp(expected.reg(), actual_op);
    B(eq, &regular_invoke);
  }

  // If the argument counts may mismatch, generate a call to the argument
  // adaptor.
  if (!definitely_matches) {
    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      Call(adaptor);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        // If the arg counts don't match, no extra code is emitted by
        // MAsm::InvokeFunctionCode and we can just fall through.
        B(done);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
  }
  Bind(&regular_invoke);
}


void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
                                             const ParameterCount& expected,
                                             const ParameterCount& actual) {
  Label skip_flooding;
  ExternalReference last_step_action =
      ExternalReference::debug_last_step_action_address(isolate());
  STATIC_ASSERT(StepFrame > StepIn);
  Mov(x4, Operand(last_step_action));
  Ldrsb(x4, MemOperand(x4));
  CompareAndBranch(x4, Operand(StepIn), lt, &skip_flooding);
  {
    FrameScope frame(this,
                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
    if (expected.is_reg()) {
      SmiTag(expected.reg());
      Push(expected.reg());
    }
    if (actual.is_reg()) {
      SmiTag(actual.reg());
      Push(actual.reg());
    }
    if (new_target.is_valid()) {
      Push(new_target);
    }
    Push(fun);
    Push(fun);
    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
    Pop(fun);
    if (new_target.is_valid()) {
      Pop(new_target);
    }
    if (actual.is_reg()) {
      Pop(actual.reg());
      SmiUntag(actual.reg());
    }
    if (expected.is_reg()) {
      Pop(expected.reg());
      SmiUntag(expected.reg());
    }
  }
  bind(&skip_flooding);
}


void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
                                        const ParameterCount& expected,
                                        const ParameterCount& actual,
                                        InvokeFlag flag,
                                        const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());
  DCHECK(function.is(x1));
  DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));

  FloodFunctionIfStepping(function, new_target, expected, actual);

  // Clear the new.target register if not given.
  if (!new_target.is_valid()) {
    LoadRoot(x3, Heap::kUndefinedValueRootIndex);
  }

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, &done, flag, &definitely_mismatches,
                 call_wrapper);

  // If we are certain that actual != expected, then we know InvokePrologue will
  // have handled the call through the argument adaptor mechanism.
  // The called function expects the call kind in x5.
  if (!definitely_mismatches) {
    // We call indirectly through the code field in the function to
    // allow recompilation to take effect without changing any of the
    // call sites.
    Register code = x4;
    Ldr(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      Call(code);
      call_wrapper.AfterCall();
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      Jump(code);
    }
  }

  // Continue here if InvokePrologue does handle the invocation due to
  // mismatched parameter counts.
  Bind(&done);
}


void MacroAssembler::InvokeFunction(Register function,
                                    Register new_target,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in x1.
  // (See FullCodeGenerator::Generate().)
  DCHECK(function.is(x1));

  Register expected_reg = x2;

  Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
  // The number of arguments is stored as an int32_t, and -1 is a marker
  // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
  // extension to correctly handle it.
  Ldr(expected_reg, FieldMemOperand(function,
                                    JSFunction::kSharedFunctionInfoOffset));
  Ldrsw(expected_reg,
        FieldMemOperand(expected_reg,
                        SharedFunctionInfo::kFormalParameterCountOffset));

  ParameterCount expected(expected_reg);
  InvokeFunctionCode(function, new_target, expected, actual, flag,
                     call_wrapper);
}


void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in x1.
  // (See FullCodeGenerator::Generate().)
  DCHECK(function.Is(x1));

  // Set up the context.
  Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));

  InvokeFunctionCode(function, no_reg, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // Contract with called JS functions requires that function is passed in x1.
  // (See FullCodeGenerator::Generate().)
  __ LoadObject(x1, function);
  InvokeFunction(x1, expected, actual, flag, call_wrapper);
}


void MacroAssembler::TryConvertDoubleToInt64(Register result,
                                             DoubleRegister double_input,
                                             Label* done) {
  // Try to convert with an FPU convert instruction. It's trivial to compute
  // the modulo operation on an integer register, so we convert to a 64-bit
  // integer.
  //
  // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
  // when the double is out of range. NaNs and infinities will be converted to 0
  // (as ECMA-262 requires).
  Fcvtzs(result.X(), double_input);

  // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
  // representable using a double, so if the result is one of those then we know
  // that saturation occurred, and we need to manually handle the conversion.
  //
  // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
  // 1 will cause signed overflow.
  Cmp(result.X(), 1);
  Ccmp(result.X(), -1, VFlag, vc);
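  // Cmp computes result - 1, which overflows only for INT64_MIN. If that did
  // not overflow, Ccmp computes result + 1, which overflows only for
  // INT64_MAX; otherwise the V flag is forced. So 'vc' below means the
  // conversion did not saturate.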

  B(vc, done);
}


void MacroAssembler::TruncateDoubleToI(Register result,
                                       DoubleRegister double_input) {
  Label done;

  // Try to convert the double to an int64. If successful, the bottom 32 bits
  // contain our truncated int32 result.
  TryConvertDoubleToInt64(result, double_input, &done);

  const Register old_stack_pointer = StackPointer();
  if (csp.Is(old_stack_pointer)) {
    // This currently only happens during compiler-unittest. If it arises
    // during regular code generation the DoubleToI stub should be updated to
    // cope with csp and have an extra parameter indicating which stack pointer
    // it should use.
    Push(jssp, xzr);  // Push xzr to maintain csp required 16-bytes alignment.
    Mov(jssp, csp);
    SetStackPointer(jssp);
  }

  // If we fell through, the inline version didn't succeed, so call the stub.
  Push(lr, double_input);

  DoubleToIStub stub(isolate(),
                     jssp,
                     result,
                     0,
                     true,   // is_truncating
                     true);  // skip_fastpath
  CallStub(&stub);  // DoubleToIStub preserves any registers it needs to clobber

  DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
  Pop(xzr, lr);  // xzr to drop the double input on the stack.

  if (csp.Is(old_stack_pointer)) {
    Mov(csp, jssp);
    SetStackPointer(csp);
    AssertStackConsistency();
    Pop(xzr, jssp);
  }

  Bind(&done);
}


void MacroAssembler::TruncateHeapNumberToI(Register result,
                                           Register object) {
  Label done;
  DCHECK(!result.is(object));
  DCHECK(jssp.Is(StackPointer()));

  Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));

  // Try to convert the double to an int64. If successful, the bottom 32 bits
  // contain our truncated int32 result.
  TryConvertDoubleToInt64(result, fp_scratch, &done);

  // If we fell through, the inline version didn't succeed, so call the stub.
  Push(lr);
  DoubleToIStub stub(isolate(),
                     object,
                     result,
                     HeapNumber::kValueOffset - kHeapObjectTag,
                     true,   // is_truncating
                     true);  // skip_fastpath
  CallStub(&stub);  // DoubleToIStub preserves any registers it needs to clobber
  Pop(lr);

  Bind(&done);
}

void MacroAssembler::StubPrologue(StackFrame::Type type, int frame_slots) {
  UseScratchRegisterScope temps(this);
  frame_slots -= TypedFrameConstants::kFixedSlotCountAboveFp;
  Register temp = temps.AcquireX();
  Mov(temp, Smi::FromInt(type));
  Push(lr, fp);
  Mov(fp, StackPointer());
  Claim(frame_slots);
  str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
}

void MacroAssembler::Prologue(bool code_pre_aging) {
  if (code_pre_aging) {
    Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
    __ EmitCodeAgeSequence(stub);
  } else {
    __ EmitFrameSetupForCodeAgePatching();
  }
}


void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
  Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  Ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
  Ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}


void MacroAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  // Out-of-line constant pool not implemented on arm64.
  UNREACHABLE();
}


void MacroAssembler::EnterFrame(StackFrame::Type type) {
  DCHECK(jssp.Is(StackPointer()));
  UseScratchRegisterScope temps(this);
  Register type_reg = temps.AcquireX();
  Register code_reg = temps.AcquireX();

  if (type == StackFrame::INTERNAL) {
    Mov(type_reg, Smi::FromInt(type));
    Push(lr, fp);
    Push(type_reg);
    Mov(code_reg, Operand(CodeObject()));
    Push(code_reg);
    Add(fp, jssp, InternalFrameConstants::kFixedFrameSizeFromFp);
    // jssp[3] : lr
    // jssp[2] : fp
    // jssp[1] : type
    // jssp[0] : [code object]
  } else {
    Mov(type_reg, Smi::FromInt(type));
    Push(lr, fp);
    Push(type_reg);
    Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp);
    // jssp[2] : lr
    // jssp[1] : fp
    // jssp[0] : type
  }
}


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  DCHECK(jssp.Is(StackPointer()));
  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer and return address.
  Mov(jssp, fp);
  AssertStackConsistency();
  Pop(fp, lr);
}


void MacroAssembler::ExitFramePreserveFPRegs() {
  PushCPURegList(kCallerSavedFP);
}


void MacroAssembler::ExitFrameRestoreFPRegs() {
  // Read the registers from the stack without popping them. The stack pointer
  // will be reset as part of the unwinding process.
  CPURegList saved_fp_regs = kCallerSavedFP;
  DCHECK(saved_fp_regs.Count() % 2 == 0);

  int offset = ExitFrameConstants::kLastExitFrameField;
  while (!saved_fp_regs.IsEmpty()) {
    const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
    const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
    offset -= 2 * kDRegSize;
    Ldp(dst1, dst0, MemOperand(fp, offset));
  }
}

void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
                                       Register argc) {
  Push(lr, fp, context, target);
  add(fp, jssp, Operand(2 * kPointerSize));
  Push(argc);
}

void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
                                       Register argc) {
  Pop(argc);
  Pop(target, context, fp, lr);
}

void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
                                    int extra_space,
                                    StackFrame::Type frame_type) {
  DCHECK(jssp.Is(StackPointer()));
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT);

  // Set up the new stack frame.
  Push(lr, fp);
  Mov(fp, StackPointer());
  Mov(scratch, Smi::FromInt(frame_type));
  Push(scratch);
  Push(xzr);
  Mov(scratch, Operand(CodeObject()));
  Push(scratch);
  //          fp[8]: CallerPC (lr)
  //    fp -> fp[0]: CallerFP (old fp)
  //          fp[-8]: STUB marker
  //          fp[-16]: Space reserved for SPOffset.
  //  jssp -> fp[-24]: CodeObject()
  STATIC_ASSERT((2 * kPointerSize) == ExitFrameConstants::kCallerSPOffset);
  STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
  STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
  STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kSPOffset);
  STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kCodeOffset);

  // Save the frame pointer and context pointer in the top frame.
  Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
                                         isolate())));
  Str(fp, MemOperand(scratch));
  Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
                                         isolate())));
  Str(cp, MemOperand(scratch));

  STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kLastExitFrameField);
  if (save_doubles) {
    ExitFramePreserveFPRegs();
  }

  // Reserve space for the return address and for user requested memory.
  // We do this before aligning to make sure that we end up correctly
  // aligned with the minimum of wasted space.
  Claim(extra_space + 1, kXRegSize);
  //         fp[8]: CallerPC (lr)
  //   fp -> fp[0]: CallerFP (old fp)
  //         fp[-8]: STUB marker
  //         fp[-16]: Space reserved for SPOffset.
  //         fp[-24]: CodeObject()
  //         fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
  //         jssp[8]: Extra space reserved for caller (if extra_space != 0).
  // jssp -> jssp[0]: Space reserved for the return address.

  // Align and synchronize the system stack pointer with jssp.
  AlignAndSetCSPForFrame();
  DCHECK(csp.Is(StackPointer()));

  //         fp[8]: CallerPC (lr)
  //   fp -> fp[0]: CallerFP (old fp)
  //         fp[-8]: STUB marker
  //         fp[-16]: Space reserved for SPOffset.
  //         fp[-24]: CodeObject()
  //         fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
  //         csp[8]: Memory reserved for the caller if extra_space != 0.
  //                 Alignment padding, if necessary.
  //  csp -> csp[0]: Space reserved for the return address.

  // ExitFrame::GetStateForFramePointer expects to find the return address at
  // the memory address immediately below the pointer stored in SPOffset.
  // It is not safe to derive much else from SPOffset, because the size of the
  // padding can vary.
  Add(scratch, csp, kXRegSize);
  Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}


// Leave the current exit frame.
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
                                    const Register& scratch,
                                    bool restore_context) {
  DCHECK(csp.Is(StackPointer()));

  if (restore_doubles) {
    ExitFrameRestoreFPRegs();
  }

  // Restore the context pointer from the top frame.
  if (restore_context) {
    Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
                                           isolate())));
    Ldr(cp, MemOperand(scratch));
  }

  if (emit_debug_code()) {
    // Also emit debug code to clear the cp in the top frame.
    Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
                                           isolate())));
    Str(xzr, MemOperand(scratch));
  }
  // Clear the frame pointer from the top frame.
  Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
                                         isolate())));
  Str(xzr, MemOperand(scratch));

  // Pop the exit frame.
  //         fp[8]: CallerPC (lr)
  //   fp -> fp[0]: CallerFP (old fp)
  //         fp[...]: The rest of the frame.
  Mov(jssp, fp);
  SetStackPointer(jssp);
  AssertStackConsistency();
  Pop(fp, lr);
}


void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    Mov(scratch1, value);
    Mov(scratch2, ExternalReference(counter));
    Str(scratch1.W(), MemOperand(scratch2));
  }
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  DCHECK(value != 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Mov(scratch2, ExternalReference(counter));
    Ldr(scratch1.W(), MemOperand(scratch2));
    Add(scratch1.W(), scratch1.W(), value);
    Str(scratch1.W(), MemOperand(scratch2));
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  IncrementCounter(counter, -value, scratch1, scratch2);
}


void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context.  Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in cp).
    Mov(dst, cp);
  }
}


void MacroAssembler::DebugBreak() {
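  // Call Runtime::kHandleDebuggerStatement through CEntryStub; x0 carries
  // the argument count (zero) and x1 the runtime entry point.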
  Mov(x0, 0);
  Mov(x1, ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
  CEntryStub ces(isolate(), 1);
  DCHECK(AllowThisStubCall(&ces));
  Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
}


void MacroAssembler::PushStackHandler() {
  DCHECK(jssp.Is(StackPointer()));
  // Adjust this code if the asserts don't hold.
  STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);

  // For the JSEntry handler, we must preserve the live registers x0-x4.
  // (See JSEntryStub::GenerateBody().)

  // Link the current handler as the next handler.
  Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
  Ldr(x10, MemOperand(x11));
  Push(x10);

  // Set this new handler as the current one.
  Str(jssp, MemOperand(x11));
}


void MacroAssembler::PopStackHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  Pop(x10);
  Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
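  // The handler occupies a single pointer-sized slot, so after popping the
  // next-handler link there is nothing further to drop here.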
  Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
  Str(x10, MemOperand(x11));
}


void MacroAssembler::Allocate(int object_size,
                              Register result,
                              Register scratch1,
                              Register scratch2,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK(object_size <= kMaxRegularHeapObjectSize);
  DCHECK((flags & ALLOCATION_FOLDED) == 0);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      // We apply salt to the original zap value to easily spot the values.
      Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
      Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
      Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
    }
    B(gc_required);
    return;
  }

  UseScratchRegisterScope temps(this);
  Register scratch3 = temps.AcquireX();

  DCHECK(!AreAliased(result, scratch1, scratch2, scratch3));
  DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());

  // Make object size into bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    object_size *= kPointerSize;
  }
  DCHECK(0 == (object_size & kObjectAlignmentMask));

  // Check relative positions of allocation top and limit addresses.
  // The values must be adjacent in memory to allow the use of LDP.
  ExternalReference heap_allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
  ExternalReference heap_allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
  intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
  intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
  DCHECK((limit - top) == kPointerSize);

  // Set up allocation top address and allocation limit registers.
  Register top_address = scratch1;
  Register alloc_limit = scratch2;
  Register result_end = scratch3;
  Mov(top_address, Operand(heap_allocation_top));

  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into alloc_limit.
    Ldp(result, alloc_limit, MemOperand(top_address));
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry.
      Ldr(alloc_limit, MemOperand(top_address));
      Cmp(result, alloc_limit);
      Check(eq, kUnexpectedAllocationTop);
    }
    // Load allocation limit. Result already contains allocation top.
    Ldr(alloc_limit, MemOperand(top_address, limit - top));
  }

  // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
  // the same alignment on ARM64.
  STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);

  // Calculate new top and bail out if new space is exhausted.
  Adds(result_end, result, object_size);
  // If the Adds above overflowed (carry set), force the flags to CFlag so
  // that the branch below is taken; otherwise compare against the limit.
  Ccmp(result_end, alloc_limit, CFlag, cc);
  B(hi, gc_required);

  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
    // The top pointer is not updated for allocation folding dominators.
    Str(result_end, MemOperand(top_address));
  }

  // Tag the object.
  ObjectTag(result, result);
}


void MacroAssembler::Allocate(Register object_size, Register result,
                              Register result_end, Register scratch,
                              Label* gc_required, AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      // We apply salt to the original zap value to easily spot the values.
      Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
      Mov(scratch, (kDebugZapValue & ~0xffL) | 0x21L);
      Mov(result_end, (kDebugZapValue & ~0xffL) | 0x21L);
    }
    B(gc_required);
    return;
  }

  UseScratchRegisterScope temps(this);
  Register scratch2 = temps.AcquireX();

  // |object_size| and |result_end| may overlap, other registers must not.
  DCHECK(!AreAliased(object_size, result, scratch, scratch2));
  DCHECK(!AreAliased(result_end, result, scratch, scratch2));
  DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
         result_end.Is64Bits());

  // Check relative positions of allocation top and limit addresses.
  // The values must be adjacent in memory to allow the use of LDP.
  ExternalReference heap_allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
  ExternalReference heap_allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
  intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
  intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
  DCHECK((limit - top) == kPointerSize);

  // Set up allocation top address and allocation limit registers.
  Register top_address = scratch;
  Register alloc_limit = scratch2;
  Mov(top_address, heap_allocation_top);

  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into alloc_limit.
    Ldp(result, alloc_limit, MemOperand(top_address));
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry.
      Ldr(alloc_limit, MemOperand(top_address));
      Cmp(result, alloc_limit);
      Check(eq, kUnexpectedAllocationTop);
    }
    // Load allocation limit. Result already contains allocation top.
    Ldr(alloc_limit, MemOperand(top_address, limit - top));
  }

  // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
  // the same alignment on ARM64.
  STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);

  // Calculate new top and bail out if new space is exhausted
  if ((flags & SIZE_IN_WORDS) != 0) {
    Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2));
  } else {
    Adds(result_end, result, object_size);
  }

  if (emit_debug_code()) {
    Tst(result_end, kObjectAlignmentMask);
    Check(eq, kUnalignedAllocationInNewSpace);
  }

  // If the size computation overflowed (carry set), force the flags to CFlag
  // so that the branch below is taken; otherwise compare against the limit.
  Ccmp(result_end, alloc_limit, CFlag, cc);
  B(hi, gc_required);

  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
    // The top pointer is not updated for allocation folding dominators.
    Str(result_end, MemOperand(top_address));
  }

  // Tag the object.
  ObjectTag(result, result);
}

void MacroAssembler::FastAllocate(int object_size, Register result,
                                  Register scratch1, Register scratch2,
                                  AllocationFlags flags) {
  DCHECK(object_size <= kMaxRegularHeapObjectSize);

  DCHECK(!AreAliased(result, scratch1, scratch2));
  DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());

  // Make object size into bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    object_size *= kPointerSize;
  }
  DCHECK(0 == (object_size & kObjectAlignmentMask));

  ExternalReference heap_allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Set up allocation top address and allocation limit registers.
  Register top_address = scratch1;
  Register result_end = scratch2;
  Mov(top_address, Operand(heap_allocation_top));
  Ldr(result, MemOperand(top_address));

  // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
  // the same alignment on ARM64.
  STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);

  // Calculate new top and write it back.
  Adds(result_end, result, object_size);
  Str(result_end, MemOperand(top_address));

  ObjectTag(result, result);
}

void MacroAssembler::FastAllocate(Register object_size, Register result,
                                  Register result_end, Register scratch,
                                  AllocationFlags flags) {
  // |object_size| and |result_end| may overlap, other registers must not.
  DCHECK(!AreAliased(object_size, result, scratch));
  DCHECK(!AreAliased(result_end, result, scratch));
  DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
         result_end.Is64Bits());

  ExternalReference heap_allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);

  // Set up allocation top address and allocation limit registers.
  Register top_address = scratch;
  Mov(top_address, heap_allocation_top);
  Ldr(result, MemOperand(top_address));

  // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
  // the same alignment on ARM64.
  STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);

  // Calculate new top and write it back.
  if ((flags & SIZE_IN_WORDS) != 0) {
    Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2));
  } else {
    Adds(result_end, result, object_size);
  }
  Str(result_end, MemOperand(top_address));

  if (emit_debug_code()) {
    Tst(result_end, kObjectAlignmentMask);
    Check(eq, kUnalignedAllocationInNewSpace);
  }

  ObjectTag(result, result);
}

void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
  // Calculate the number of bytes needed for the characters in the string while
  // observing object alignment.
  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  Add(scratch1, length, length);  // Length in bytes, not chars.
  Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
  Bic(scratch1, scratch1, kObjectAlignmentMask);

  // Allocate two-byte string in new space.
  Allocate(scratch1, result, scratch2, scratch3, gc_required,
           NO_ALLOCATION_FLAGS);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateOneByteString(Register result, Register length,
                                           Register scratch1, Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
  // Calculate the number of bytes needed for the characters in the string while
  // observing object alignment.
  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  STATIC_ASSERT(kCharSize == 1);
  Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
  Bic(scratch1, scratch1, kObjectAlignmentMask);

  // Allocate one-byte string in new space.
  Allocate(scratch1, result, scratch2, scratch3, gc_required,
           NO_ALLOCATION_FLAGS);

  // Set the map, length and hash field.
  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
                      scratch1, scratch2);
}


void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
           NO_ALLOCATION_FLAGS);

  InitializeNewString(result,
                      length,
                      Heap::kConsStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
           NO_ALLOCATION_FLAGS);

  InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
                      scratch1, scratch2);
}


void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register length,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  DCHECK(!AreAliased(result, length, scratch1, scratch2));
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           NO_ALLOCATION_FLAGS);

  InitializeNewString(result,
                      length,
                      Heap::kSlicedStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateOneByteSlicedString(Register result,
                                                 Register length,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  DCHECK(!AreAliased(result, length, scratch1, scratch2));
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           NO_ALLOCATION_FLAGS);

  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
                      scratch1, scratch2);
}


// Allocates a heap number or jumps to the need_gc label if the young space
// is full and a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result,
                                        Label* gc_required,
                                        Register scratch1,
                                        Register scratch2,
                                        CPURegister value,
                                        CPURegister heap_number_map,
                                        MutableMode mode) {
  DCHECK(!value.IsValid() || value.Is64Bits());
  UseScratchRegisterScope temps(this);

  // Allocate an object in the heap for the heap number and tag it as a heap
  // object.
  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
           NO_ALLOCATION_FLAGS);

  Heap::RootListIndex map_index = mode == MUTABLE
      ? Heap::kMutableHeapNumberMapRootIndex
      : Heap::kHeapNumberMapRootIndex;

  // Prepare the heap number map.
  if (!heap_number_map.IsValid()) {
    // If we have a valid value register, use the same type of register to store
    // the map so we can use STP to store both in one instruction.
    if (value.IsValid() && value.IsFPRegister()) {
      heap_number_map = temps.AcquireD();
    } else {
      heap_number_map = scratch1;
    }
    LoadRoot(heap_number_map, map_index);
  }
  if (emit_debug_code()) {
    Register map;
    if (heap_number_map.IsFPRegister()) {
      map = scratch1;
      Fmov(map, DoubleRegister(heap_number_map));
    } else {
      map = Register(heap_number_map);
    }
    AssertRegisterIsRoot(map, map_index);
  }

  // Store the heap number map and the value in the allocated object.
  if (value.IsSameSizeAndType(heap_number_map)) {
    STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize ==
                  HeapNumber::kValueOffset);
    Stp(heap_number_map, value,
        FieldMemOperand(result, HeapObject::kMapOffset));
  } else {
    Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
    if (value.IsValid()) {
      Str(value, FieldMemOperand(result, HeapNumber::kValueOffset));
    }
  }
}


void MacroAssembler::JumpIfObjectType(Register object,
                                      Register map,
                                      Register type_reg,
                                      InstanceType type,
                                      Label* if_cond_pass,
                                      Condition cond) {
  CompareObjectType(object, map, type_reg, type);
  B(cond, if_cond_pass);
}


void MacroAssembler::AllocateJSValue(Register result, Register constructor,
                                     Register value, Register scratch1,
                                     Register scratch2, Label* gc_required) {
  DCHECK(!result.is(constructor));
  DCHECK(!result.is(scratch1));
  DCHECK(!result.is(scratch2));
  DCHECK(!result.is(value));

  // Allocate JSValue in new space.
  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
           NO_ALLOCATION_FLAGS);

  // Initialize the JSValue.
  LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
  Str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
  LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
  Str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
  Str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
  Str(value, FieldMemOperand(result, JSValue::kValueOffset));
  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}


void MacroAssembler::JumpIfNotObjectType(Register object,
                                         Register map,
                                         Register type_reg,
                                         InstanceType type,
                                         Label* if_not_object) {
  JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
}


// Sets condition flags based on comparison, and returns type in type_reg.
void MacroAssembler::CompareObjectType(Register object,
                                       Register map,
                                       Register type_reg,
                                       InstanceType type) {
  Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  CompareInstanceType(map, type_reg, type);
}


// Sets condition flags based on comparison, and returns type in type_reg.
void MacroAssembler::CompareInstanceType(Register map,
                                         Register type_reg,
                                         InstanceType type) {
  Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
  Cmp(type_reg, type);
}


void MacroAssembler::CompareObjectMap(Register obj, Heap::RootListIndex index) {
  UseScratchRegisterScope temps(this);
  Register obj_map = temps.AcquireX();
  Ldr(obj_map, FieldMemOperand(obj, HeapObject::kMapOffset));
  CompareRoot(obj_map, index);
}


void MacroAssembler::CompareObjectMap(Register obj, Register scratch,
                                      Handle<Map> map) {
  Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  CompareMap(scratch, map);
}


void MacroAssembler::CompareMap(Register obj_map,
                                Handle<Map> map) {
  Cmp(obj_map, Operand(map));
}


void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }

  CompareObjectMap(obj, scratch, map);
  B(ne, fail);
}


void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Heap::RootListIndex index,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  JumpIfNotRoot(scratch, index, fail);
}


void MacroAssembler::CheckMap(Register obj_map,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj_map, fail);
  }

  CompareMap(obj_map, map);
  B(ne, fail);
}


void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
                                     Register scratch2, Handle<WeakCell> cell,
                                     Handle<Code> success,
                                     SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  Ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
  CmpWeakValue(scratch1, cell, scratch2);
  B(ne, &fail);
  Jump(success, RelocInfo::CODE_TARGET);
  Bind(&fail);
}


void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
                                  Register scratch) {
  Mov(scratch, Operand(cell));
  Ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
  Cmp(value, scratch);
}


void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
  Mov(value, Operand(cell));
  Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
}


void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
                                   Label* miss) {
  GetWeakValue(value, cell);
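  // A cleared WeakCell holds a Smi (zero), so a Smi value indicates that the
  // weak reference has been cleared.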
  JumpIfSmi(value, miss);
}


void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
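  // Load the object's map, fetch its bit field and test it against |mask|;
  // callers branch on the flags this leaves behind.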
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  Tst(temp, mask);
}


void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
  // Load the map's "bit field 2".
  __ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  DecodeField<Map::ElementsKindBits>(result);
}


void MacroAssembler::GetMapConstructor(Register result, Register map,
                                       Register temp, Register temp2) {
  Label done, loop;
  Ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
  Bind(&loop);
  JumpIfSmi(result, &done);
  CompareObjectType(result, temp, temp2, MAP_TYPE);
  B(ne, &done);
  Ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
  B(&loop);
  Bind(&done);
}


void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
                                             Register scratch, Label* miss) {
  DCHECK(!AreAliased(function, result, scratch));

  // Get the prototype or initial map from the function.
  Ldr(result,
      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and simply
  // miss the cache instead. This will allow us to allocate a prototype object
  // on-demand in the runtime system.
  JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);

  // If the function does not have an initial map, we're done.
  Label done;
  JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);

  // Get the prototype from the initial map.
  Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));

  // All done.
  Bind(&done);
}


void MacroAssembler::PushRoot(Heap::RootListIndex index) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  LoadRoot(temp, index);
  Push(temp);
}


void MacroAssembler::CompareRoot(const Register& obj,
                                 Heap::RootListIndex index) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  DCHECK(!AreAliased(obj, temp));
  LoadRoot(temp, index);
  Cmp(obj, temp);
}


void MacroAssembler::JumpIfRoot(const Register& obj,
                                Heap::RootListIndex index,
                                Label* if_equal) {
  CompareRoot(obj, index);
  B(eq, if_equal);
}


void MacroAssembler::JumpIfNotRoot(const Register& obj,
                                   Heap::RootListIndex index,
                                   Label* if_not_equal) {
  CompareRoot(obj, index);
  B(ne, if_not_equal);
}


void MacroAssembler::CompareAndSplit(const Register& lhs,
                                     const Operand& rhs,
                                     Condition cond,
                                     Label* if_true,
                                     Label* if_false,
                                     Label* fall_through) {
  if ((if_true == if_false) && (if_false == fall_through)) {
    // Fall through.
  } else if (if_true == if_false) {
    B(if_true);
  } else if (if_false == fall_through) {
    CompareAndBranch(lhs, rhs, cond, if_true);
  } else if (if_true == fall_through) {
    CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false);
  } else {
    CompareAndBranch(lhs, rhs, cond, if_true);
    B(if_false);
  }
}


void MacroAssembler::TestAndSplit(const Register& reg,
                                  uint64_t bit_pattern,
                                  Label* if_all_clear,
                                  Label* if_any_set,
                                  Label* fall_through) {
  if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
    // Fall through.
  } else if (if_all_clear == if_any_set) {
    B(if_all_clear);
  } else if (if_all_clear == fall_through) {
    TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
  } else if (if_any_set == fall_through) {
    TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
  } else {
    TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
    B(if_all_clear);
  }
}

void MacroAssembler::CheckFastObjectElements(Register map,
                                             Register scratch,
                                             Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  // If the previous comparison gave ls (a smi-only elements kind), force the
  // flags to hi so the branch below fails; otherwise compare against the
  // maximum fast-holey-element value.
  Ccmp(scratch,
       Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
  B(hi, fail);
}


// Note: The ARM version of this clobbers elements_reg, but this version does
// not. Some uses of this in ARM64 assume that elements_reg will be preserved.
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
                                                 Register key_reg,
                                                 Register elements_reg,
                                                 Register scratch1,
                                                 FPRegister fpscratch1,
                                                 Label* fail,
                                                 int elements_offset) {
  DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
  Label store_num;

  // Speculatively convert the smi to a double - all smis can be exactly
  // represented as a double.
  SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);

  // If value_reg is a smi, we're done.
  JumpIfSmi(value_reg, &store_num);

  // Ensure that the object is a heap number.
  JumpIfNotHeapNumber(value_reg, fail);

  Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));

  // Canonicalize NaNs.
  CanonicalizeNaN(fpscratch1);

  // Store the result.
  Bind(&store_num);
  Add(scratch1, elements_reg,
      Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
  Str(fpscratch1,
      FieldMemOperand(scratch1,
                      FixedDoubleArray::kHeaderSize - elements_offset));
}


bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame_ || !stub->SometimesSetsUpAFrame();
}

void MacroAssembler::EmitSeqStringSetCharCheck(
    Register string,
    Register index,
    SeqStringSetCharCheckIndexType index_type,
    Register scratch,
    uint32_t encoding_mask) {
  DCHECK(!AreAliased(string, index, scratch));

  if (index_type == kIndexIsSmi) {
    AssertSmi(index);
  }

  // Check that string is an object.
  AssertNotSmi(string, kNonObject);

  // Check that string has an appropriate map.
  Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
  Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
  Cmp(scratch, encoding_mask);
  Check(eq, kUnexpectedStringType);

  Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
  Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
  Check(lt, kIndexIsTooLarge);

  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
  Cmp(index, 0);
  Check(ge, kIndexIsNegative);
}


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Label* miss) {
  DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
  Label same_contexts;

  // Load current lexical context from the active StandardFrame, which
  // may require crawling past STUB frames.
  Label load_context;
  Label has_context;
  Mov(scratch2, fp);
  bind(&load_context);
  Ldr(scratch1,
      MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
  JumpIfNotSmi(scratch1, &has_context);
  Ldr(scratch2, MemOperand(scratch2, CommonFrameConstants::kCallerFPOffset));
  B(&load_context);
  bind(&has_context);

  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  Cmp(scratch1, 0);
  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
#endif

  // Load the native context of the current context.
  Ldr(scratch1, ContextMemOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));

  // Check the context is a native context.
  if (emit_debug_code()) {
    // Read the first word and compare to the native_context_map.
    Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
    CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
    Check(eq, kExpectedNativeContext);
  }

  // Check if both contexts are the same.
  Ldr(scratch2, FieldMemOperand(holder_reg,
                                JSGlobalProxy::kNativeContextOffset));
  Cmp(scratch1, scratch2);
  B(&same_contexts, eq);

  // Check the context is a native context.
  if (emit_debug_code()) {
    // We're short on scratch registers here, so use holder_reg as a scratch.
    Push(holder_reg);
    Register scratch3 = holder_reg;

    CompareRoot(scratch2, Heap::kNullValueRootIndex);
    Check(ne, kExpectedNonNullContext);

    Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
    CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
    Check(eq, kExpectedNativeContext);
    Pop(holder_reg);
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  Ldr(scratch1, FieldMemOperand(scratch1, token_offset));
  Ldr(scratch2, FieldMemOperand(scratch2, token_offset));
  Cmp(scratch1, scratch2);
  B(miss, ne);

  Bind(&same_contexts);
}


// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stubs-hydrogen.cc
void MacroAssembler::GetNumberHash(Register key, Register scratch) {
  DCHECK(!AreAliased(key, scratch));

  // Xor original key with a seed.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  Eor(key, key, Operand::UntagSmi(scratch));

  // The algorithm uses 32-bit integer values.
  key = key.W();
  scratch = scratch.W();

  // Compute the hash code from the untagged key.  This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  Mvn(scratch, key);
  Add(key, scratch, Operand(key, LSL, 15));
  // hash = hash ^ (hash >> 12);
  Eor(key, key, Operand(key, LSR, 12));
  // hash = hash + (hash << 2);
  Add(key, key, Operand(key, LSL, 2));
  // hash = hash ^ (hash >> 4);
  Eor(key, key, Operand(key, LSR, 4));
  // hash = hash * 2057;
  Mov(scratch, Operand(key, LSL, 11));
  Add(key, key, Operand(key, LSL, 3));
  Add(key, key, scratch);
  // hash = hash ^ (hash >> 16);
  Eor(key, key, Operand(key, LSR, 16));
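  // hash &= 0x3fffffff (clear the two top bits so the result stays within
  // the positive Smi range).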
  Bic(key, key, Operand(0xc0000000u));
}

void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
                                               Register code_entry,
                                               Register scratch) {
  const int offset = JSFunction::kCodeEntryOffset;

  // Since a code entry (value) is always in old space, we don't need to update
  // remembered set. If incremental marking is off, there is nothing for us to
  // do.
  if (!FLAG_incremental_marking) return;

  DCHECK(js_function.is(x1));
  DCHECK(code_entry.is(x7));
  DCHECK(scratch.is(x5));
  AssertNotSmi(js_function);

  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Add(scratch, js_function, offset - kHeapObjectTag);
    Ldr(temp, MemOperand(scratch));
    Cmp(temp, code_entry);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into young gen.
  Label done;

  CheckPageFlagClear(code_entry, scratch,
                     MemoryChunk::kPointersToHereAreInterestingMask, &done);
  CheckPageFlagClear(js_function, scratch,
                     MemoryChunk::kPointersFromHereAreInterestingMask, &done);

  const Register dst = scratch;
  Add(dst, js_function, offset - kHeapObjectTag);

  // Save caller-saved registers. Both input registers (x1 and x7) are caller
  // saved, so there is no need to push them.
  PushCPURegList(kCallerSaved);

  int argument_count = 3;

  Mov(x0, js_function);
  Mov(x1, dst);
  Mov(x2, ExternalReference::isolate_address(isolate()));

  {
    AllowExternalCallThatCantCauseGC scope(this);
    CallCFunction(
        ExternalReference::incremental_marking_record_write_code_entry_function(
            isolate()),
        argument_count);
  }

  // Restore caller-saved registers.
  PopCPURegList(kCallerSaved);

  Bind(&done);
}

void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address,
                                         Register scratch1,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  DCHECK(!AreAliased(object, address, scratch1));
  Label done, store_buffer_overflow;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, &ok);
    Abort(kRememberedSetPointerInNewSpace);
    bind(&ok);
  }
  UseScratchRegisterScope temps(this);
  Register scratch2 = temps.AcquireX();

  // Load store buffer top.
  Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
  Ldr(scratch1, MemOperand(scratch2));
  // Store pointer to buffer and increment buffer top.
  Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
  // Write back new top of buffer.
  Str(scratch1, MemOperand(scratch2));
  // Call stub on end of buffer.
  // Check for end of buffer.
  Tst(scratch1, StoreBuffer::kStoreBufferMask);
  if (and_then == kFallThroughAtEnd) {
    B(ne, &done);
  } else {
    DCHECK(and_then == kReturnAtEnd);
    B(eq, &store_buffer_overflow);
    Ret();
  }

  Bind(&store_buffer_overflow);
  Push(lr);
  StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode);
  CallStub(&store_buffer_overflow_stub);
  Pop(lr);

  Bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  PopXRegList(kSafepointSavedRegisters);
  Drop(num_unsaved);
}


void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
  // adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK(num_unsaved >= 0);
  Claim(num_unsaved);
  PushXRegList(kSafepointSavedRegisters);
}


void MacroAssembler::PushSafepointRegistersAndDoubles() {
  PushSafepointRegisters();
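  // In addition to the safepoint registers, preserve every double register
  // that the register allocator may use.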
  PushCPURegList(CPURegList(
      CPURegister::kFPRegister, kDRegSizeInBits,
      RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask()));
}


void MacroAssembler::PopSafepointRegistersAndDoubles() {
  PopCPURegList(CPURegList(
      CPURegister::kFPRegister, kDRegSizeInBits,
      RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask()));
  PopSafepointRegisters();
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // Make sure the safepoint registers list is what we expect.
  DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);

  // Safepoint registers are stored contiguously on the stack, but not all the
  // registers are saved. The following registers are excluded:
  //  - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
  //    the macro assembler.
  //  - x28 (jssp) because JS stack pointer doesn't need to be included in
  //    safepoint registers.
  //  - x31 (csp) because the system stack pointer doesn't need to be included
  //    in safepoint registers.
  //
  // This function implements the mapping of register code to index into the
  // safepoint register slots.
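  // For example, register code 10 (x10) maps to slot 10, code 20 (x20) maps
  // to slot 18, and code 30 (lr) maps to slot 27.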
  if ((reg_code >= 0) && (reg_code <= 15)) {
    return reg_code;
  } else if ((reg_code >= 18) && (reg_code <= 27)) {
    // Skip ip0 and ip1.
    return reg_code - 2;
  } else if ((reg_code == 29) || (reg_code == 30)) {
    // Also skip jssp.
    return reg_code - 3;
  } else {
    // This register has no safepoint register slot.
    UNREACHABLE();
    return -1;
  }
}

void MacroAssembler::CheckPageFlag(const Register& object,
                                   const Register& scratch, int mask,
                                   Condition cc, Label* condition_met) {
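  // Clearing the page-alignment bits of the object address yields the
  // MemoryChunk header; the flags word lives at kFlagsOffset within it.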
  And(scratch, object, ~Page::kPageAlignmentMask);
  Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  if (cc == eq) {
    TestAndBranchIfAnySet(scratch, mask, condition_met);
  } else {
    TestAndBranchIfAllClear(scratch, mask, condition_met);
  }
}

void MacroAssembler::CheckPageFlagSet(const Register& object,
                                      const Register& scratch,
                                      int mask,
                                      Label* if_any_set) {
  And(scratch, object, ~Page::kPageAlignmentMask);
  Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  TestAndBranchIfAnySet(scratch, mask, if_any_set);
}


void MacroAssembler::CheckPageFlagClear(const Register& object,
                                        const Register& scratch,
                                        int mask,
                                        Label* if_all_clear) {
  And(scratch, object, ~Page::kPageAlignmentMask);
  Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  TestAndBranchIfAllClear(scratch, mask, if_all_clear);
}


void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register scratch,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip the barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the start
  // of the object, so offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  Add(scratch, object, offset - kHeapObjectTag);
  if (emit_debug_code()) {
    Label ok;
    Tst(scratch, (1 << kPointerSizeLog2) - 1);
    B(eq, &ok);
    Abort(kUnalignedCellInWriteBarrier);
    Bind(&ok);
  }

  RecordWrite(object,
              scratch,
              value,
              lr_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK,
              pointers_to_here_check_for_value);

  Bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
    Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
  }
}


// Will clobber: object, map, dst.
// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       LinkRegisterStatus lr_status,
                                       SaveFPRegsMode fp_mode) {
  ASM_LOCATION("MacroAssembler::RecordWriteForMap");
  DCHECK(!AreAliased(object, map));

  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    CompareObjectMap(map, temp, isolate()->factory()->meta_map());
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    Cmp(temp, map);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set.  This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlagClear(map,
                     map,  // Used as scratch.
                     MemoryChunk::kPointersToHereAreInterestingMask,
                     &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    Push(lr);
  }
  Add(dst, object, HeapObject::kMapOffset - kHeapObjectTag);
  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    Pop(lr);
  }

  Bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, map,
                   dst);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
    Mov(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
  }
}


// Will clobber: object, address, value.
// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
//
// The register 'object' contains a heap object pointer. The heap object tag is
// shifted away.
void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  ASM_LOCATION("MacroAssembler::RecordWrite");
  DCHECK(!AreAliased(object, value));

  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Ldr(temp, MemOperand(address));
    Cmp(temp, value);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    DCHECK_EQ(0, kSmiTag);
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlagClear(value,
                       value,  // Used as scratch.
                       MemoryChunk::kPointersToHereAreInterestingMask,
                       &done);
  }
  CheckPageFlagClear(object,
                     value,  // Used as scratch.
                     MemoryChunk::kPointersFromHereAreInterestingMask,
                     &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    Push(lr);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    Pop(lr);
  }

  Bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address,
                   value);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
    Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
  }
}


void MacroAssembler::AssertHasValidColor(const Register& reg) {
  if (emit_debug_code()) {
    // The bit sequence is backward. The first character in the string
    // represents the least significant bit.
    DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

    Label color_is_valid;
    Tbnz(reg, 0, &color_is_valid);
    Tbz(reg, 1, &color_is_valid);
    Abort(kUnexpectedColorFound);
    Bind(&color_is_valid);
  }
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register shift_reg) {
  DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg));
  DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
  // addr_reg is divided into fields:
  // |63        page base        20|19    high      8|7   shift   3|2  0|
  // 'high' gives the index of the cell holding color bits for the object.
  // 'shift' gives the offset in the cell for this object's color.
  const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
  Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
  Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
  // bitmap_reg:
  // |63        page base        20|19 zeros 15|14      high      3|2  0|
  Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
}


void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register shift_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
  // See mark-compact.h for color definitions.
  DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch));

  GetMarkBits(object, bitmap_scratch, shift_scratch);
  Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  // Shift the bitmap down to get the color of the object in bits [1:0].
  Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);

  AssertHasValidColor(bitmap_scratch);

  // These bit sequences are backwards. The first character in the string
  // represents the least significant bit.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);

  // Check for the color.
  if (first_bit == 0) {
    // Checking for white.
    DCHECK(second_bit == 0);
    // We only need to test the first bit.
    Tbz(bitmap_scratch, 0, has_color);
  } else {
    Label other_color;
    // Checking for grey or black.
    Tbz(bitmap_scratch, 0, &other_color);
    if (second_bit == 0) {
      Tbz(bitmap_scratch, 1, has_color);
    } else {
      Tbnz(bitmap_scratch, 1, has_color);
    }
    Bind(&other_color);
  }

  // Fall through if it does not have the right color.
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  HasColor(object, scratch0, scratch1, on_black, 1, 1);  // kBlackBitPattern.
}


void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  DCHECK(!AreAliased(object, scratch0, scratch1));
  Register current = scratch0;
  Label loop_again, end;

  // 'current' walks the prototype chain, starting from the object's map.
  Mov(current, object);
  Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
  Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
  CompareAndBranch(current, Heap::kNullValueRootIndex, eq, &end);

  // Loop based on the map going up the prototype chain.
  Bind(&loop_again);
  Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  CompareInstanceType(current, scratch1, JS_OBJECT_TYPE);
  B(lo, found);
  Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
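  // scratch1 now holds the elements kind of the current map.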
  CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
  Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
  CompareAndBranch(current, Heap::kNullValueRootIndex, ne, &loop_again);

  Bind(&end);
}


void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
                                 Register shift_scratch, Register load_scratch,
                                 Register length_scratch,
                                 Label* value_is_white) {
  DCHECK(!AreAliased(
      value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));

  // These bit sequences are backwards. The first character in the string
  // represents the least significant bit.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);

  GetMarkBits(value, bitmap_scratch, shift_scratch);
  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Lsr(load_scratch, load_scratch, shift_scratch);

  AssertHasValidColor(load_scratch);

  // If the value is black or grey we don't need to do anything.
  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  Tbz(load_scratch, 0, value_is_white);
}


void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
  if (emit_debug_code()) {
    Check(cond, reason);
  }
}


void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
  if (emit_debug_code()) {
    CheckRegisterIsClear(reg, reason);
  }
}


void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                          Heap::RootListIndex index,
                                          BailoutReason reason) {
  if (emit_debug_code()) {
    CompareRoot(reg, index);
    Check(eq, reason);
  }
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Label ok;
    Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
    JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
    JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
    JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    Bind(&ok);
  }
}


void MacroAssembler::AssertIsString(const Register& object) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
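    // A smi has a zero tag bit, so 'ne' here means the value is a heap object
    // and may be a string.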
    Check(ne, kOperandIsNotAString);
    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
    Check(lo, kOperandIsNotAString);
  }
}


void MacroAssembler::Check(Condition cond, BailoutReason reason) {
  Label ok;
  B(cond, &ok);
  Abort(reason);
  // Will not return here.
  Bind(&ok);
}


void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
  Label ok;
  Cbz(reg, &ok);
  Abort(reason);
  // Will not return here.
  Bind(&ok);
}


void MacroAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
  RecordComment("Abort message: ");
  RecordComment(GetBailoutReason(reason));

  if (FLAG_trap_on_abort) {
    Brk(0);
    return;
  }
#endif

  // Abort is used in some contexts where csp is the stack pointer. In order to
  // simplify the CallRuntime code, make sure that jssp is the stack pointer.
  // There is no risk of register corruption here because Abort doesn't return.
  Register old_stack_pointer = StackPointer();
  SetStackPointer(jssp);
  Mov(jssp, old_stack_pointer);

  // We need some scratch registers for the MacroAssembler, so make sure we have
  // some. This is safe here because Abort never returns.
  RegList old_tmp_list = TmpList()->list();
  TmpList()->Combine(MacroAssembler::DefaultTmpList());

  if (use_real_aborts()) {
    // Avoid infinite recursion; Push contains some assertions that use Abort.
    NoUseRealAbortsScope no_real_aborts(this);

    // Check if Abort() has already been initialized.
    DCHECK(isolate()->builtins()->Abort()->IsHeapObject());

    Move(x1, Smi::FromInt(static_cast<int>(reason)));

    if (!has_frame_) {
      // We don't actually want to generate a pile of code for this, so just
      // claim there is a stack frame, without generating one.
      FrameScope scope(this, StackFrame::NONE);
      Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
    } else {
      Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
    }
  } else {
    // Load the string to pass to Printf.
    Label msg_address;
    Adr(x0, &msg_address);

    // Call Printf directly to report the error.
    CallPrintf();

    // We need a way to stop execution on both the simulator and real hardware,
    // and Unreachable() is the best option.
    Unreachable();

    // Emit the message string directly in the instruction stream.
    {
      BlockPoolsScope scope(this);
      Bind(&msg_address);
      EmitStringData(GetBailoutReason(reason));
    }
  }

  SetStackPointer(old_stack_pointer);
  TmpList()->set_list(old_tmp_list);
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch1,
    Register scratch2,
    Label* no_map_match) {
  DCHECK(IsFastElementsKind(expected_kind));
  DCHECK(IsFastElementsKind(transitioned_kind));

  // Check that the function's map is the same as the expected cached map.
  Ldr(scratch1, NativeContextMemOperand());
  Ldr(scratch2,
      ContextMemOperand(scratch1, Context::ArrayMapIndex(expected_kind)));
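  // scratch2 now holds the native context's cached array map for
  // 'expected_kind'.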
  Cmp(map_in_out, scratch2);
  B(ne, no_map_match);

  // Use the transitioned cached map.
  Ldr(map_in_out,
      ContextMemOperand(scratch1, Context::ArrayMapIndex(transitioned_kind)));
}


void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  Ldr(dst, NativeContextMemOperand());
  Ldr(dst, ContextMemOperand(dst, index));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    B(&ok);
    Bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    Bind(&ok);
  }
}


// This is the main Printf implementation. All other Printf variants call
// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
void MacroAssembler::PrintfNoPreserve(const char * format,
                                      const CPURegister& arg0,
                                      const CPURegister& arg1,
                                      const CPURegister& arg2,
                                      const CPURegister& arg3) {
  // We cannot handle a caller-saved stack pointer. It doesn't make much sense
  // in most cases anyway, so this restriction shouldn't be too serious.
  DCHECK(!kCallerSaved.IncludesAliasOf(__ StackPointer()));

  // The provided arguments, and their proper procedure-call standard registers.
  CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
  CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg};

  int arg_count = kPrintfMaxArgCount;

  // The PCS varargs registers for printf. Note that x0 is used for the printf
  // format string.
  static const CPURegList kPCSVarargs =
      CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
  static const CPURegList kPCSVarargsFP =
      CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1);
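  // These cover x1 upwards for integer varargs and d0 upwards for
  // floating-point varargs; x0 is reserved for the format string.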

  // We can use caller-saved registers as scratch values, except for the
  // arguments and the PCS registers where they might need to go.
  CPURegList tmp_list = kCallerSaved;
  tmp_list.Remove(x0);      // Used to pass the format string.
  tmp_list.Remove(kPCSVarargs);
  tmp_list.Remove(arg0, arg1, arg2, arg3);

  CPURegList fp_tmp_list = kCallerSavedFP;
  fp_tmp_list.Remove(kPCSVarargsFP);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);

  // Override the MacroAssembler's scratch register list. The lists will be
  // reset automatically at the end of the UseScratchRegisterScope.
  UseScratchRegisterScope temps(this);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  // Copies of the printf vararg registers that we can pop from.
  CPURegList pcs_varargs = kPCSVarargs;
  CPURegList pcs_varargs_fp = kPCSVarargsFP;

  // Place the arguments. There are lots of clever tricks and optimizations we
  // could use here, but Printf is a debug tool so instead we just try to keep
  // it simple: Move each input that isn't already in the right place to a
  // scratch register, then move everything back.
  for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
    // Work out the proper PCS register for this argument.
    if (args[i].IsRegister()) {
      pcs[i] = pcs_varargs.PopLowestIndex().X();
      // We might only need a W register here. We need to know the size of the
      // argument so we can properly encode it for the simulator call.
      if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
    } else if (args[i].IsFPRegister()) {
      // In C, floats are always cast to doubles for varargs calls.
      pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
    } else {
      DCHECK(args[i].IsNone());
      arg_count = i;
      break;
    }

    // If the argument is already in the right place, leave it where it is.
    if (args[i].Aliases(pcs[i])) continue;

    // Otherwise, if the argument is in a PCS argument register, allocate an
    // appropriate scratch register and then move it out of the way.
    if (kPCSVarargs.IncludesAliasOf(args[i]) ||
        kPCSVarargsFP.IncludesAliasOf(args[i])) {
      if (args[i].IsRegister()) {
        Register old_arg = Register(args[i]);
        Register new_arg = temps.AcquireSameSizeAs(old_arg);
        Mov(new_arg, old_arg);
        args[i] = new_arg;
      } else {
        FPRegister old_arg = FPRegister(args[i]);
        FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
        Fmov(new_arg, old_arg);
        args[i] = new_arg;
      }
    }
  }

  // Do a second pass to move values into their final positions and perform any
  // conversions that may be required.
  for (int i = 0; i < arg_count; i++) {
    DCHECK(pcs[i].type() == args[i].type());
    if (pcs[i].IsRegister()) {
      Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
    } else {
      DCHECK(pcs[i].IsFPRegister());
      if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
        Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
      } else {
        Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
      }
    }
  }

  // Load the format string into x0, as per the procedure-call standard.
  //
  // To make the code as portable as possible, the format string is encoded
  // directly in the instruction stream. It might be cleaner to encode it in a
  // literal pool, but since Printf is usually used for debugging, it is
  // beneficial for it to be minimally dependent on other features.
  Label format_address;
  Adr(x0, &format_address);

  // Emit the format string directly in the instruction stream.
  { BlockPoolsScope scope(this);
    Label after_data;
    B(&after_data);
    Bind(&format_address);
    EmitStringData(format);
    Unreachable();
    Bind(&after_data);
  }

  // We don't pass any arguments on the stack, but we still need to align the C
  // stack pointer to a 16-byte boundary for PCS compliance.
  if (!csp.Is(StackPointer())) {
    Bic(csp, StackPointer(), 0xf);
  }

  CallPrintf(arg_count, pcs);
}


void MacroAssembler::CallPrintf(int arg_count, const CPURegister * args) {
  // A call to printf needs special handling for the simulator, since the system
  // printf function will use a different instruction set and the procedure-call
  // standard will not be compatible.
#ifdef USE_SIMULATOR
  { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
    hlt(kImmExceptionIsPrintf);
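    // The simulator traps on this hlt and uses the data words emitted below
    // (argument count and argument pattern) to call printf with the right
    // argument types.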
    dc32(arg_count);          // kPrintfArgCountOffset

    // Determine the argument pattern.
    uint32_t arg_pattern_list = 0;
    for (int i = 0; i < arg_count; i++) {
      uint32_t arg_pattern;
      if (args[i].IsRegister()) {
        arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
      } else {
        DCHECK(args[i].Is64Bits());
        arg_pattern = kPrintfArgD;
      }
      DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
      arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
    }
    dc32(arg_pattern_list);   // kPrintfArgPatternListOffset
  }
#else
  Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
#endif
}


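// For example, a debug-only trace of a W register and a D register might look
// like:
//   __ Printf("w0 = %d, d0 = %f\n", w0, d0);
// The format string is handed to printf unchanged, so the specifiers must
// match the sizes of the registers passed.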
void MacroAssembler::Printf(const char * format,
                            CPURegister arg0,
                            CPURegister arg1,
                            CPURegister arg2,
                            CPURegister arg3) {
  // We can only print sp if it is the current stack pointer.
  if (!csp.Is(StackPointer())) {
    DCHECK(!csp.Aliases(arg0));
    DCHECK(!csp.Aliases(arg1));
    DCHECK(!csp.Aliases(arg2));
    DCHECK(!csp.Aliases(arg3));
  }

  // Printf is expected to preserve all registers, so make sure that none are
  // available as scratch registers until we've preserved them.
  RegList old_tmp_list = TmpList()->list();
  RegList old_fp_tmp_list = FPTmpList()->list();
  TmpList()->set_list(0);
  FPTmpList()->set_list(0);

  // Preserve all caller-saved registers as well as NZCV.
  // If csp is the stack pointer, PushCPURegList asserts that the size of each
  // list is a multiple of 16 bytes.
  PushCPURegList(kCallerSaved);
  PushCPURegList(kCallerSavedFP);

  // We can use caller-saved registers as scratch values (except for argN).
  CPURegList tmp_list = kCallerSaved;
  CPURegList fp_tmp_list = kCallerSavedFP;
  tmp_list.Remove(arg0, arg1, arg2, arg3);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  { UseScratchRegisterScope temps(this);
    // If any of the arguments are the current stack pointer, allocate a new
    // register for them, and adjust the value to compensate for pushing the
    // caller-saved registers.
    bool arg0_sp = StackPointer().Aliases(arg0);
    bool arg1_sp = StackPointer().Aliases(arg1);
    bool arg2_sp = StackPointer().Aliases(arg2);
    bool arg3_sp = StackPointer().Aliases(arg3);
    if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
      // Allocate a register to hold the original stack pointer value, to pass
      // to PrintfNoPreserve as an argument.
      Register arg_sp = temps.AcquireX();
      Add(arg_sp, StackPointer(),
          kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
      if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
      if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
      if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
      if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits());
    }

    // Preserve NZCV.
    { UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Mrs(tmp, NZCV);
      Push(tmp, xzr);
    }

    PrintfNoPreserve(format, arg0, arg1, arg2, arg3);

    // Restore NZCV.
    { UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Pop(xzr, tmp);
      Msr(NZCV, tmp);
    }
  }

  PopCPURegList(kCallerSavedFP);
  PopCPURegList(kCallerSaved);

  TmpList()->set_list(old_tmp_list);
  FPTmpList()->set_list(old_fp_tmp_list);
}


void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
  // TODO(jbramley): Other architectures use the internal memcpy to copy the
  // sequence. If this is a performance bottleneck, we should consider caching
  // the sequence and copying it in the same way.
  InstructionAccurateScope scope(this,
                                 kNoCodeAgeSequenceLength / kInstructionSize);
  DCHECK(jssp.Is(StackPointer()));
  EmitFrameSetupForCodeAgePatching(this);
}


void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
  InstructionAccurateScope scope(this,
                                 kNoCodeAgeSequenceLength / kInstructionSize);
  DCHECK(jssp.Is(StackPointer()));
  EmitCodeAgeSequence(this, stub);
}


#undef __
#define __ assm->


void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
  Label start;
  __ bind(&start);

  // We can do this sequence using four instructions, but the code ageing
  // sequence that patches it needs five, so we use the extra space to try to
  // simplify some addressing modes and remove some dependencies (compared to
  // using two stp instructions with write-back).
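  // The young sequence is an ordinary frame setup: claim four slots, save x1
  // (the function) and cp, then fp and lr, and leave fp pointing at the saved
  // fp slot, as in a standard frame.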
  __ sub(jssp, jssp, 4 * kXRegSize);
  __ sub(csp, csp, 4 * kXRegSize);
  __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
  __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
  __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);

  __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
}


void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
                                         Code * stub) {
  Label start;
  __ bind(&start);
  // When the stub is called, the sequence is replaced with the young sequence
  // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
  // stub jumps to &start, stored in x0. The young sequence does not call the
  // stub so there is no infinite loop here.
  //
  // A branch (br) is used rather than a call (blr) because this code replaces
  // the frame setup code that would normally preserve lr.
  __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2);
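  // ip0 now holds the stub's entry point, loaded from the 64-bit literal that
  // dc64 emits below at kCodeAgeStubEntryOffset.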
  __ adr(x0, &start);
  __ br(ip0);
  // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
  // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
  if (stub) {
    __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
    __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
  }
}


bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(is_young ||
         isolate->code_aging_helper()->IsOld(sequence));
  return is_young;
}


void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!AreAliased(result, dividend));
  DCHECK(result.Is32Bits() && dividend.Is32Bits());
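  // This is the standard "magic number" signed division: multiply by a
  // precomputed multiplier, keep the high 32 bits of the 64-bit product, then
  // apply the correction and shift supplied by MagicNumbersForDivision.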
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  Mov(result, mag.multiplier);
  Smull(result.X(), dividend, result);
  Asr(result.X(), result.X(), 32);
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) Add(result, result, dividend);
  if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend);
  if (mag.shift > 0) Asr(result, result, mag.shift);
  Add(result, result, Operand(dividend, LSR, 31));
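  // Adding the dividend's sign bit compensates for the arithmetic shifts
  // rounding towards negative infinity, so the quotient truncates towards
  // zero.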
}


#undef __


UseScratchRegisterScope::~UseScratchRegisterScope() {
  available_->set_list(old_available_);
  availablefp_->set_list(old_availablefp_);
}


Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
  int code = AcquireNextAvailable(available_).code();
  return Register::Create(code, reg.SizeInBits());
}


FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
  int code = AcquireNextAvailable(availablefp_).code();
  return FPRegister::Create(code, reg.SizeInBits());
}


CPURegister UseScratchRegisterScope::AcquireNextAvailable(
    CPURegList* available) {
  CHECK(!available->IsEmpty());
  CPURegister result = available->PopLowestIndex();
  DCHECK(!AreAliased(result, xzr, csp));
  return result;
}


CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
                                                   const CPURegister& reg) {
  DCHECK(available->IncludesAliasOf(reg));
  available->Remove(reg);
  return reg;
}


#define __ masm->


void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
                              const Label* smi_check) {
  Assembler::BlockPoolsScope scope(masm);
  if (reg.IsValid()) {
    DCHECK(smi_check->is_bound());
    DCHECK(reg.Is64Bits());

    // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
    // 'check' in the other bits. The possible offset is limited in that we
    // use BitField to pack the data, and the underlying data type is a
    // uint32_t.
    uint32_t delta =
        static_cast<uint32_t>(__ InstructionsGeneratedSince(smi_check));
    __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
  } else {
    DCHECK(!smi_check->is_bound());

    // An offset of 0 indicates that there is no patch site.
    __ InlineData(0);
  }
}


InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
    : reg_(NoReg), smi_check_(NULL) {
  InstructionSequence* inline_data = InstructionSequence::At(info);
  DCHECK(inline_data->IsInlineData());
  if (inline_data->IsInlineData()) {
    uint64_t payload = inline_data->InlineData();
    // We use BitField to decode the payload, and BitField can only handle
    // 32-bit values.
    DCHECK(is_uint32(payload));
    if (payload != 0) {
      uint32_t payload32 = static_cast<uint32_t>(payload);
      int reg_code = RegisterBits::decode(payload32);
      reg_ = Register::XRegFromCode(reg_code);
      int smi_check_delta = DeltaBits::decode(payload32);
      DCHECK(smi_check_delta != 0);
      smi_check_ = inline_data->preceding(smi_check_delta);
    }
  }
}


#undef __


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64