// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

// A light-weight ARM Assembler
// Generates user mode instructions for the ARM architecture up to version 5

#ifndef V8_ARM_ASSEMBLER_ARM_H_
#define V8_ARM_ASSEMBLER_ARM_H_

#include <stdio.h>
#include <vector>

#include "src/arm/constants-arm.h"
#include "src/assembler.h"
#include "src/boxed-float.h"
#include "src/double.h"

namespace v8 {
namespace internal {

// clang-format off
#define GENERAL_REGISTERS(V)                              \
  V(r0)  V(r1)  V(r2)  V(r3)  V(r4)  V(r5)  V(r6)  V(r7)  \
  V(r8)  V(r9)  V(r10) V(fp)  V(ip)  V(sp)  V(lr)  V(pc)

#define ALLOCATABLE_GENERAL_REGISTERS(V)                  \
  V(r0)  V(r1)  V(r2)  V(r3)  V(r4)  V(r5)  V(r6)  V(r7)  \
  V(r8)  V(r9)

#define FLOAT_REGISTERS(V)                                \
  V(s0)  V(s1)  V(s2)  V(s3)  V(s4)  V(s5)  V(s6)  V(s7)  \
  V(s8)  V(s9)  V(s10) V(s11) V(s12) V(s13) V(s14) V(s15) \
  V(s16) V(s17) V(s18) V(s19) V(s20) V(s21) V(s22) V(s23) \
  V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)

#define LOW_DOUBLE_REGISTERS(V)                           \
  V(d0)  V(d1)  V(d2)  V(d3)  V(d4)  V(d5)  V(d6)  V(d7)  \
  V(d8)  V(d9)  V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)

#define NON_LOW_DOUBLE_REGISTERS(V)                       \
  V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
  V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)

#define DOUBLE_REGISTERS(V) \
  LOW_DOUBLE_REGISTERS(V) NON_LOW_DOUBLE_REGISTERS(V)

#define SIMD128_REGISTERS(V)                              \
  V(q0)  V(q1)  V(q2)  V(q3)  V(q4)  V(q5)  V(q6)  V(q7)  \
  V(q8)  V(q9)  V(q10) V(q11) V(q12) V(q13) V(q14) V(q15)

#define ALLOCATABLE_DOUBLE_REGISTERS(V)                   \
  V(d0)  V(d1)  V(d2)  V(d3)  V(d4)  V(d5)  V(d6)  V(d7)  \
  V(d8)  V(d9)  V(d10) V(d11) V(d12)                      \
  V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
  V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)

#define ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(V)          \
  V(d0)  V(d1)  V(d2)  V(d3)  V(d4)  V(d5)  V(d6)  V(d7)  \
  V(d8)  V(d9)  V(d10) V(d11) V(d12) V(d15)

#define C_REGISTERS(V)                                            \
  V(cr0)  V(cr1)  V(cr2)  V(cr3)  V(cr4)  V(cr5)  V(cr6)  V(cr7)  \
  V(cr8)  V(cr9)  V(cr10) V(cr11) V(cr12) V(cr15)
// clang-format on

// The ARM ABI does not specify the usage of register r9, which may be reserved
// as the static base or thread register on some platforms, in which case we
// leave it alone. Adjust the value of kR9Available accordingly:
const int kR9Available = 1;  // 1 if available to us, 0 if reserved

// Register list in load/store instructions
// Note that the bit values must match those used in actual instruction encoding
const int kNumRegs = 16;

// Caller-saved/arguments registers
const RegList kJSCallerSaved =
  1 << 0 |  // r0 a1
  1 << 1 |  // r1 a2
  1 << 2 |  // r2 a3
  1 << 3;   // r3 a4

const int kNumJSCallerSaved = 4;

// Callee-saved registers preserved when switching from C to JavaScript
const RegList kCalleeSaved =
  1 <<  4 |  //  r4 v1
  1 <<  5 |  //  r5 v2
  1 <<  6 |  //  r6 v3
  1 <<  7 |  //  r7 v4 (cp in JavaScript code)
  1 <<  8 |  //  r8 v5 (pp in JavaScript code)
  kR9Available <<  9 |  //  r9 v6
  1 << 10 |  // r10 v7
  1 << 11;   // r11 v8 (fp in JavaScript code)

// When calling into C++ (only for C++ calls that can't cause a GC).
// The call code will take care of lr, fp, etc.
const RegList kCallerSaved =
  1 <<  0 |  // r0
  1 <<  1 |  // r1
  1 <<  2 |  // r2
  1 <<  3 |  // r3
  1 <<  9;   // r9

const int kNumCalleeSaved = 7 + kR9Available;

// Double registers d8 to d15 are callee-saved.
const int kNumDoubleCalleeSaved = 8;

// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
const int kNumSafepointRegisters = 16;

// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;

enum RegisterCode {
#define REGISTER_CODE(R) kRegCode_##R,
  GENERAL_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
      kRegAfterLast
};

class Register : public RegisterBase<Register, kRegAfterLast> {
  friend class RegisterBase;
  explicit constexpr Register(int code) : RegisterBase(code) {}
};

ASSERT_TRIVIALLY_COPYABLE(Register);
static_assert(sizeof(Register) == sizeof(int),
              "Register can efficiently be passed by value");

// r7: context register
// r9: lithium scratch
#define DECLARE_REGISTER(R) \
  constexpr Register R = Register::from_code<kRegCode_##R>();
GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
constexpr Register no_reg = Register::no_reg();

constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = false;
constexpr bool kSimdMaskRegisters = false;

enum SwVfpRegisterCode {
#define REGISTER_CODE(R) kSwVfpCode_##R,
  FLOAT_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
      kSwVfpAfterLast
};

// Representation of a list of non-overlapping VFP registers. This list
// represents the data layout of VFP registers as a bitfield:
//   S registers cover 1 bit
//   D registers cover 2 bits
//   Q registers cover 4 bits
//
// This way, we make sure no registers in the list ever overlap. However, a list
// may represent multiple different sets of registers,
// e.g. [d0 s2 s3] <=> [s0 s1 d1].
typedef uint64_t VfpRegList;
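
// Informal usage sketch (assumptions noted inline): lists combine with bitwise
// OR, and overlap between lists or with a single register can be tested with
// bitwise AND. For example:
//   VfpRegList list = d0.ToVfpRegList() | q1.ToVfpRegList();
//       // == (0x3 << 0) | (0xF << 4) == 0xF3
//   bool aliases_s0 = (list & s0.ToVfpRegList()) != 0;  // true: s0 overlaps d0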

// Single word VFP register.
class SwVfpRegister : public RegisterBase<SwVfpRegister, kSwVfpAfterLast> {
 public:
  static constexpr int kSizeInBytes = 4;

  static void split_code(int reg_code, int* vm, int* m) {
    DCHECK(from_code(reg_code).is_valid());
    *m = reg_code & 0x1;
    *vm = reg_code >> 1;
  }
  void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
  VfpRegList ToVfpRegList() const {
    DCHECK(is_valid());
    // Each bit in the list corresponds to a S register.
    return uint64_t{0x1} << code();
  }

 private:
  friend class RegisterBase;
  explicit constexpr SwVfpRegister(int code) : RegisterBase(code) {}
};

ASSERT_TRIVIALLY_COPYABLE(SwVfpRegister);
static_assert(sizeof(SwVfpRegister) == sizeof(int),
              "SwVfpRegister can efficiently be passed by value");

typedef SwVfpRegister FloatRegister;

enum DoubleRegisterCode {
#define REGISTER_CODE(R) kDoubleCode_##R,
  DOUBLE_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
      kDoubleAfterLast
};

// Double word VFP register.
class DwVfpRegister : public RegisterBase<DwVfpRegister, kDoubleAfterLast> {
 public:
  static constexpr int kSizeInBytes = 8;

  inline static int NumRegisters();

  static void split_code(int reg_code, int* vm, int* m) {
    DCHECK(from_code(reg_code).is_valid());
    *m = (reg_code & 0x10) >> 4;
    *vm = reg_code & 0x0F;
  }
  void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
  VfpRegList ToVfpRegList() const {
    DCHECK(is_valid());
    // A D register overlaps two S registers.
    return uint64_t{0x3} << (code() * 2);
  }

 private:
  friend class RegisterBase;
  friend class LowDwVfpRegister;
  explicit constexpr DwVfpRegister(int code) : RegisterBase(code) {}
};

ASSERT_TRIVIALLY_COPYABLE(DwVfpRegister);
static_assert(sizeof(DwVfpRegister) == sizeof(int),
              "DwVfpRegister can efficiently be passed by value");

typedef DwVfpRegister DoubleRegister;


// Double word VFP register d0-15.
class LowDwVfpRegister
    : public RegisterBase<LowDwVfpRegister, kDoubleCode_d16> {
 public:
  constexpr operator DwVfpRegister() const { return DwVfpRegister(reg_code_); }

  SwVfpRegister low() const { return SwVfpRegister::from_code(code() * 2); }
  SwVfpRegister high() const {
    return SwVfpRegister::from_code(code() * 2 + 1);
  }
  VfpRegList ToVfpRegList() const {
    DCHECK(is_valid());
    // A D register overlaps two S registers.
    return uint64_t{0x3} << (code() * 2);
  }

 private:
  friend class RegisterBase;
  explicit constexpr LowDwVfpRegister(int code) : RegisterBase(code) {}
};

enum Simd128RegisterCode {
#define REGISTER_CODE(R) kSimd128Code_##R,
  SIMD128_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
      kSimd128AfterLast
};

// Quad word NEON register.
class QwNeonRegister : public RegisterBase<QwNeonRegister, kSimd128AfterLast> {
 public:
  static void split_code(int reg_code, int* vm, int* m) {
    DCHECK(from_code(reg_code).is_valid());
    int encoded_code = reg_code << 1;
    *m = (encoded_code & 0x10) >> 4;
    *vm = encoded_code & 0x0F;
  }
  void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
  DwVfpRegister low() const { return DwVfpRegister::from_code(code() * 2); }
  DwVfpRegister high() const {
    return DwVfpRegister::from_code(code() * 2 + 1);
  }
  VfpRegList ToVfpRegList() const {
    DCHECK(is_valid());
    // A Q register overlaps four S registers.
    return uint64_t{0xf} << (code() * 4);
  }

 private:
  friend class RegisterBase;
  explicit constexpr QwNeonRegister(int code) : RegisterBase(code) {}
};


typedef QwNeonRegister QuadRegister;

typedef QwNeonRegister Simd128Register;

enum CRegisterCode {
#define REGISTER_CODE(R) kCCode_##R,
  C_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
      kCAfterLast
};

// Coprocessor register
class CRegister : public RegisterBase<CRegister, kCAfterLast> {
  friend class RegisterBase;
  explicit constexpr CRegister(int code) : RegisterBase(code) {}
};

// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "s(N):s(N+1)" is the same as "d(N/2)".
#define DECLARE_FLOAT_REGISTER(R) \
  constexpr SwVfpRegister R = SwVfpRegister::from_code<kSwVfpCode_##R>();
FLOAT_REGISTERS(DECLARE_FLOAT_REGISTER)
#undef DECLARE_FLOAT_REGISTER

#define DECLARE_LOW_DOUBLE_REGISTER(R) \
  constexpr LowDwVfpRegister R = LowDwVfpRegister::from_code<kDoubleCode_##R>();
LOW_DOUBLE_REGISTERS(DECLARE_LOW_DOUBLE_REGISTER)
#undef DECLARE_LOW_DOUBLE_REGISTER

#define DECLARE_DOUBLE_REGISTER(R) \
  constexpr DwVfpRegister R = DwVfpRegister::from_code<kDoubleCode_##R>();
NON_LOW_DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
#undef DECLARE_DOUBLE_REGISTER

constexpr DwVfpRegister no_dreg = DwVfpRegister::no_reg();

#define DECLARE_SIMD128_REGISTER(R) \
  constexpr Simd128Register R = Simd128Register::from_code<kSimd128Code_##R>();
SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
#undef DECLARE_SIMD128_REGISTER

// Aliases for double registers.
constexpr LowDwVfpRegister kFirstCalleeSavedDoubleReg = d8;
constexpr LowDwVfpRegister kLastCalleeSavedDoubleReg = d15;
constexpr LowDwVfpRegister kDoubleRegZero  = d13;

constexpr CRegister no_creg = CRegister::no_reg();

#define DECLARE_C_REGISTER(R) \
  constexpr CRegister R = CRegister::from_code<kCCode_##R>();
C_REGISTERS(DECLARE_C_REGISTER)
#undef DECLARE_C_REGISTER

// Coprocessor number
enum Coprocessor {
  p0  = 0,
  p1  = 1,
  p2  = 2,
  p3  = 3,
  p4  = 4,
  p5  = 5,
  p6  = 6,
  p7  = 7,
  p8  = 8,
  p9  = 9,
  p10 = 10,
  p11 = 11,
  p12 = 12,
  p13 = 13,
  p14 = 14,
  p15 = 15
};

// -----------------------------------------------------------------------------
// Machine instruction Operands

// Class Operand represents a shifter operand in data processing instructions
class Operand BASE_EMBEDDED {
 public:
  // immediate
  V8_INLINE explicit Operand(int32_t immediate,
                             RelocInfo::Mode rmode = RelocInfo::NONE);
  V8_INLINE static Operand Zero();
  V8_INLINE explicit Operand(const ExternalReference& f);
  explicit Operand(Handle<HeapObject> handle);
  V8_INLINE explicit Operand(Smi* value);

  // rm
  V8_INLINE explicit Operand(Register rm);

  // rm <shift_op> shift_imm
  explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
  V8_INLINE static Operand SmiUntag(Register rm) {
    return Operand(rm, ASR, kSmiTagSize);
  }
  V8_INLINE static Operand PointerOffsetFromSmiKey(Register key) {
    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
    return Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize);
  }
  V8_INLINE static Operand DoubleOffsetFromSmiKey(Register key) {
    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kDoubleSizeLog2);
    return Operand(key, LSL, kDoubleSizeLog2 - kSmiTagSize);
  }

  // rm <shift_op> rs
  explicit Operand(Register rm, ShiftOp shift_op, Register rs);
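
  // Informal examples of the forms above (a sketch, not exhaustive):
  //   Operand(4)             // immediate #4
  //   Operand(r1)            // register r1
  //   Operand(r1, LSL, 2)    // r1, LSL #2
  //   Operand(r1, LSR, r2)   // r1, LSR r2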

  static Operand EmbeddedNumber(double number);  // Smi or HeapNumber.
  static Operand EmbeddedCode(CodeStub* stub);

  // Return true if this is a register operand.
  bool IsRegister() const {
    return rm_.is_valid() && rs_ == no_reg && shift_op_ == LSL &&
           shift_imm_ == 0;
  }
  // Return true if this is a register operand shifted with an immediate.
  bool IsImmediateShiftedRegister() const {
    return rm_.is_valid() && !rs_.is_valid();
  }
  // Return true if this is a register operand shifted with a register.
  bool IsRegisterShiftedRegister() const {
    return rm_.is_valid() && rs_.is_valid();
  }

  // Return the number of actual instructions required to implement the given
  // instruction for this particular operand. This can be a single instruction,
  // if no load into a scratch register is necessary, or anything between 2 and
  // 4 instructions when we need to load from the constant pool (depending upon
  // whether the constant pool entry is in the small or extended section). If
  // the instruction this operand is used for is a MOV or MVN instruction the
  // actual instruction to use is required for this calculation. For other
  // instructions instr is ignored.
  //
  // The value returned is only valid as long as no entries are added to the
  // constant pool between this call and the actual instruction being emitted.
  int InstructionsRequired(const Assembler* assembler, Instr instr = 0) const;
  bool MustOutputRelocInfo(const Assembler* assembler) const;
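
  // For example (informal sketch): a mov whose 32-bit immediate cannot be
  // encoded directly might be synthesized as a movw/movt pair (2 instructions)
  // on ARMv7, or as a constant pool load otherwise; the value returned here
  // reflects that choice.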

  inline int32_t immediate() const {
    DCHECK(IsImmediate());
    DCHECK(!IsHeapObjectRequest());
    return value_.immediate;
  }
  bool IsImmediate() const {
    return !rm_.is_valid();
  }

  HeapObjectRequest heap_object_request() const {
    DCHECK(IsHeapObjectRequest());
    return value_.heap_object_request;
  }
  bool IsHeapObjectRequest() const {
    DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
    DCHECK_IMPLIES(is_heap_object_request_,
        rmode_ == RelocInfo::EMBEDDED_OBJECT ||
        rmode_ == RelocInfo::CODE_TARGET);
    return is_heap_object_request_;
  }

  Register rm() const { return rm_; }
  Register rs() const { return rs_; }
  ShiftOp shift_op() const { return shift_op_; }


 private:
  Register rm_ = no_reg;
  Register rs_ = no_reg;
  ShiftOp shift_op_;
  int shift_imm_;                // valid if rm_ != no_reg && rs_ == no_reg
  union Value {
    Value() {}
    HeapObjectRequest heap_object_request;  // if is_heap_object_request_
    int32_t immediate;                      // otherwise
  } value_;                                 // valid if rm_ == no_reg
  bool is_heap_object_request_ = false;
  RelocInfo::Mode rmode_;

  friend class Assembler;
};


// Class MemOperand represents a memory operand in load and store instructions
class MemOperand BASE_EMBEDDED {
 public:
  // [rn +/- offset]      Offset/NegOffset
  // [rn +/- offset]!     PreIndex/NegPreIndex
  // [rn], +/- offset     PostIndex/NegPostIndex
  // offset is any signed 32-bit value; offset is first loaded to a scratch
  // register if it does not fit the addressing mode (12-bit unsigned and sign
  // bit)
  explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);

  // [rn +/- rm]          Offset/NegOffset
  // [rn +/- rm]!         PreIndex/NegPreIndex
  // [rn], +/- rm         PostIndex/NegPostIndex
  explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);

  // [rn +/- rm <shift_op> shift_imm]      Offset/NegOffset
  // [rn +/- rm <shift_op> shift_imm]!     PreIndex/NegPreIndex
  // [rn], +/- rm <shift_op> shift_imm     PostIndex/NegPostIndex
  explicit MemOperand(Register rn, Register rm,
                      ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
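
  // Informal examples of the forms above (a sketch):
  //   MemOperand(r0, 42)              // [r0, #+42]        (Offset)
  //   MemOperand(r0, 42, PreIndex)    // [r0, #+42]!
  //   MemOperand(r0, r1, PostIndex)   // [r0], +r1
  //   MemOperand(r0, r1, LSL, 2)      // [r0, +r1, LSL #2]
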
  V8_INLINE static MemOperand PointerAddressFromSmiKey(Register array,
                                                       Register key,
                                                       AddrMode am = Offset) {
    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
    return MemOperand(array, key, LSL, kPointerSizeLog2 - kSmiTagSize, am);
  }

  void set_offset(int32_t offset) {
    DCHECK(rm_ == no_reg);
    offset_ = offset;
  }

  uint32_t offset() const {
    DCHECK(rm_ == no_reg);
    return offset_;
  }

  Register rn() const { return rn_; }
  Register rm() const { return rm_; }
  AddrMode am() const { return am_; }

  bool OffsetIsUint12Encodable() const {
    return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
  }

 private:
  Register rn_;  // base
  Register rm_;  // register offset
  int32_t offset_;  // valid if rm_ == no_reg
  ShiftOp shift_op_;
  int shift_imm_;  // valid if rm_ != no_reg && rs_ == no_reg
  AddrMode am_;  // bits P, U, and W

  friend class Assembler;
};


// Class NeonMemOperand represents a memory operand in load and
// store NEON instructions
class NeonMemOperand BASE_EMBEDDED {
 public:
  // [rn {:align}]       Offset
  // [rn {:align}]!      PostIndex
  explicit NeonMemOperand(Register rn, AddrMode am = Offset, int align = 0);

  // [rn {:align}], rm   PostIndex
  explicit NeonMemOperand(Register rn, Register rm, int align = 0);

  Register rn() const { return rn_; }
  Register rm() const { return rm_; }
  int align() const { return align_; }

 private:
  void SetAlignment(int align);

  Register rn_;  // base
  Register rm_;  // register increment
  int align_;
};


// Class NeonListOperand represents a list of NEON registers
class NeonListOperand BASE_EMBEDDED {
 public:
  explicit NeonListOperand(DoubleRegister base, int register_count = 1)
    : base_(base), register_count_(register_count) {}
  explicit NeonListOperand(QwNeonRegister q_reg)
    : base_(q_reg.low()), register_count_(2) {}
  DoubleRegister base() const { return base_; }
  int register_count() { return register_count_; }
  int length() const { return register_count_ - 1; }
  NeonListType type() const {
    switch (register_count_) {
      default: UNREACHABLE();
      // Fall through.
      case 1: return nlt_1;
      case 2: return nlt_2;
      case 3: return nlt_3;
      case 4: return nlt_4;
    }
  }
 private:
  DoubleRegister base_;
  int register_count_;
};

class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
 public:
  // Create an assembler. Instructions and relocation information are emitted
  // into a buffer, with the instructions starting from the beginning and the
  // relocation information starting from the end of the buffer. See CodeDesc
  // for a detailed comment on the layout (globals.h).
  //
  // If the provided buffer is nullptr, the assembler allocates and grows its
  // own buffer, and buffer_size determines the initial buffer size. The buffer
  // is owned by the assembler and deallocated upon destruction of the
  // assembler.
  //
  // If the provided buffer is not nullptr, the assembler uses the provided
  // buffer for code generation and assumes its size to be buffer_size. If the
  // buffer is too small, a fatal error occurs. No deallocation of the buffer is
  // done upon destruction of the assembler.
  Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
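
  // For example (informal): Assembler(options, nullptr, 256) starts with an
  // assembler-owned, growable 256-byte buffer, whereas
  // Assembler(options, existing_buffer, existing_size) emits into caller-owned
  // memory of fixed size (existing_buffer and existing_size are illustrative
  // names only).
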
  virtual ~Assembler();

  // GetCode emits any pending (non-emitted) code and fills the descriptor
  // desc. GetCode() is idempotent; it returns the same result if no other
  // Assembler functions are invoked in between GetCode() calls.
  void GetCode(Isolate* isolate, CodeDesc* desc);

  // Label operations & relative jumps (PPUM Appendix D)
  //
  // Takes a branch opcode (cc) and a label (L) and generates
  // either a backward branch or a forward branch and links it
  // to the label fixup chain. Usage:
  //
  // Label L;    // unbound label
  // j(cc, &L);  // forward branch to unbound label
  // bind(&L);   // bind label to the current pc
  // j(cc, &L);  // backward branch to bound label
  // bind(&L);   // illegal: a label may be bound only once
  //
  // Note: The same Label can be used for forward and backward branches
  // but it may be bound only once.

  void bind(Label* L);  // binds an unbound label L to the current code position

  // Returns the branch offset to the given label from the current code
  // position. Links the label to the current position if it is still unbound.
  int branch_offset(Label* L);

  // Returns true if the given pc address is the start of a constant pool load
  // instruction sequence.
  V8_INLINE static bool is_constant_pool_load(Address pc);

  // Return the address in the constant pool of the code target address used by
  // the branch/call instruction at pc, or the object in a mov.
  V8_INLINE static Address constant_pool_entry_address(Address pc,
                                                       Address constant_pool);

  // Read/Modify the code target address in the branch/call instruction at pc.
  // The isolate argument is unused (and may be nullptr) when skipping flushing.
  V8_INLINE static Address target_address_at(Address pc, Address constant_pool);
  V8_INLINE static void set_target_address_at(
      Address pc, Address constant_pool, Address target,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);

  // Return the code target address at a call site from the return address
  // of that call in the instruction stream.
  V8_INLINE static Address target_address_from_return_address(Address pc);

  // Given the address of the beginning of a call, return the address
  // in the instruction stream that the call will return from.
  V8_INLINE static Address return_address_from_call_start(Address pc);

  // This sets the branch destination (which is in the constant pool on ARM).
  // This is for calls and branches within generated code.
  inline static void deserialization_set_special_target_at(
      Address constant_pool_entry, Code* code, Address target);

  // Get the size of the special target encoded at 'location'.
  inline static int deserialization_special_target_size(Address location);

  // This sets the internal reference at the pc.
  inline static void deserialization_set_target_internal_reference_at(
      Address pc, Address target,
      RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);

  // Here we are patching the address in the constant pool, not the actual call
  // instruction.  The address in the constant pool is the same size as a
  // pointer.
  static constexpr int kSpecialTargetSize = kPointerSize;

  // Size of an instruction.
  static constexpr int kInstrSize = sizeof(Instr);

  RegList* GetScratchRegisterList() { return &scratch_register_list_; }
  VfpRegList* GetScratchVfpRegisterList() {
    return &scratch_vfp_register_list_;
  }

  // ---------------------------------------------------------------------------
  // Code generation

  // Insert the smallest number of nop instructions
  // possible to align the pc offset to a multiple
  // of m. m must be a power of 2 (>= 4).
  void Align(int m);
  // Insert the smallest number of zero bytes possible to align the pc offset
  // to a multiple of m. m must be a power of 2 (>= 2).
  void DataAlign(int m);
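
  // For example (informal): since ARM instructions are 4 bytes, Align(8) emits
  // at most one nop, while DataAlign(8) may emit up to 7 zero bytes.
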
  // Aligns code to something that's optimal for a jump target for the platform.
  void CodeTargetAlign();

  // Branch instructions
  void b(int branch_offset, Condition cond = al,
         RelocInfo::Mode rmode = RelocInfo::NONE);
  void bl(int branch_offset, Condition cond = al,
          RelocInfo::Mode rmode = RelocInfo::NONE);
  void blx(int branch_offset);  // v5 and above
  void blx(Register target, Condition cond = al);  // v5 and above
  void bx(Register target, Condition cond = al);  // v5 and above, plus v4t

  // Convenience branch instructions using labels
  void b(Label* L, Condition cond = al);
  void b(Condition cond, Label* L) { b(L, cond); }
  void bl(Label* L, Condition cond = al);
  void bl(Condition cond, Label* L) { bl(L, cond); }
  void blx(Label* L);  // v5 and above

  // Data-processing instructions

  void and_(Register dst, Register src1, const Operand& src2,
            SBit s = LeaveCC, Condition cond = al);
  void and_(Register dst, Register src1, Register src2, SBit s = LeaveCC,
            Condition cond = al);

  void eor(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);

  void sub(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);
  void sub(Register dst, Register src1, Register src2,
           SBit s = LeaveCC, Condition cond = al);

  void rsb(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);

  void add(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);
  void add(Register dst, Register src1, Register src2,
           SBit s = LeaveCC, Condition cond = al);

  void adc(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);

  void sbc(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);

  void rsc(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);

  void tst(Register src1, const Operand& src2, Condition cond = al);
  void tst(Register src1, Register src2, Condition cond = al);

  void teq(Register src1, const Operand& src2, Condition cond = al);

  void cmp(Register src1, const Operand& src2, Condition cond = al);
  void cmp(Register src1, Register src2, Condition cond = al);

  void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al);

  void cmn(Register src1, const Operand& src2, Condition cond = al);

  void orr(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);
  void orr(Register dst, Register src1, Register src2,
           SBit s = LeaveCC, Condition cond = al);

  void mov(Register dst, const Operand& src,
           SBit s = LeaveCC, Condition cond = al);
  void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al);

  // Load the position of the label relative to the generated code object
  // pointer in a register.
  void mov_label_offset(Register dst, Label* label);

  // ARMv7 instructions for loading a 32 bit immediate in two instructions.
  // The constant for movw and movt should be in the range 0-0xffff.
  void movw(Register reg, uint32_t immediate, Condition cond = al);
  void movt(Register reg, uint32_t immediate, Condition cond = al);
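
  // For example (informal): a full 32-bit constant such as 0x12345678 can be
  // materialized with
  //   movw(r0, 0x5678);  // low half-word
  //   movt(r0, 0x1234);  // high half-word, low half left intact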

  void bic(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);

  void mvn(Register dst, const Operand& src,
           SBit s = LeaveCC, Condition cond = al);

  // Shift instructions

  void asr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
           Condition cond = al);

  void lsl(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
           Condition cond = al);

  void lsr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
           Condition cond = al);

  // Multiply instructions

  void mla(Register dst, Register src1, Register src2, Register srcA,
           SBit s = LeaveCC, Condition cond = al);

  void mls(Register dst, Register src1, Register src2, Register srcA,
           Condition cond = al);

  void sdiv(Register dst, Register src1, Register src2,
            Condition cond = al);

  void udiv(Register dst, Register src1, Register src2, Condition cond = al);

  void mul(Register dst, Register src1, Register src2,
           SBit s = LeaveCC, Condition cond = al);

  void smmla(Register dst, Register src1, Register src2, Register srcA,
             Condition cond = al);

  void smmul(Register dst, Register src1, Register src2, Condition cond = al);

  void smlal(Register dstL, Register dstH, Register src1, Register src2,
             SBit s = LeaveCC, Condition cond = al);

  void smull(Register dstL, Register dstH, Register src1, Register src2,
             SBit s = LeaveCC, Condition cond = al);

  void umlal(Register dstL, Register dstH, Register src1, Register src2,
             SBit s = LeaveCC, Condition cond = al);

  void umull(Register dstL, Register dstH, Register src1, Register src2,
             SBit s = LeaveCC, Condition cond = al);

  // Miscellaneous arithmetic instructions

  void clz(Register dst, Register src, Condition cond = al);  // v5 and above

  // Saturating instructions. v6 and above.

  // Unsigned saturate.
  //
  // Saturate an optionally shifted signed value to an unsigned range.
  //
  //   usat dst, #satpos, src
  //   usat dst, #satpos, src, lsl #sh
  //   usat dst, #satpos, src, asr #sh
  //
  // Register dst will contain:
  //
  //   0,                 if s < 0
  //   (1 << satpos) - 1, if s > ((1 << satpos) - 1)
  //   s,                 otherwise
  //
  // where s is the contents of src after shifting (if used.)
  void usat(Register dst, int satpos, const Operand& src, Condition cond = al);

  // Bitfield manipulation instructions. v7 and above.

  void ubfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);

  void sbfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);

  void bfc(Register dst, int lsb, int width, Condition cond = al);

  void bfi(Register dst, Register src, int lsb, int width,
           Condition cond = al);

  void pkhbt(Register dst, Register src1, const Operand& src2,
             Condition cond = al);

  void pkhtb(Register dst, Register src1, const Operand& src2,
             Condition cond = al);

  void sxtb(Register dst, Register src, int rotate = 0, Condition cond = al);
  void sxtab(Register dst, Register src1, Register src2, int rotate = 0,
             Condition cond = al);
  void sxth(Register dst, Register src, int rotate = 0, Condition cond = al);
  void sxtah(Register dst, Register src1, Register src2, int rotate = 0,
             Condition cond = al);

  void uxtb(Register dst, Register src, int rotate = 0, Condition cond = al);
  void uxtab(Register dst, Register src1, Register src2, int rotate = 0,
             Condition cond = al);
  void uxtb16(Register dst, Register src, int rotate = 0, Condition cond = al);
  void uxth(Register dst, Register src, int rotate = 0, Condition cond = al);
  void uxtah(Register dst, Register src1, Register src2, int rotate = 0,
             Condition cond = al);

  // Reverse the bits in a register.
  void rbit(Register dst, Register src, Condition cond = al);

  // Status register access instructions

  void mrs(Register dst, SRegister s, Condition cond = al);
  void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);

  // Load/Store instructions
  void ldr(Register dst, const MemOperand& src, Condition cond = al);
  void str(Register src, const MemOperand& dst, Condition cond = al);
  void ldrb(Register dst, const MemOperand& src, Condition cond = al);
  void strb(Register src, const MemOperand& dst, Condition cond = al);
  void ldrh(Register dst, const MemOperand& src, Condition cond = al);
  void strh(Register src, const MemOperand& dst, Condition cond = al);
  void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
  void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
  void ldrd(Register dst1,
            Register dst2,
            const MemOperand& src, Condition cond = al);
  void strd(Register src1,
            Register src2,
            const MemOperand& dst, Condition cond = al);

  // Load literal from a pc relative address.
  void ldr_pcrel(Register dst, int imm12, Condition cond = al);

  // Load/Store exclusive instructions
  void ldrex(Register dst, Register src, Condition cond = al);
  void strex(Register src1, Register src2, Register dst, Condition cond = al);
  void ldrexb(Register dst, Register src, Condition cond = al);
  void strexb(Register src1, Register src2, Register dst, Condition cond = al);
  void ldrexh(Register dst, Register src, Condition cond = al);
  void strexh(Register src1, Register src2, Register dst, Condition cond = al);

  // Preload instructions
  void pld(const MemOperand& address);

  // Load/Store multiple instructions
  void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
  void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);

  // Exception-generating instructions and debugging support
  void stop(const char* msg,
            Condition cond = al,
            int32_t code = kDefaultStopCode);

  void bkpt(uint32_t imm16);  // v5 and above
  void svc(uint32_t imm24, Condition cond = al);

  // Synchronization instructions.
  // On ARMv6, an equivalent CP15 operation will be used.
  void dmb(BarrierOption option);
  void dsb(BarrierOption option);
  void isb(BarrierOption option);

  // Conditional speculation barrier.
  void csdb();

  // Coprocessor instructions

  void cdp(Coprocessor coproc, int opcode_1,
           CRegister crd, CRegister crn, CRegister crm,
           int opcode_2, Condition cond = al);

  void cdp2(Coprocessor coproc, int opcode_1,
            CRegister crd, CRegister crn, CRegister crm,
            int opcode_2);  // v5 and above

  void mcr(Coprocessor coproc, int opcode_1,
           Register rd, CRegister crn, CRegister crm,
           int opcode_2 = 0, Condition cond = al);

  void mcr2(Coprocessor coproc, int opcode_1,
            Register rd, CRegister crn, CRegister crm,
            int opcode_2 = 0);  // v5 and above

  void mrc(Coprocessor coproc, int opcode_1,
           Register rd, CRegister crn, CRegister crm,
           int opcode_2 = 0, Condition cond = al);

  void mrc2(Coprocessor coproc, int opcode_1,
            Register rd, CRegister crn, CRegister crm,
            int opcode_2 = 0);  // v5 and above

  void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
           LFlag l = Short, Condition cond = al);
  void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
           LFlag l = Short, Condition cond = al);

  void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
            LFlag l = Short);  // v5 and above
  void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
            LFlag l = Short);  // v5 and above

  // Support for VFP.
  // All these APIs support S0 to S31 and D0 to D31.

  void vldr(const DwVfpRegister dst,
            const Register base,
            int offset,
            const Condition cond = al);
  void vldr(const DwVfpRegister dst,
            const MemOperand& src,
            const Condition cond = al);

  void vldr(const SwVfpRegister dst,
            const Register base,
            int offset,
            const Condition cond = al);
  void vldr(const SwVfpRegister dst,
            const MemOperand& src,
            const Condition cond = al);

  void vstr(const DwVfpRegister src,
            const Register base,
            int offset,
            const Condition cond = al);
  void vstr(const DwVfpRegister src,
            const MemOperand& dst,
            const Condition cond = al);

  void vstr(const SwVfpRegister src,
            const Register base,
            int offset,
            const Condition cond = al);
  void vstr(const SwVfpRegister src,
            const MemOperand& dst,
            const Condition cond = al);

  void vldm(BlockAddrMode am,
            Register base,
            DwVfpRegister first,
            DwVfpRegister last,
            Condition cond = al);

  void vstm(BlockAddrMode am,
            Register base,
            DwVfpRegister first,
            DwVfpRegister last,
            Condition cond = al);

  void vldm(BlockAddrMode am,
            Register base,
            SwVfpRegister first,
            SwVfpRegister last,
            Condition cond = al);

  void vstm(BlockAddrMode am,
            Register base,
            SwVfpRegister first,
            SwVfpRegister last,
            Condition cond = al);

  void vmov(const SwVfpRegister dst, Float32 imm);
  void vmov(const DwVfpRegister dst,
            Double imm,
            const Register extra_scratch = no_reg);
  void vmov(const SwVfpRegister dst,
            const SwVfpRegister src,
            const Condition cond = al);
  void vmov(const DwVfpRegister dst,
            const DwVfpRegister src,
            const Condition cond = al);
  void vmov(const DwVfpRegister dst,
            const Register src1,
            const Register src2,
            const Condition cond = al);
  void vmov(const Register dst1,
            const Register dst2,
            const DwVfpRegister src,
            const Condition cond = al);
  void vmov(const SwVfpRegister dst,
            const Register src,
            const Condition cond = al);
  void vmov(const Register dst,
            const SwVfpRegister src,
            const Condition cond = al);
  void vcvt_f64_s32(const DwVfpRegister dst,
                    const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f32_s32(const SwVfpRegister dst,
                    const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f64_u32(const DwVfpRegister dst,
                    const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f32_u32(const SwVfpRegister dst,
                    const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_s32_f32(const SwVfpRegister dst,
                    const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_u32_f32(const SwVfpRegister dst,
                    const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_s32_f64(const SwVfpRegister dst,
                    const DwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_u32_f64(const SwVfpRegister dst,
                    const DwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f64_f32(const DwVfpRegister dst,
                    const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f32_f64(const SwVfpRegister dst,
                    const DwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f64_s32(const DwVfpRegister dst,
                    int fraction_bits,
                    const Condition cond = al);

  void vmrs(const Register dst, const Condition cond = al);
  void vmsr(const Register dst, const Condition cond = al);

  void vneg(const DwVfpRegister dst,
            const DwVfpRegister src,
            const Condition cond = al);
  void vneg(const SwVfpRegister dst, const SwVfpRegister src,
            const Condition cond = al);
  void vabs(const DwVfpRegister dst,
            const DwVfpRegister src,
            const Condition cond = al);
  void vabs(const SwVfpRegister dst, const SwVfpRegister src,
            const Condition cond = al);
  void vadd(const DwVfpRegister dst,
            const DwVfpRegister src1,
            const DwVfpRegister src2,
            const Condition cond = al);
  void vadd(const SwVfpRegister dst, const SwVfpRegister src1,
            const SwVfpRegister src2, const Condition cond = al);
  void vsub(const DwVfpRegister dst,
            const DwVfpRegister src1,
            const DwVfpRegister src2,
            const Condition cond = al);
  void vsub(const SwVfpRegister dst, const SwVfpRegister src1,
            const SwVfpRegister src2, const Condition cond = al);
  void vmul(const DwVfpRegister dst,
            const DwVfpRegister src1,
            const DwVfpRegister src2,
            const Condition cond = al);
  void vmul(const SwVfpRegister dst, const SwVfpRegister src1,
            const SwVfpRegister src2, const Condition cond = al);
  void vmla(const DwVfpRegister dst,
            const DwVfpRegister src1,
            const DwVfpRegister src2,
            const Condition cond = al);
  void vmla(const SwVfpRegister dst, const SwVfpRegister src1,
            const SwVfpRegister src2, const Condition cond = al);
  void vmls(const DwVfpRegister dst,
            const DwVfpRegister src1,
            const DwVfpRegister src2,
            const Condition cond = al);
  void vmls(const SwVfpRegister dst, const SwVfpRegister src1,
            const SwVfpRegister src2, const Condition cond = al);
  void vdiv(const DwVfpRegister dst,
            const DwVfpRegister src1,
            const DwVfpRegister src2,
            const Condition cond = al);
  void vdiv(const SwVfpRegister dst, const SwVfpRegister src1,
            const SwVfpRegister src2, const Condition cond = al);
  void vcmp(const DwVfpRegister src1,
            const DwVfpRegister src2,
            const Condition cond = al);
  void vcmp(const SwVfpRegister src1, const SwVfpRegister src2,
            const Condition cond = al);
  void vcmp(const DwVfpRegister src1,
            const double src2,
            const Condition cond = al);
  void vcmp(const SwVfpRegister src1, const float src2,
            const Condition cond = al);

  void vmaxnm(const DwVfpRegister dst,
              const DwVfpRegister src1,
              const DwVfpRegister src2);
  void vmaxnm(const SwVfpRegister dst,
              const SwVfpRegister src1,
              const SwVfpRegister src2);
  void vminnm(const DwVfpRegister dst,
              const DwVfpRegister src1,
              const DwVfpRegister src2);
  void vminnm(const SwVfpRegister dst,
              const SwVfpRegister src1,
              const SwVfpRegister src2);

  // VSEL supports cond in {eq, ne, ge, lt, gt, le, vs, vc}.
  void vsel(const Condition cond,
            const DwVfpRegister dst,
            const DwVfpRegister src1,
            const DwVfpRegister src2);
  void vsel(const Condition cond,
            const SwVfpRegister dst,
            const SwVfpRegister src1,
            const SwVfpRegister src2);

  void vsqrt(const DwVfpRegister dst,
             const DwVfpRegister src,
             const Condition cond = al);
  void vsqrt(const SwVfpRegister dst, const SwVfpRegister src,
             const Condition cond = al);

  // ARMv8 rounding instructions.
  void vrinta(const SwVfpRegister dst, const SwVfpRegister src);
  void vrinta(const DwVfpRegister dst, const DwVfpRegister src);
  void vrintn(const SwVfpRegister dst, const SwVfpRegister src);
  void vrintn(const DwVfpRegister dst, const DwVfpRegister src);
  void vrintm(const SwVfpRegister dst, const SwVfpRegister src);
  void vrintm(const DwVfpRegister dst, const DwVfpRegister src);
  void vrintp(const SwVfpRegister dst, const SwVfpRegister src);
  void vrintp(const DwVfpRegister dst, const DwVfpRegister src);
  void vrintz(const SwVfpRegister dst, const SwVfpRegister src,
              const Condition cond = al);
  void vrintz(const DwVfpRegister dst, const DwVfpRegister src,
              const Condition cond = al);

  // Support for NEON.

  // All these APIs support D0 to D31 and Q0 to Q15.
  void vld1(NeonSize size,
            const NeonListOperand& dst,
            const NeonMemOperand& src);
  void vst1(NeonSize size,
            const NeonListOperand& src,
            const NeonMemOperand& dst);
  // dt represents the narrower type
  void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
  // dt represents the narrower type.
  void vqmovn(NeonDataType dt, DwVfpRegister dst, QwNeonRegister src);

  // Only unconditional core <-> scalar moves are currently supported.
  void vmov(NeonDataType dt, DwVfpRegister dst, int index, Register src);
  void vmov(NeonDataType dt, Register dst, DwVfpRegister src, int index);

  void vmov(QwNeonRegister dst, QwNeonRegister src);
  void vdup(NeonSize size, QwNeonRegister dst, Register src);
  void vdup(NeonSize size, QwNeonRegister dst, DwVfpRegister src, int index);
  void vdup(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int index);

  void vcvt_f32_s32(QwNeonRegister dst, QwNeonRegister src);
  void vcvt_f32_u32(QwNeonRegister dst, QwNeonRegister src);
  void vcvt_s32_f32(QwNeonRegister dst, QwNeonRegister src);
  void vcvt_u32_f32(QwNeonRegister dst, QwNeonRegister src);

  void vmvn(QwNeonRegister dst, QwNeonRegister src);
  void vswp(DwVfpRegister dst, DwVfpRegister src);
  void vswp(QwNeonRegister dst, QwNeonRegister src);
  void vabs(QwNeonRegister dst, QwNeonRegister src);
  void vabs(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
  void vneg(QwNeonRegister dst, QwNeonRegister src);
  void vneg(NeonSize size, QwNeonRegister dst, QwNeonRegister src);

  void vand(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void veor(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
  void veor(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vbsl(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vorr(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vadd(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vadd(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vqadd(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
             QwNeonRegister src2);
  void vsub(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vsub(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vqsub(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
             QwNeonRegister src2);
  void vmul(QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vmul(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vmin(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vmin(NeonDataType dt, QwNeonRegister dst,
            QwNeonRegister src1, QwNeonRegister src2);
  void vmax(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vmax(NeonDataType dt, QwNeonRegister dst,
            QwNeonRegister src1, QwNeonRegister src2);
  void vpadd(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
  void vpadd(NeonSize size, DwVfpRegister dst, DwVfpRegister src1,
             DwVfpRegister src2);
  void vpmin(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
             DwVfpRegister src2);
  void vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
             DwVfpRegister src2);
  void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
  void vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
  void vsli(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift);
  void vsri(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift);
  // vrecpe and vrsqrte only support floating point lanes.
  void vrecpe(QwNeonRegister dst, QwNeonRegister src);
  void vrsqrte(QwNeonRegister dst, QwNeonRegister src);
  void vrecps(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vrsqrts(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vceq(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vcge(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vcge(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vcgt(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vcgt(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vext(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2,
            int bytes);
  void vzip(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
  void vzip(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
  void vuzp(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
  void vuzp(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
  void vrev16(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
  void vrev32(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
  void vrev64(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
  void vtrn(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
  void vtrn(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
  void vtbl(DwVfpRegister dst, const NeonListOperand& list,
            DwVfpRegister index);
  void vtbx(DwVfpRegister dst, const NeonListOperand& list,
            DwVfpRegister index);
1330

1331
  // Pseudo instructions
1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347

  // Different nop operations are used by the code generator to detect certain
  // states of the generated code.
  enum NopMarkerTypes {
    NON_MARKING_NOP = 0,
    DEBUG_BREAK_NOP,
    // IC markers.
    PROPERTY_ACCESS_INLINED,
    PROPERTY_ACCESS_INLINED_CONTEXT,
    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
    // Helper values.
    LAST_CODE_MARKER,
    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
  };

  void nop(int type = 0);   // 0 is the default non-marking type.
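
  // Illustrative sketch (not part of the original interface): a marking nop
  // can be emitted with one of the NopMarkerTypes values and later recognized
  // with IsNop(). The `assm` instance and `some_pc_offset` are assumed names.
  //
  //   assm.nop(Assembler::DEBUG_BREAK_NOP);
  //   ...
  //   Instr instr = assm.instr_at(some_pc_offset);
  //   bool is_marker = Assembler::IsNop(instr, Assembler::DEBUG_BREAK_NOP);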

  void push(Register src, Condition cond = al) {
    str(src, MemOperand(sp, 4, NegPreIndex), cond);
  }

  void pop(Register dst, Condition cond = al) {
    ldr(dst, MemOperand(sp, 4, PostIndex), cond);
  }

  void pop();

  void vpush(QwNeonRegister src, Condition cond = al) {
    vstm(db_w, sp, src.low(), src.high(), cond);
  }

  void vpush(DwVfpRegister src, Condition cond = al) {
    vstm(db_w, sp, src, src, cond);
  }

  void vpush(SwVfpRegister src, Condition cond = al) {
    vstm(db_w, sp, src, src, cond);
  }

  void vpop(DwVfpRegister dst, Condition cond = al) {
    vldm(ia_w, sp, dst, dst, cond);
  }
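
  // Illustrative pairing (a sketch, not from the original header): vpush and
  // vpop are typically used to save and restore a VFP register around code
  // that clobbers it; `assm` is a hypothetical Assembler instance.
  //
  //   assm.vpush(d8);   // vstm db_w: sp -= 8, store d8
  //   ...               // clobbering code
  //   assm.vpop(d8);    // vldm ia_w: reload d8, sp += 8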

  // Jump unconditionally to given label.
  void jmp(Label* L) { b(L, al); }

  // Check the code size generated from label to here.
  int SizeOfCodeGeneratedSince(Label* label) {
    return pc_offset() - label->pos();
  }

  // Check the number of instructions generated from label to here.
  int InstructionsGeneratedSince(Label* label) {
    return SizeOfCodeGeneratedSince(label) / kInstrSize;
  }
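
  // Usage sketch (illustrative only; `assm` and `start` are assumed names):
  //
  //   Label start;
  //   assm.bind(&start);
  //   assm.add(r0, r0, Operand(1));
  //   assm.add(r0, r0, Operand(2));
  //   DCHECK_EQ(2, assm.InstructionsGeneratedSince(&start));
  //   DCHECK_EQ(2 * kInstrSize, assm.SizeOfCodeGeneratedSince(&start));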

  // Check whether an immediate fits an addressing mode 1 instruction.
  static bool ImmediateFitsAddrMode1Instruction(int32_t imm32);

  // Check whether an immediate fits an addressing mode 2 instruction.
  bool ImmediateFitsAddrMode2Instruction(int32_t imm32);
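
  // Worked example (informative comment, not from the original header): an
  // addressing mode 1 immediate is an 8-bit value rotated right by an even
  // amount (0, 2, ..., 30). Hence:
  //   0x000000FF  fits  (0xFF, rotation 0)
  //   0xFF000000  fits  (0xFF rotated right by 8)
  //   0x12345678  does not fit (significant bits span more than 8 bits)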

  // Scope class for postponing the constant pool generation.
  class BlockConstPoolScope {
   public:
    explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockConstPool();
    }
    ~BlockConstPoolScope() {
      assem_->EndBlockConstPool();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
  };
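
  // Usage sketch (illustrative only; `assm` is a hypothetical Assembler):
  //
  //   {
  //     Assembler::BlockConstPoolScope block_const_pool(&assm);
  //     // Emit a sequence that must not be split by a constant pool here.
  //   }
  //   // Constant pool emission may resume once the scope ends.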

  // Record a comment relocation entry that can be used by a disassembler.
  // Use --code-comments to enable.
  void RecordComment(const char* msg);
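  // Illustrative use (a sketch; assumes the --code-comments flag is enabled
  // and `assm` is an Assembler instance):
  //   assm.RecordComment("[ inlined fast path");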

  // Record a deoptimization reason that can be used by a log or cpu profiler.
  // Use --trace-deopt to enable.
  void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
                         int id);

  // Record the emission of a constant pool.
  //
  // The emission of the constant pool depends on the size of the code
  // generated and the number of RelocInfo entries recorded.
  // The Debug mechanism needs to map code offsets between two versions of a
  // function, compiled with and without debugger support (see for example
  // Debug::PrepareForBreakPoints()).
  // Compiling functions with debugger support generates additional code
  // (DebugCodegen::GenerateSlot()). This may affect the emission of the
  // constant pools and cause the version of the code with debugger support to
  // have constant pools generated in different places.
  // Recording the position and size of emitted constant pools makes it
  // possible to correctly compute the offset mappings between the different
  // versions of a function in all situations.
  //
  // The parameter indicates the size of the constant pool (in bytes), including
  // the marker and the branch over the data.
  void RecordConstPool(int size);


  // Writes a single byte, 32-bit word, or 64-bit word of data into the code
  // stream. Used for inline tables, e.g., jump-tables. CheckConstPool()
  // should be called before any use of db/dd/dq/dp to ensure that constant
  // pools are not emitted as part of the tables generated.
  void db(uint8_t data);
  void dd(uint32_t data);
  void dq(uint64_t data);
  void dp(uintptr_t data) { dd(data); }
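
  // Sketch of emitting an inline jump table (illustrative only; `assm`,
  // `table`, and `kTableSize` are assumed names):
  //
  //   assm.CheckConstPool(true, false);  // assumed arguments: force pending
  //                                      // constants out before the table
  //   assm.bind(&table);
  //   for (int i = 0; i < kTableSize; i++) {
  //     assm.dd(0);  // placeholder entry, patched later
  //   }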

  // Read/patch instructions
  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
  void instr_at_put(int pos, Instr instr) {
    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
  }
  static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
  static void instr_at_put(Address pc, Instr instr) {
    *reinterpret_cast<Instr*>(pc) = instr;
  }
  static Condition GetCondition(Instr instr);
  static bool IsLdrRegisterImmediate(Instr instr);
  static bool IsVldrDRegisterImmediate(Instr instr);
  static int GetLdrRegisterImmediateOffset(Instr instr);
  static int GetVldrDRegisterImmediateOffset(Instr instr);
  static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
  static Instr SetVldrDRegisterImmediateOffset(Instr instr, int offset);
  static bool IsStrRegisterImmediate(Instr instr);
  static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
  static bool IsAddRegisterImmediate(Instr instr);
  static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
  static Register GetRd(Instr instr);
  static Register GetRn(Instr instr);
  static Register GetRm(Instr instr);
  static bool IsPush(Instr instr);
  static bool IsPop(Instr instr);
  static bool IsStrRegFpOffset(Instr instr);
  static bool IsLdrRegFpOffset(Instr instr);
  static bool IsStrRegFpNegOffset(Instr instr);
  static bool IsLdrRegFpNegOffset(Instr instr);
  static bool IsLdrPcImmediateOffset(Instr instr);
  static bool IsVldrDPcImmediateOffset(Instr instr);
  static bool IsBlxReg(Instr instr);
  static bool IsBlxIp(Instr instr);
  static bool IsTstImmediate(Instr instr);
  static bool IsCmpRegister(Instr instr);
  static bool IsCmpImmediate(Instr instr);
  static Register GetCmpImmediateRegister(Instr instr);
  static int GetCmpImmediateRawImmediate(Instr instr);
  static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
  static bool IsMovImmed(Instr instr);
  static bool IsOrrImmed(Instr instr);
  static bool IsMovT(Instr instr);
  static Instr GetMovTPattern();
  static bool IsMovW(Instr instr);
  static Instr GetMovWPattern();
  static Instr EncodeMovwImmediate(uint32_t immediate);
  static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate);
  static int DecodeShiftImm(Instr instr);
  static Instr PatchShiftImm(Instr instr, int immed);

  // Constants in pools are accessed via pc relative addressing, which can
  // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
  // PC-relative loads, thereby defining a maximum distance between the
  // instruction and the accessed constant.
  static constexpr int kMaxDistToIntPool = 4 * KB;
  static constexpr int kMaxDistToFPPool = 1 * KB;
  // All relocations could be integer; the integer pool distance therefore
  // acts as the limit.
  static constexpr int kMinNumPendingConstants = 4;
  static constexpr int kMaxNumPending32Constants =
      kMaxDistToIntPool / kInstrSize;
  static constexpr int kMaxNumPending64Constants =
      kMaxDistToFPPool / kInstrSize;
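
  // Informative arithmetic (not in the original header): with kInstrSize == 4
  // on ARM, kMaxDistToIntPool / kInstrSize == 4096 / 4 == 1024 and
  // kMaxDistToFPPool / kInstrSize == 1024 / 4 == 256, which bounds the number
  // of pending constants that can accumulate before a pool must be emitted.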

  // Postpone the generation of the constant pool for the specified number of
  // instructions.
  void BlockConstPoolFor(int instructions);

  // Check if it is time to emit a constant pool.
  void CheckConstPool(bool force_emit, bool require_jump);

  void MaybeCheckConstPool() {
    if (pc_offset() >= next_buffer_check_) {
      CheckConstPool(false, true);
    }
  }

  void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
                                          ConstantPoolEntry::Access access,
                                          ConstantPoolEntry::Type type) {
    // No embedded constant pool support.
    UNREACHABLE();
  }

  // Move a 32-bit immediate into a register, potentially via the constant pool.
  void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);

  // Get the code target object for a pc-relative call or jump.
  V8_INLINE Handle<Code> relative_code_target_object_handle_at(
      Address pc_) const;

 protected:
  int buffer_space() const { return reloc_info_writer.pos() - pc_; }

  // Decode branch instruction at pos and return branch target pos
  int target_at(int pos);

  // Patch branch instruction at pos to branch to given branch target pos
  void target_at_put(int pos, int target_pos);

  // Prevent constant pool emission until EndBlockConstPool is called.
  // Calls to this function can be nested but must be followed by an equal
  // number of calls to EndBlockConstPool.
  void StartBlockConstPool() {
    if (const_pool_blocked_nesting_++ == 0) {
      // Prevent constant pool checks happening by setting the next check to
      // the biggest possible offset.
      next_buffer_check_ = kMaxInt;
    }
  }

  // Resume constant pool emission. Needs to be called as many times as
  // StartBlockConstPool to have an effect.
  void EndBlockConstPool() {
    if (--const_pool_blocked_nesting_ == 0) {
#ifdef DEBUG
      // Max pool start (if we need a jump and an alignment).
      int start = pc_offset() + kInstrSize + 2 * kPointerSize;
      // Check the constant pool hasn't been blocked for too long.
      DCHECK(pending_32_bit_constants_.empty() ||
             (start + pending_64_bit_constants_.size() * kDoubleSize <
              static_cast<size_t>(first_const_pool_32_use_ +
                                  kMaxDistToIntPool)));
      DCHECK(pending_64_bit_constants_.empty() ||
             (start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
#endif
      // Two cases:
      //  * no_const_pool_before_ >= next_buffer_check_ and the emission is
      //    still blocked
      //  * no_const_pool_before_ < next_buffer_check_ and the next emit will
      //    trigger a check.
      next_buffer_check_ = no_const_pool_before_;
    }
  }

  bool is_const_pool_blocked() const {
    return (const_pool_blocked_nesting_ > 0) ||
           (pc_offset() < no_const_pool_before_);
  }

  bool VfpRegisterIsAvailable(DwVfpRegister reg) {
    DCHECK(reg.is_valid());
    return IsEnabled(VFP32DREGS) ||
           (reg.code() < LowDwVfpRegister::kNumRegisters);
  }

  bool VfpRegisterIsAvailable(QwNeonRegister reg) {
    DCHECK(reg.is_valid());
    return IsEnabled(VFP32DREGS) ||
           (reg.code() < LowDwVfpRegister::kNumRegisters / 2);
  }

  inline void emit(Instr x);

  // Code generation
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries.
  static constexpr int kGap = 32;

  // Relocation info generation
  // Each relocation is encoded as a variable size value
  static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;

  // ConstantPoolEntry records are used during code generation as temporary
  // containers for constants and code target addresses until they are emitted
  // to the constant pool. These records are temporarily stored in a separate
  // buffer until a constant pool is emitted.
  // If every instruction in a long sequence is accessing the pool, we need one
  // pending relocation entry per instruction.

  // The buffers of pending constant pool entries.
  std::vector<ConstantPoolEntry> pending_32_bit_constants_;
  std::vector<ConstantPoolEntry> pending_64_bit_constants_;

  // Map of address of handle to index in pending_32_bit_constants_.
  std::map<Address, int> handle_to_index_map_;

  // Scratch registers available for use by the Assembler.
  RegList scratch_register_list_;
  VfpRegList scratch_vfp_register_list_;

 private:
  // Avoid overflows for displacements etc.
  static const int kMaximalBufferSize = 512 * MB;

  int next_buffer_check_;  // pc offset of next buffer check

  // Constant pool generation
  // Pools are emitted in the instruction stream, preferably after unconditional
  // jumps or after returns from functions (in dead code locations).
  // If a long code sequence does not contain unconditional jumps, it is
  // necessary to emit the constant pool before the pool gets too far from the
  // location it is accessed from. In this case, we emit a jump over the emitted
  // constant pool.
  // Constants in the pool may be addresses of functions that get relocated;
  // if so, a relocation info entry is associated with the constant pool entry.

  // Repeated checking whether the constant pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated. That also means that the sizing of the buffers is not
  // an exact science, and that we rely on some slop to not overrun buffers.
  static constexpr int kCheckPoolIntervalInst = 32;
  static constexpr int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;

  // Emission of the constant pool may be blocked in some code sequences.
  int const_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_const_pool_before_;  // Block emission before this pc offset.

  // Keep track of the first instruction requiring a constant pool entry
  // since the previous constant pool was emitted.
  int first_const_pool_32_use_;
  int first_const_pool_64_use_;

  // The bound position, before which we cannot do instruction elimination.
  int last_bound_pos_;

  inline void CheckBuffer();
  void GrowBuffer();

  // Instruction generation
  void AddrMode1(Instr instr, Register rd, Register rn, const Operand& x);
  // Attempt to encode operand |x| for instruction |instr| and return true on
  // success. The result will be encoded in |instr| directly. This method may
  // change the opcode if deemed beneficial, for instance, MOV may be turned
  // into MVN, ADD into SUB, AND into BIC, etc. The only reason this method
  // may fail is that the operand is an immediate that cannot be encoded.
  bool AddrMode1TryEncodeOperand(Instr* instr, const Operand& x);

  void AddrMode2(Instr instr, Register rd, const MemOperand& x);
  void AddrMode3(Instr instr, Register rd, const MemOperand& x);
  void AddrMode4(Instr instr, Register rn, RegList rl);
  void AddrMode5(Instr instr, CRegister crd, const MemOperand& x);

  // Labels
  void print(const Label* L);
  void bind_to(Label* L, int pos);
  void next(Label* L);

  // Record reloc info for current pc_
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
  void ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
                            intptr_t value);
  void ConstantPoolAddEntry(int position, Double value);
  void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);

  friend class RelocInfo;
  friend class BlockConstPoolScope;
  friend class EnsureSpace;
  friend class UseScratchRegisterScope;
};

class EnsureSpace BASE_EMBEDDED {
 public:
  V8_INLINE explicit EnsureSpace(Assembler* assembler);
};

class PatchingAssembler : public Assembler {
 public:
  PatchingAssembler(const AssemblerOptions& options, byte* address,
                    int instructions);
  ~PatchingAssembler();

  void Emit(Address addr);
};

// This scope utility allows scratch registers to be managed safely. The
// Assembler's GetScratchRegisterList() is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
// When the scope ends, the Assembler's list will be restored to its original
// state, even if the list is modified by some other means. Note that this scope
// can be nested, but the destructors need to run in the reverse order of the
// constructors. We do not have assertions for this.
class UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(Assembler* assembler);
  ~UseScratchRegisterScope();

  // Take a register from the list and return it.
  Register Acquire();
  SwVfpRegister AcquireS() { return AcquireVfp<SwVfpRegister>(); }
  LowDwVfpRegister AcquireLowD() { return AcquireVfp<LowDwVfpRegister>(); }
  DwVfpRegister AcquireD() {
    DwVfpRegister reg = AcquireVfp<DwVfpRegister>();
    DCHECK(assembler_->VfpRegisterIsAvailable(reg));
    return reg;
  }
  QwNeonRegister AcquireQ() {
    QwNeonRegister reg = AcquireVfp<QwNeonRegister>();
    DCHECK(assembler_->VfpRegisterIsAvailable(reg));
    return reg;
  }

  // Check if we have registers available to acquire.
  bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; }
  bool CanAcquireD() const { return CanAcquireVfp<DwVfpRegister>(); }

 private:
  friend class Assembler;
  friend class TurboAssembler;

  template <typename T>
  bool CanAcquireVfp() const;

  template <typename T>
  T AcquireVfp();

  Assembler* assembler_;
  // Available scratch registers at the start of this scope.
  RegList old_available_;
  VfpRegList old_available_vfp_;
};
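
// Usage sketch (illustrative only; `assm` is a hypothetical Assembler):
//
//   {
//     UseScratchRegisterScope temps(&assm);
//     Register scratch = temps.Acquire();      // e.g. ip, if available
//     DwVfpRegister dtemp = temps.AcquireD();
//     // ... use scratch and dtemp ...
//   }  // Registers are returned to the Assembler's list here.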

}  // namespace internal
}  // namespace v8

#endif  // V8_ARM_ASSEMBLER_ARM_H_