// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license above has been
// modified significantly by Google Inc.

// Copyright 2012 the V8 project authors. All rights reserved.


#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
#define V8_MIPS_ASSEMBLER_MIPS_H_

#include <stdio.h>

#include <set>

#include "src/assembler.h"
#include "src/mips/constants-mips.h"

namespace v8 {
namespace internal {

// Register-list macros. Each takes a per-register macro V and expands it once
// per register name; used below to generate enum codes and constants.
// clang-format off
#define GENERAL_REGISTERS(V)                              \
  V(zero_reg)  V(at)  V(v0)  V(v1)  V(a0)  V(a1)  V(a2)  V(a3)  \
  V(t0)  V(t1)  V(t2)  V(t3)  V(t4)  V(t5)  V(t6)  V(t7)  \
  V(s0)  V(s1)  V(s2)  V(s3)  V(s4)  V(s5)  V(s6)  V(s7)  V(t8)  V(t9) \
  V(k0)  V(k1)  V(gp)  V(sp)  V(fp)  V(ra)

#define ALLOCATABLE_GENERAL_REGISTERS(V) \
  V(v0)  V(v1)  V(a0)  V(a1)  V(a2)  V(a3) \
  V(t0)  V(t1)  V(t2)  V(t3)  V(t4)  V(t5)  V(t6) V(s7)

#define DOUBLE_REGISTERS(V)                               \
  V(f0)  V(f1)  V(f2)  V(f3)  V(f4)  V(f5)  V(f6)  V(f7)  \
  V(f8)  V(f9)  V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
  V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
  V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)

// On O32 (see below) floats and doubles share the same register file.
#define FLOAT_REGISTERS DOUBLE_REGISTERS

#define SIMD128_REGISTERS DOUBLE_REGISTERS

// Only even-numbered FPU registers are allocatable: doubles occupy
// even/odd register pairs under the O32 ABI.
#define ALLOCATABLE_DOUBLE_REGISTERS(V)                   \
  V(f0)  V(f2)  V(f4)  V(f6)  V(f8)  V(f10) V(f12) V(f14) \
  V(f16) V(f18) V(f20) V(f22) V(f24)
// clang-format on

// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
// compatible with int, which has caused code-generation bugs.
//
// 2) We would prefer to use a class instead of a struct but we don't like
// the register initialization to depend on the particular initialization
// order (which appears to be different on OS X, Linux, and Windows for the
// installed versions of C++ we tried). Using a struct permits C-style
// "initialization". Also, the Register objects cannot be const as this
// forces initialization stubs in MSVC, making us dependent on initialization
// order.
//
// 3) By not using an enum, we are possibly preventing the compiler from
// doing certain constant folds, which may significantly reduce the
// code generated for some assembly instructions (because they boil down
// to a few constants). If this is a problem, we could change the code
// such that we use an enum in optimized mode, and the struct in debug
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.


// -----------------------------------------------------------------------------
// Implementation of Register and FPURegister.

struct Register {
100
  static const int kCpRegister = 23;  // cp (s7) is the 23rd register.
101

102 103 104 105 106 107 108 109 110 111
  enum Code {
#define REGISTER_CODE(R) kCode_##R,
    GENERAL_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
        kAfterLast,
    kCode_no_reg = -1
  };

  static const int kNumRegisters = Code::kAfterLast;

112 113 114 115 116 117 118 119 120 121
#if defined(V8_TARGET_LITTLE_ENDIAN)
  static const int kMantissaOffset = 0;
  static const int kExponentOffset = 4;
#elif defined(V8_TARGET_BIG_ENDIAN)
  static const int kMantissaOffset = 4;
  static const int kExponentOffset = 0;
#else
#error Unknown endianness
#endif

122 123

  static Register from_code(int code) {
124 125 126
    DCHECK(code >= 0);
    DCHECK(code < kNumRegisters);
    Register r = {code};
127 128
    return r;
  }
129 130
  bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
  bool is(Register reg) const { return reg_code == reg.reg_code; }
131
  int code() const {
132
    DCHECK(is_valid());
133
    return reg_code;
134
  }
135
  int bit() const {
136
    DCHECK(is_valid());
137
    return 1 << reg_code;
138 139 140
  }

  // Unfortunately we can't make this private in a struct.
141
  int reg_code;
142 143
};

144 145 146 147 148 149 150
// s7: context register
// s3: lithium scratch
// s4: lithium scratch2
#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
const Register no_reg = {Register::kCode_no_reg};
151

152 153 154 155 156

int ToNumber(Register reg);

Register ToRegister(int num);

157 158
static const bool kSimpleFPAliasing = true;

159
// Coprocessor register.
160
struct FPURegister {
161 162 163 164 165 166 167
  enum Code {
#define REGISTER_CODE(R) kCode_##R,
    DOUBLE_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
        kAfterLast,
    kCode_no_reg = -1
  };
168

169
  static const int kMaxNumRegisters = Code::kAfterLast;
170 171 172

  inline static int NumRegisters();

173 174 175
  // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
  // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
  // number of Double regs (64-bit regs, or FPU-reg-pairs).
176

177
  bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
178 179
  bool is(FPURegister reg) const { return reg_code == reg.reg_code; }
  FPURegister low() const {
180
    // Find low reg of a Double-reg pair, which is the reg itself.
181
    DCHECK(reg_code % 2 == 0);  // Specified Double reg must be even.
182
    FPURegister reg;
183
    reg.reg_code = reg_code;
184
    DCHECK(reg.is_valid());
185 186
    return reg;
  }
187
  FPURegister high() const {
188
    // Find high reg of a Doubel-reg pair, which is reg + 1.
189
    DCHECK(reg_code % 2 == 0);  // Specified Double reg must be even.
190
    FPURegister reg;
191
    reg.reg_code = reg_code + 1;
192
    DCHECK(reg.is_valid());
193 194 195
    return reg;
  }

196
  int code() const {
197
    DCHECK(is_valid());
198
    return reg_code;
199
  }
200
  int bit() const {
201
    DCHECK(is_valid());
202 203 204
    return 1 << reg_code;
  }

205 206
  static FPURegister from_code(int code) {
    FPURegister r = {code};
207
    return r;
208
  }
209
  void setcode(int f) {
210
    reg_code = f;
211
    DCHECK(is_valid());
212
  }
213
  // Unfortunately we can't make this private in a struct.
214
  int reg_code;
215 216
};

// A few double registers are reserved: one as a scratch register and one to
// hold 0.0.
//  f28: 0.0
//  f30: scratch register.

// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
// 32-bit registers, f0 through f31. When used as 'double' they are used
// in pairs, starting with the even numbered register. So a double operation
// on f0 really uses f0 and f1.
// (Modern mips hardware also supports 32 64-bit registers, via setting
// (privileged) Status Register FR bit to 1. This is used by the N32 ABI,
// but it is not in common use. Someday we will want to support this in v8.)

// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
typedef FPURegister FloatRegister;

typedef FPURegister DoubleRegister;

// TODO(mips) Define SIMD registers.
typedef FPURegister Simd128Register;

const DoubleRegister no_freg = {-1};

const DoubleRegister f0 = {0};  // Return value in hard float mode.
const DoubleRegister f1 = {1};
const DoubleRegister f2 = {2};
const DoubleRegister f3 = {3};
const DoubleRegister f4 = {4};
const DoubleRegister f5 = {5};
const DoubleRegister f6 = {6};
const DoubleRegister f7 = {7};
const DoubleRegister f8 = {8};
const DoubleRegister f9 = {9};
const DoubleRegister f10 = {10};
const DoubleRegister f11 = {11};
const DoubleRegister f12 = {12};  // Arg 0 in hard float mode.
const DoubleRegister f13 = {13};
const DoubleRegister f14 = {14};  // Arg 1 in hard float mode.
const DoubleRegister f15 = {15};
const DoubleRegister f16 = {16};
const DoubleRegister f17 = {17};
const DoubleRegister f18 = {18};
const DoubleRegister f19 = {19};
const DoubleRegister f20 = {20};
const DoubleRegister f21 = {21};
const DoubleRegister f22 = {22};
const DoubleRegister f23 = {23};
const DoubleRegister f24 = {24};
const DoubleRegister f25 = {25};
const DoubleRegister f26 = {26};
const DoubleRegister f27 = {27};
const DoubleRegister f28 = {28};
const DoubleRegister f29 = {29};
const DoubleRegister f30 = {30};
const DoubleRegister f31 = {31};

// Register aliases.
// cp is assumed to be a callee saved register.
// Defined using #define instead of "static const Register&" because Clang
// complains otherwise when a compilation unit that includes this header
// doesn't use the variables.
#define kRootRegister s6
#define cp s7
#define kLithiumScratchReg s3
#define kLithiumScratchReg2 s4
#define kLithiumScratchDouble f30
#define kDoubleRegZero f28

// Used on mips32r6 for compare operations.
#define kDoubleCompareReg f26

287 288 289
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
struct FPUControlRegister {
290 291
  bool is_valid() const { return reg_code == kFCSRRegister; }
  bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
292
  int code() const {
293
    DCHECK(is_valid());
294
    return reg_code;
295 296
  }
  int bit() const {
297
    DCHECK(is_valid());
298
    return 1 << reg_code;
299 300
  }
  void setcode(int f) {
301
    reg_code = f;
302
    DCHECK(is_valid());
303 304
  }
  // Unfortunately we can't make this private in a struct.
305
  int reg_code;
306 307
};

308
const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
309
const FPUControlRegister FCSR = { kFCSRRegister };
310 311 312 313 314 315 316 317 318

// -----------------------------------------------------------------------------
// Machine instruction Operands.

// Class Operand represents a shifter operand in data processing instructions.
class Operand BASE_EMBEDDED {
 public:
  // Immediate.
  INLINE(explicit Operand(int32_t immediate,
319
         RelocInfo::Mode rmode = RelocInfo::NONE32));
320 321 322 323 324 325 326 327 328 329 330 331 332
  INLINE(explicit Operand(const ExternalReference& f));
  INLINE(explicit Operand(const char* s));
  INLINE(explicit Operand(Object** opp));
  INLINE(explicit Operand(Context** cpp));
  explicit Operand(Handle<Object> handle);
  INLINE(explicit Operand(Smi* value));

  // Register.
  INLINE(explicit Operand(Register rm));

  // Return true if this is a register operand.
  INLINE(bool is_reg() const);

333
  inline int32_t immediate() const {
334
    DCHECK(!is_reg());
335 336 337
    return imm32_;
  }

338 339 340 341
  Register rm() const { return rm_; }

 private:
  Register rm_;
342
  int32_t imm32_;  // Valid if rm_ == no_reg.
343 344 345 346 347 348 349 350 351 352 353
  RelocInfo::Mode rmode_;

  friend class Assembler;
  friend class MacroAssembler;
};


// On MIPS we have only one adressing mode with base_reg + offset.
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
 public:
plind44@gmail.com's avatar
plind44@gmail.com committed
354 355 356 357 358 359
  // Immediate value attached to offset.
  enum OffsetAddend {
    offset_minus_one = -1,
    offset_zero = 0
  };

360
  explicit MemOperand(Register rn, int32_t offset = 0);
plind44@gmail.com's avatar
plind44@gmail.com committed
361 362
  explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
                      OffsetAddend offset_addend = offset_zero);
363
  int32_t offset() const { return offset_; }
364

365 366 367 368
  bool OffsetIsInt16Encodable() const {
    return is_int16(offset_);
  }

369
 private:
370
  int32_t offset_;
371 372 373 374 375

  friend class Assembler;
};


376
class Assembler : public AssemblerBase {
377 378 379 380 381 382 383 384 385 386 387 388 389 390
 public:
  // Create an assembler. Instructions and relocation information are emitted
  // into a buffer, with the instructions starting from the beginning and the
  // relocation information starting from the end of the buffer. See CodeDesc
  // for a detailed comment on the layout (globals.h).
  //
  // If the provided buffer is NULL, the assembler allocates and grows its own
  // buffer, and buffer_size determines the initial buffer size. The buffer is
  // owned by the assembler and deallocated upon destruction of the assembler.
  //
  // If the provided buffer is not NULL, the assembler uses the provided buffer
  // for code generation and assumes its size to be buffer_size. If the buffer
  // is too small, a fatal error occurs. No deallocation of the buffer is done
  // upon destruction of the assembler.
391
  Assembler(Isolate* isolate, void* buffer, int buffer_size);
392
  virtual ~Assembler() { }
393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412

  // GetCode emits any pending (non-emitted) code and fills the descriptor
  // desc. GetCode() is idempotent; it returns the same result if no other
  // Assembler functions are invoked in between GetCode() calls.
  void GetCode(CodeDesc* desc);

  // Label operations & relative jumps (PPUM Appendix D).
  //
  // Takes a branch opcode (cc) and a label (L) and generates
  // either a backward branch or a forward branch and links it
  // to the label fixup chain. Usage:
  //
  // Label L;    // unbound label
  // j(cc, &L);  // forward branch to unbound label
  // bind(&L);   // bind label to the current pc
  // j(cc, &L);  // backward branch to bound label
  // bind(&L);   // illegal: a label may be bound only once
  //
  // Note: The same Label can be used for forward and backward branches
  // but it may be bound only once.
413
  void bind(Label* L);  // Binds an unbound label L to current code position.
414 415 416

  enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 };

417 418 419
  // Determines if Label is bound and near enough so that branch instruction
  // can be used to reach it, instead of jump instruction.
  bool is_near(Label* L);
420 421
  bool is_near(Label* L, OffsetSize bits);
  bool is_near_branch(Label* L);
422 423 424 425 426 427 428 429 430 431
  inline bool is_near_pre_r6(Label* L) {
    DCHECK(!IsMipsArchVariant(kMips32r6));
    return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
  }
  inline bool is_near_r6(Label* L) {
    DCHECK(IsMipsArchVariant(kMips32r6));
    return pc_offset() - L->pos() < kMaxCompactBranchOffset - 4 * kInstrSize;
  }

  int BranchOffset(Instr instr);
432

433 434
  // Returns the branch offset to the given label from the current code
  // position. Links the label to the current position if it is still unbound.
435
  // Manages the jump elimination optimization if the second parameter is true.
436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453
  int32_t branch_offset_helper(Label* L, OffsetSize bits);
  inline int32_t branch_offset(Label* L) {
    return branch_offset_helper(L, OffsetSize::kOffset16);
  }
  inline int32_t branch_offset21(Label* L) {
    return branch_offset_helper(L, OffsetSize::kOffset21);
  }
  inline int32_t branch_offset26(Label* L) {
    return branch_offset_helper(L, OffsetSize::kOffset26);
  }
  inline int32_t shifted_branch_offset(Label* L) {
    return branch_offset(L) >> 2;
  }
  inline int32_t shifted_branch_offset21(Label* L) {
    return branch_offset21(L) >> 2;
  }
  inline int32_t shifted_branch_offset26(Label* L) {
    return branch_offset26(L) >> 2;
454
  }
455
  uint32_t jump_address(Label* L);
456 457 458 459 460 461 462

  // Puts a labels target address at the given position.
  // The high 8 bits are set to zero.
  void label_at_put(Label* L, int at_offset);

  // Read/Modify the code target address in the branch/call instruction at pc.
  static Address target_address_at(Address pc);
463 464 465
  static void set_target_address_at(
      Isolate* isolate, Address pc, Address target,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
466
  // On MIPS there is no Constant Pool so we skip that parameter.
467
  INLINE(static Address target_address_at(Address pc, Address constant_pool)) {
468 469
    return target_address_at(pc);
  }
470
  INLINE(static void set_target_address_at(
471
      Isolate* isolate, Address pc, Address constant_pool, Address target,
472
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
473
    set_target_address_at(isolate, pc, target, icache_flush_mode);
474 475
  }
  INLINE(static Address target_address_at(Address pc, Code* code)) {
476
    Address constant_pool = code ? code->constant_pool() : NULL;
477 478
    return target_address_at(pc, constant_pool);
  }
479 480 481
  INLINE(static void set_target_address_at(
      Isolate* isolate, Address pc, Code* code, Address target,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
482
    Address constant_pool = code ? code->constant_pool() : NULL;
483 484
    set_target_address_at(isolate, pc, constant_pool, target,
                          icache_flush_mode);
485
  }
486

487 488 489 490
  // Return the code target address at a call site from the return address
  // of that call in the instruction stream.
  inline static Address target_address_from_return_address(Address pc);

491 492
  static void QuietNaN(HeapObject* nan);

493
  // This sets the branch destination (which gets loaded at the call address).
494 495 496
  // This is for calls and branches within generated code.  The serializer
  // has already deserialized the lui/ori instructions etc.
  inline static void deserialization_set_special_target_at(
497 498
      Isolate* isolate, Address instruction_payload, Code* code,
      Address target) {
499
    set_target_address_at(
500 501
        isolate,
        instruction_payload - kInstructionsFor32BitConstant * kInstrSize, code,
502
        target);
503 504
  }

505 506
  // This sets the internal reference at the pc.
  inline static void deserialization_set_target_internal_reference_at(
507
      Isolate* isolate, Address pc, Address target,
508
      RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
509

510 511 512 513 514 515 516 517 518 519 520 521
  // Size of an instruction.
  static const int kInstrSize = sizeof(Instr);

  // Difference between address of current opcode and target address offset.
  static const int kBranchPCOffset = 4;

  // Here we are patching the address in the LUI/ORI instruction pair.
  // These values are used in the serialization process and must be zero for
  // MIPS platform, as Code, Embedded Object or External-reference pointers
  // are split across two consecutive instructions and don't exist separately
  // in the code, so the serializer should not step forwards in memory after
  // a target is resolved and written.
522
  static const int kSpecialTargetSize = 0;
523

524 525 526 527 528
  // Number of consecutive instructions used to store 32bit constant. This
  // constant is used in RelocInfo::target_address_address() function to tell
  // serializer address of the instruction that follows LUI/ORI instruction
  // pair.
  static const int kInstructionsFor32BitConstant = 2;
529 530 531

  // Distance between the instruction referring to the address of the call
  // target and the return address.
532 533 534
#ifdef _MIPS_ARCH_MIPS32R6
  static const int kCallTargetAddressOffset = 3 * kInstrSize;
#else
535
  static const int kCallTargetAddressOffset = 4 * kInstrSize;
536
#endif
537

538 539
  // Distance between start of patched debug break slot and the emitted address
  // to jump to.
540
  static const int kPatchDebugBreakSlotAddressOffset = 4 * kInstrSize;
541 542 543 544 545

  // Difference between address of current opcode and value read from pc
  // register.
  static const int kPcLoadDelta = 4;

546 547 548
#ifdef _MIPS_ARCH_MIPS32R6
  static const int kDebugBreakSlotInstructions = 3;
#else
549
  static const int kDebugBreakSlotInstructions = 4;
550
#endif
551 552 553
  static const int kDebugBreakSlotLength =
      kDebugBreakSlotInstructions * kInstrSize;

554 555 556 557 558 559 560 561 562 563 564
  // Max offset for instructions with 16-bit offset field
  static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;

  // Max offset for compact branch instructions with 26-bit offset field
  static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;

#ifdef _MIPS_ARCH_MIPS32R6
  static const int kTrampolineSlotsSize = 2 * kInstrSize;
#else
  static const int kTrampolineSlotsSize = 4 * kInstrSize;
#endif
565 566 567 568

  // ---------------------------------------------------------------------------
  // Code generation.

569 570 571 572
  // Insert the smallest number of nop instructions
  // possible to align the pc offset to a multiple
  // of m. m must be a power of 2 (>= 4).
  void Align(int m);
573 574 575
  // Insert the smallest number of zero bytes possible to align the pc offset
  // to a mulitple of m. m must be a power of 2 (>= 2).
  void DataAlign(int m);
576 577 578 579 580 581 582 583 584 585 586 587 588 589
  // Aligns code to something that's optimal for a jump target for the platform.
  void CodeTargetAlign();

  // Different nop operations are used by the code generator to detect certain
  // states of the generated code.
  enum NopMarkerTypes {
    NON_MARKING_NOP = 0,
    DEBUG_BREAK_NOP,
    // IC markers.
    PROPERTY_ACCESS_INLINED,
    PROPERTY_ACCESS_INLINED_CONTEXT,
    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
    // Helper values.
    LAST_CODE_MARKER,
590 591
    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
    // Code aging
592 593
    CODE_AGE_MARKER_NOP = 6,
    CODE_AGE_SEQUENCE_NOP
594 595
  };

596 597 598
  // Type == 0 is the default non-marking nop. For mips this is a
  // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero
  // marking, to avoid conflict with ssnop and ehb instructions.
599
  void nop(unsigned int type = 0) {
600
    DCHECK(type < 32);
601 602
    Register nop_rt_reg = (type == 0) ? zero_reg : at;
    sll(zero_reg, nop_rt_reg, type, true);
603
  }
604 605


606
  // --------Branch-and-jump-instructions----------
607 608
  // We don't use likely variant of instructions.
  void b(int16_t offset);
609
  inline void b(Label* L) { b(shifted_branch_offset(L)); }
610
  void bal(int16_t offset);
611
  inline void bal(Label* L) { bal(shifted_branch_offset(L)); }
612
  void bc(int32_t offset);
613
  inline void bc(Label* L) { bc(shifted_branch_offset26(L)); }
614
  void balc(int32_t offset);
615
  inline void balc(Label* L) { balc(shifted_branch_offset26(L)); }
616 617

  void beq(Register rs, Register rt, int16_t offset);
618 619
  inline void beq(Register rs, Register rt, Label* L) {
    beq(rs, rt, shifted_branch_offset(L));
620 621
  }
  void bgez(Register rs, int16_t offset);
622
  void bgezc(Register rt, int16_t offset);
623 624
  inline void bgezc(Register rt, Label* L) {
    bgezc(rt, shifted_branch_offset(L));
625 626
  }
  void bgeuc(Register rs, Register rt, int16_t offset);
627 628
  inline void bgeuc(Register rs, Register rt, Label* L) {
    bgeuc(rs, rt, shifted_branch_offset(L));
629 630
  }
  void bgec(Register rs, Register rt, int16_t offset);
631 632
  inline void bgec(Register rs, Register rt, Label* L) {
    bgec(rs, rt, shifted_branch_offset(L));
633
  }
634
  void bgezal(Register rs, int16_t offset);
635
  void bgezalc(Register rt, int16_t offset);
636 637
  inline void bgezalc(Register rt, Label* L) {
    bgezalc(rt, shifted_branch_offset(L));
638 639
  }
  void bgezall(Register rs, int16_t offset);
640 641
  inline void bgezall(Register rs, Label* L) {
    bgezall(rs, branch_offset(L) >> 2);
642
  }
643
  void bgtz(Register rs, int16_t offset);
644
  void bgtzc(Register rt, int16_t offset);
645 646
  inline void bgtzc(Register rt, Label* L) {
    bgtzc(rt, shifted_branch_offset(L));
647
  }
648
  void blez(Register rs, int16_t offset);
649
  void blezc(Register rt, int16_t offset);
650 651
  inline void blezc(Register rt, Label* L) {
    blezc(rt, shifted_branch_offset(L));
652
  }
653
  void bltz(Register rs, int16_t offset);
654
  void bltzc(Register rt, int16_t offset);
655 656
  inline void bltzc(Register rt, Label* L) {
    bltzc(rt, shifted_branch_offset(L));
657 658
  }
  void bltuc(Register rs, Register rt, int16_t offset);
659 660
  inline void bltuc(Register rs, Register rt, Label* L) {
    bltuc(rs, rt, shifted_branch_offset(L));
661 662
  }
  void bltc(Register rs, Register rt, int16_t offset);
663 664
  inline void bltc(Register rs, Register rt, Label* L) {
    bltc(rs, rt, shifted_branch_offset(L));
665
  }
666
  void bltzal(Register rs, int16_t offset);
667
  void blezalc(Register rt, int16_t offset);
668 669
  inline void blezalc(Register rt, Label* L) {
    blezalc(rt, shifted_branch_offset(L));
670 671
  }
  void bltzalc(Register rt, int16_t offset);
672 673
  inline void bltzalc(Register rt, Label* L) {
    bltzalc(rt, shifted_branch_offset(L));
674 675
  }
  void bgtzalc(Register rt, int16_t offset);
676 677
  inline void bgtzalc(Register rt, Label* L) {
    bgtzalc(rt, shifted_branch_offset(L));
678 679
  }
  void beqzalc(Register rt, int16_t offset);
680 681
  inline void beqzalc(Register rt, Label* L) {
    beqzalc(rt, shifted_branch_offset(L));
682 683
  }
  void beqc(Register rs, Register rt, int16_t offset);
684 685
  inline void beqc(Register rs, Register rt, Label* L) {
    beqc(rs, rt, shifted_branch_offset(L));
686 687
  }
  void beqzc(Register rs, int32_t offset);
688 689
  inline void beqzc(Register rs, Label* L) {
    beqzc(rs, shifted_branch_offset21(L));
690 691
  }
  void bnezalc(Register rt, int16_t offset);
692 693
  inline void bnezalc(Register rt, Label* L) {
    bnezalc(rt, shifted_branch_offset(L));
694 695
  }
  void bnec(Register rs, Register rt, int16_t offset);
696 697
  inline void bnec(Register rs, Register rt, Label* L) {
    bnec(rs, rt, shifted_branch_offset(L));
698 699
  }
  void bnezc(Register rt, int32_t offset);
700 701
  inline void bnezc(Register rt, Label* L) {
    bnezc(rt, shifted_branch_offset21(L));
702
  }
703
  void bne(Register rs, Register rt, int16_t offset);
704 705
  inline void bne(Register rs, Register rt, Label* L) {
    bne(rs, rt, shifted_branch_offset(L));
706
  }
707
  void bovc(Register rs, Register rt, int16_t offset);
708 709
  inline void bovc(Register rs, Register rt, Label* L) {
    bovc(rs, rt, shifted_branch_offset(L));
710 711
  }
  void bnvc(Register rs, Register rt, int16_t offset);
712 713
  inline void bnvc(Register rs, Register rt, Label* L) {
    bnvc(rs, rt, shifted_branch_offset(L));
714
  }
715 716

  // Never use the int16_t b(l)cond version with a branch offset
717
  // instead of using the Label* version.
718

719
  // Jump targets must be in the current 256 MB-aligned region. i.e. 28 bits.
720 721 722 723
  void j(int32_t target);
  void jal(int32_t target);
  void jalr(Register rs, Register rd = ra);
  void jr(Register target);
724 725
  void jic(Register rt, int16_t offset);
  void jialc(Register rt, int16_t offset);
726 727


728
  // -------Data-processing-instructions---------
729 730 731 732 733 734 735 736

  // Arithmetic.
  void addu(Register rd, Register rs, Register rt);
  void subu(Register rd, Register rs, Register rt);
  void mult(Register rs, Register rt);
  void multu(Register rs, Register rt);
  void div(Register rs, Register rt);
  void divu(Register rs, Register rt);
737 738 739 740
  void div(Register rd, Register rs, Register rt);
  void divu(Register rd, Register rs, Register rt);
  void mod(Register rd, Register rs, Register rt);
  void modu(Register rd, Register rs, Register rt);
741
  void mul(Register rd, Register rs, Register rt);
742 743 744
  void muh(Register rd, Register rs, Register rt);
  void mulu(Register rd, Register rs, Register rt);
  void muhu(Register rd, Register rs, Register rt);
745 746 747 748 749 750 751 752 753 754 755 756 757

  void addiu(Register rd, Register rs, int32_t j);

  // Logical.
  void and_(Register rd, Register rs, Register rt);
  void or_(Register rd, Register rs, Register rt);
  void xor_(Register rd, Register rs, Register rt);
  void nor(Register rd, Register rs, Register rt);

  void andi(Register rd, Register rs, int32_t j);
  void ori(Register rd, Register rs, int32_t j);
  void xori(Register rd, Register rs, int32_t j);
  void lui(Register rd, int32_t j);
758
  void aui(Register rs, Register rt, int32_t j);
759 760

  // Shifts.
761 762 763 764
  // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
  // and may cause problems in normal code. coming_from_nop makes sure this
  // doesn't happen.
  void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
765 766 767 768 769
  void sllv(Register rd, Register rt, Register rs);
  void srl(Register rd, Register rt, uint16_t sa);
  void srlv(Register rd, Register rt, Register rs);
  void sra(Register rt, Register rd, uint16_t sa);
  void srav(Register rt, Register rd, Register rs);
770 771
  void rotr(Register rd, Register rt, uint16_t sa);
  void rotrv(Register rd, Register rt, Register rs);
772

773
  // ------------Memory-instructions-------------
774 775 776

  void lb(Register rd, const MemOperand& rs);
  void lbu(Register rd, const MemOperand& rs);
777 778
  void lh(Register rd, const MemOperand& rs);
  void lhu(Register rd, const MemOperand& rs);
779
  void lw(Register rd, const MemOperand& rs);
780 781
  void lwl(Register rd, const MemOperand& rs);
  void lwr(Register rd, const MemOperand& rs);
782
  void sb(Register rd, const MemOperand& rs);
783
  void sh(Register rd, const MemOperand& rs);
784
  void sw(Register rd, const MemOperand& rs);
785 786
  void swl(Register rd, const MemOperand& rs);
  void swr(Register rd, const MemOperand& rs);
787 788


789 790 791 792 793 794 795 796
  // ---------PC-Relative-instructions-----------

  void addiupc(Register rs, int32_t imm19);
  void lwpc(Register rs, int32_t offset19);
  void auipc(Register rs, int16_t imm16);
  void aluipc(Register rs, int16_t imm16);


797
  // ----------------Prefetch--------------------

  void pref(int32_t hint, const MemOperand& rs);


  // -------------Misc-instructions--------------

  // Break / Trap instructions.
  void break_(uint32_t code, bool break_as_stop = false);
  void stop(const char* msg, uint32_t code = kMaxStopCode);
  // Conditional trap instructions; 'code' is embedded in the encoding.
  void tge(Register rs, Register rt, uint16_t code);
  void tgeu(Register rs, Register rt, uint16_t code);
  void tlt(Register rs, Register rt, uint16_t code);
  void tltu(Register rs, Register rt, uint16_t code);
  void teq(Register rs, Register rt, uint16_t code);
  void tne(Register rs, Register rt, uint16_t code);

  // Memory barrier instruction.
  void sync();

  // Move from HI/LO register.
  void mfhi(Register rd);
  void mflo(Register rd);

  // Set on less than.
  void slt(Register rd, Register rs, Register rt);
  void sltu(Register rd, Register rs, Register rt);
  void slti(Register rd, Register rs, int32_t j);
  void sltiu(Register rd, Register rs, int32_t j);

  // Conditional move.
  void movz(Register rd, Register rs, Register rt);
  void movn(Register rd, Register rs, Register rt);
  void movt(Register rd, Register rs, uint16_t cc = 0);
  void movf(Register rd, Register rs, uint16_t cc = 0);

  // Conditional selects (MIPSr6) for GPRs and FPU registers.
  void sel(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
  void sel_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void sel_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void seleqz(Register rd, Register rs, Register rt);
  void seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
              FPURegister ft);
  void selnez(Register rd, Register rs, Register rt);
  void selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
              FPURegister ft);
  void seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void selnez_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void selnez_s(FPURegister fd, FPURegister fs, FPURegister ft);

  // FPU conditional moves keyed on a GPR (movz/movn) or an FPU condition
  // code bit (movt/movf).
  void movz_s(FPURegister fd, FPURegister fs, Register rt);
  void movz_d(FPURegister fd, FPURegister fs, Register rt);
  void movt_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
  void movt_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
  void movf_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
  void movf_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
  void movn_s(FPURegister fd, FPURegister fs, Register rt);
  void movn_d(FPURegister fd, FPURegister fs, Register rt);

  // Bit twiddling.
  void clz(Register rd, Register rs);
  void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
  void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
  void bitswap(Register rd, Register rt);
  void align(Register rd, Register rs, Register rt, uint8_t bp);

  // Byte swap within halfwords / sign-extend halfword or byte.
  void wsbh(Register rd, Register rt);
  void seh(Register rd, Register rt);
  void seb(Register rd, Register rt);

  // --------Coprocessor-instructions----------------

  // Load, store, and move.
  void lwc1(FPURegister fd, const MemOperand& src);
  void ldc1(FPURegister fd, const MemOperand& src);

  void swc1(FPURegister fs, const MemOperand& dst);
  void sdc1(FPURegister fs, const MemOperand& dst);

  // Moves between a GPR and the low (mtc1/mfc1) or high (mthc1/mfhc1)
  // word of an FPU register.
  void mtc1(Register rt, FPURegister fs);
  void mthc1(Register rt, FPURegister fs);

  void mfc1(Register rt, FPURegister fs);
  void mfhc1(Register rt, FPURegister fs);

  // Moves to/from FPU control registers.
  void ctc1(Register rt, FPUControlRegister fs);
  void cfc1(Register rt, FPUControlRegister fs);

  // Arithmetic.
  void add_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void sub_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void mul_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
  // Multiply-add / multiply-subtract variants (maddf/msubf are the
  // three-operand MIPSr6 forms).
  void madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
  void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
  void msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
  void msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
  void maddf_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void maddf_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void msubf_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void msubf_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void div_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void abs_s(FPURegister fd, FPURegister fs);
  void abs_d(FPURegister fd, FPURegister fs);
  void mov_d(FPURegister fd, FPURegister fs);
  void mov_s(FPURegister fd, FPURegister fs);
  void neg_s(FPURegister fd, FPURegister fs);
  void neg_d(FPURegister fd, FPURegister fs);
  void sqrt_s(FPURegister fd, FPURegister fs);
  void sqrt_d(FPURegister fd, FPURegister fs);
  // Reciprocal square root / reciprocal approximations.
  void rsqrt_s(FPURegister fd, FPURegister fs);
  void rsqrt_d(FPURegister fd, FPURegister fs);
  void recip_d(FPURegister fd, FPURegister fs);
  void recip_s(FPURegister fd, FPURegister fs);

  // Conversion.
  void cvt_w_s(FPURegister fd, FPURegister fs);
  void cvt_w_d(FPURegister fd, FPURegister fs);
  // Convert to 32-bit integer word with an explicit rounding mode.
  void trunc_w_s(FPURegister fd, FPURegister fs);
  void trunc_w_d(FPURegister fd, FPURegister fs);
  void round_w_s(FPURegister fd, FPURegister fs);
  void round_w_d(FPURegister fd, FPURegister fs);
  void floor_w_s(FPURegister fd, FPURegister fs);
  void floor_w_d(FPURegister fd, FPURegister fs);
  void ceil_w_s(FPURegister fd, FPURegister fs);
  void ceil_w_d(FPURegister fd, FPURegister fs);
  // Round to an integral value in floating-point format (MIPSr6).
  void rint_s(FPURegister fd, FPURegister fs);
  void rint_d(FPURegister fd, FPURegister fs);
  void rint(SecondaryField fmt, FPURegister fd, FPURegister fs);

  // Conversions to 64-bit (long) integer format.
  void cvt_l_s(FPURegister fd, FPURegister fs);
  void cvt_l_d(FPURegister fd, FPURegister fs);
  void trunc_l_s(FPURegister fd, FPURegister fs);
  void trunc_l_d(FPURegister fd, FPURegister fs);
  void round_l_s(FPURegister fd, FPURegister fs);
  void round_l_d(FPURegister fd, FPURegister fs);
  void floor_l_s(FPURegister fd, FPURegister fs);
  void floor_l_d(FPURegister fd, FPURegister fs);
  void ceil_l_s(FPURegister fd, FPURegister fs);
  void ceil_l_d(FPURegister fd, FPURegister fs);

  // Classify the floating-point value in fs (MIPSr6).
  void class_s(FPURegister fd, FPURegister fs);
  void class_d(FPURegister fd, FPURegister fs);

  // Min/max and their absolute-magnitude variants (MIPSr6).
  void min(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
  void mina(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
  void max(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
  void maxa(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
  void min_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void min_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void max_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void max_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void mina_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void mina_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void maxa_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void maxa_d(FPURegister fd, FPURegister fs, FPURegister ft);

  // Conversions from integer formats to single/double.
  void cvt_s_w(FPURegister fd, FPURegister fs);
  void cvt_s_l(FPURegister fd, FPURegister fs);
  void cvt_s_d(FPURegister fd, FPURegister fs);

  void cvt_d_w(FPURegister fd, FPURegister fs);
  void cvt_d_l(FPURegister fd, FPURegister fs);
  void cvt_d_s(FPURegister fd, FPURegister fs);
  // Conditions and branches for MIPSr6.
  void cmp(FPUCondition cond, SecondaryField fmt,
         FPURegister fd, FPURegister ft, FPURegister fs);
  void cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
  void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);

  // Branch if bit 0 of FPU register ft is zero (bc1eqz) / non-zero (bc1nez).
  // The Label overloads resolve the label to an encoded branch offset.
  void bc1eqz(int16_t offset, FPURegister ft);
  inline void bc1eqz(Label* L, FPURegister ft) {
    bc1eqz(shifted_branch_offset(L), ft);
  }
  void bc1nez(int16_t offset, FPURegister ft);
  inline void bc1nez(Label* L, FPURegister ft) {
    bc1nez(shifted_branch_offset(L), ft);
  }

  // Conditions and branches for non MIPSr6.
  void c(FPUCondition cond, SecondaryField fmt,
         FPURegister ft, FPURegister fs, uint16_t cc = 0);
  void c_s(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
  void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);

  // Branch on FPU condition code 'cc' false (bc1f) / true (bc1t).
  void bc1f(int16_t offset, uint16_t cc = 0);
  inline void bc1f(Label* L, uint16_t cc = 0) {
    bc1f(shifted_branch_offset(L), cc);
  }
  void bc1t(int16_t offset, uint16_t cc = 0);
  inline void bc1t(Label* L, uint16_t cc = 0) {
    bc1t(shifted_branch_offset(L), cc);
  }
  void fcmp(FPURegister src1, const double src2, FPUCondition cond);

  // Check the code size generated from label to here.
  // Returns the byte distance from the label's bound position to pc_offset().
  int SizeOfCodeGeneratedSince(Label* label) {
    return pc_offset() - label->pos();
  }

  // Check the number of instructions generated from label to here.
  int InstructionsGeneratedSince(Label* label) {
    return SizeOfCodeGeneratedSince(label) / kInstrSize;
  }
  // Class for scoping postponing the trampoline pool generation.
  // RAII: blocks pool emission on construction, unblocks on destruction.
  class BlockTrampolinePoolScope {
   public:
    explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockTrampolinePool();
    }
    ~BlockTrampolinePoolScope() {
      assem_->EndBlockTrampolinePool();
    }

   private:
    Assembler* assem_;  // Not owned.

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
  };
  // Class for postponing the assembly buffer growth. Typically used for
  // sequences of instructions that must be emitted as a unit, before
  // buffer growth (and relocation) can occur.
  // This blocking scope is not nestable.
  class BlockGrowBufferScope {
   public:
    explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockGrowBuffer();
    }
    ~BlockGrowBufferScope() {
      assem_->EndBlockGrowBuffer();
    }

   private:
    Assembler* assem_;  // Not owned.

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
  };
  // Debugging.

  // Mark address of a debug break slot.
  void RecordDebugBreakSlot(RelocInfo::Mode mode);

  // Record the AST id of the CallIC being compiled, so that it can be placed
  // in the relocation information.
  void SetRecordedAstId(TypeFeedbackId ast_id) {
    // Only one pending AST id may be recorded at a time.
    DCHECK(recorded_ast_id_.IsNone());
    recorded_ast_id_ = ast_id;
  }

  TypeFeedbackId RecordedAstId() {
    DCHECK(!recorded_ast_id_.IsNone());
    return recorded_ast_id_;
  }

  void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }

  // Record a comment relocation entry that can be used by a disassembler.
  // Use --code-comments to enable.
  void RecordComment(const char* msg);

  // Record a deoptimization reason that can be used by a log or cpu profiler.
  // Use --trace-deopt to enable.
  void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
                         int id);

  static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                       intptr_t pc_delta);

  // Writes a single byte or word of data in the code stream.  Used for
  // inline tables, e.g., jump-tables.
  void db(uint8_t data);
  void dd(uint32_t data);
  void dq(uint64_t data);
  // Pointer-sized data; emitted as a 32-bit word via dd() on this port.
  void dp(uintptr_t data) { dd(data); }
  void dd(Label* label);

  // Postpone the generation of the trampoline pool for the specified number of
  // instructions.
  void BlockTrampolinePoolFor(int instructions);

  // Check if there is less than kGap bytes available in the buffer.
  // If this is the case, we need to grow the buffer before emitting
  // an instruction or relocation information.
  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }

  // Get the number of bytes available in the buffer.
  inline int available_space() const { return reloc_info_writer.pos() - pc_; }

  // Read/patch instructions, either at an absolute address or at an
  // offset 'pos' into the assembly buffer.
  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
  static void instr_at_put(byte* pc, Instr instr) {
    *reinterpret_cast<Instr*>(pc) = instr;
  }
  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
  void instr_at_put(int pos, Instr instr) {
    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
  }

  // Check if an instruction is a branch of some kind.
  static bool IsBranch(Instr instr);
  static bool IsBc(Instr instr);
  static bool IsBzc(Instr instr);
  static bool IsBeq(Instr instr);
  static bool IsBne(Instr instr);
  static bool IsBeqzc(Instr instr);
  static bool IsBnezc(Instr instr);
  static bool IsBeqc(Instr instr);
  static bool IsBnec(Instr instr);
  static bool IsJicOrJialc(Instr instr);

  // Jump and immediate-load instruction predicates.
  static bool IsJump(Instr instr);
  static bool IsJ(Instr instr);
  static bool IsLui(Instr instr);
  static bool IsOri(Instr instr);

  static bool IsJal(Instr instr);
  static bool IsJr(Instr instr);
  static bool IsJalr(Instr instr);

  // Recognizers for instruction patterns used by the code generator.
  static bool IsNop(Instr instr, unsigned int type);
  static bool IsPop(Instr instr);
  static bool IsPush(Instr instr);
  static bool IsLwRegFpOffset(Instr instr);
  static bool IsSwRegFpOffset(Instr instr);
  static bool IsLwRegFpNegOffset(Instr instr);
  static bool IsSwRegFpNegOffset(Instr instr);

  // Decode register operands out of an encoded instruction.
  static Register GetRtReg(Instr instr);
  static Register GetRsReg(Instr instr);
  static Register GetRdReg(Instr instr);

  // Extract raw fields; the *Field variants return the bits still in their
  // in-instruction position, the others return the plain value.
  static uint32_t GetRt(Instr instr);
  static uint32_t GetRtField(Instr instr);
  static uint32_t GetRs(Instr instr);
  static uint32_t GetRsField(Instr instr);
  static uint32_t GetRd(Instr instr);
  static uint32_t GetRdField(Instr instr);
  static uint32_t GetSa(Instr instr);
  static uint32_t GetSaField(Instr instr);
  static uint32_t GetOpcodeField(Instr instr);
  static uint32_t GetFunction(Instr instr);
  static uint32_t GetFunctionField(Instr instr);
  static uint32_t GetImmediate16(Instr instr);
  static uint32_t GetLabelConst(Instr instr);

  // Offset extraction/patching for branch and memory instructions.
  static int32_t GetBranchOffset(Instr instr);
  static bool IsLw(Instr instr);
  static int16_t GetLwOffset(Instr instr);
  static int16_t GetJicOrJialcOffset(Instr instr);
  static int16_t GetLuiOffset(Instr instr);
  static Instr SetLwOffset(Instr instr, int16_t offset);

  static bool IsSw(Instr instr);
  static Instr SetSwOffset(Instr instr, int16_t offset);
  static bool IsAddImmediate(Instr instr);
  static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
  // Combine/split a 32-bit target address across a lui / jic (or jialc) pair.
  static uint32_t CreateTargetAddress(Instr instr_lui, Instr instr_jic);
  static void UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
                                  int16_t& jic_offset);
  static void UnpackTargetAddressUnsigned(uint32_t address,
                                          uint32_t& lui_offset,
                                          uint32_t& jic_offset);

  static bool IsAndImmediate(Instr instr);
  static bool IsEmittedConstant(Instr instr);

  // Check whether the trampoline pool needs to be emitted at this point.
  void CheckTrampolinePool();

  // MIPS has no embedded constant pool, so this must never be reached.
  void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
                                          ConstantPoolEntry::Access access,
                                          ConstantPoolEntry::Type type) {
    // No embedded constant pool support.
    UNREACHABLE();
  }

  bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
  // Compact (forbidden-slot) branches are only available on MIPS32r6.
  static bool IsCompactBranchSupported() {
    return IsMipsArchVariant(kMips32r6);
  }

  inline int UnboundLabelsCount() { return unbound_labels_count_; }

 protected:
  // Load Scaled Address instruction.
  void lsa(Register rd, Register rt, Register rs, uint8_t sa);

  // Helpers.
  void LoadRegPlusOffsetToAt(const MemOperand& src);
  int32_t LoadRegPlusUpperOffsetPartToAt(const MemOperand& src);
  int32_t LoadUpperOffsetForTwoMemoryAccesses(const MemOperand& src);

  // Relocation for a type-recording IC has the AST id added to it.  This
  // member variable is a way to pass the information from the call site to
  // the relocation info.
  TypeFeedbackId recorded_ast_id_;

  // Bytes left between emitted code and the relocation writer's position.
  int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }

  // Decode branch instruction at pos and return branch target pos.
  int target_at(int pos, bool is_internal);

  // Patch branch instruction at pos to branch to given branch target pos.
  void target_at_put(int pos, int target_pos, bool is_internal);

  // Say if we need to relocate with this mode.
  bool MustUseReg(RelocInfo::Mode rmode);

  // Record reloc info for current pc_.
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);

  // Block the emission of the trampoline pool before pc_offset.
  void BlockTrampolinePoolBefore(int pc_offset) {
    if (no_trampoline_pool_before_ < pc_offset)
      no_trampoline_pool_before_ = pc_offset;
  }

  // Pool blocking is a nesting count so scopes may be nested.
  void StartBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_++;
  }

  void EndBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_--;
  }

  bool is_trampoline_pool_blocked() const {
    return trampoline_pool_blocked_nesting_ > 0;
  }

  // NOTE(review): presumably set when trampoline slot allocation fails
  // (see Trampoline::take_slot) — confirm in the .cc.
  bool has_exception() const {
    return internal_trampoline_exception_;
  }

  void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi);

  bool is_trampoline_emitted() const {
    return trampoline_emitted_;
  }

  // Temporarily block automatic assembly buffer growth.
  // Not nestable: the DCHECKs enforce strict start/end pairing.
  void StartBlockGrowBuffer() {
    DCHECK(!block_buffer_growth_);
    block_buffer_growth_ = true;
  }

  void EndBlockGrowBuffer() {
    DCHECK(block_buffer_growth_);
    block_buffer_growth_ = false;
  }

  bool is_buffer_growth_blocked() const {
    return block_buffer_growth_;
  }

  // If the previous instruction was a compact branch, emit a nop to fill
  // its forbidden slot.
  void EmitForbiddenSlotInstruction() {
    if (IsPrevInstrCompactBranch()) {
      nop();
    }
  }

  inline void CheckTrampolinePoolQuick(int extra_instructions = 0);

  inline void CheckBuffer();
 private:
  inline static void set_target_internal_reference_encoded_at(Address pc,
                                                              Address target);

  // Buffer size and constant pool distance are checked together at regular
  // intervals of kBufferCheckInterval emitted bytes.
  static const int kBufferCheckInterval = 1*KB/2;

  // Code generation.
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries.
  static const int kGap = 32;


  // Repeated checking whether the trampoline pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated.
  static const int kCheckConstIntervalInst = 32;
  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;

  int next_buffer_check_;  // pc offset of next buffer check.

  // Emission of the trampoline pool may be blocked in some code sequences.
  int trampoline_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_trampoline_pool_before_;  // Block emission before this pc offset.

  // Keep track of the last emitted pool to guarantee a maximal distance.
  int last_trampoline_pool_end_;  // pc offset of the end of the last pool.

  // Automatic growth of the assembly buffer may be blocked for some sequences.
  bool block_buffer_growth_;  // Block growth when true.

  // Relocation information generation.
  // Each relocation is encoded as a variable size value.
  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;

  // The bound position, before this we cannot do instruction elimination.
  int last_bound_pos_;

  // Readable constants for compact branch handling in emit()
  enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true };

  // Code emission.
  void GrowBuffer();
  inline void emit(Instr x,
                   CompactBranchType is_compact_branch = CompactBranchType::NO);
  inline void emit(uint64_t x);
  inline void CheckForEmitInForbiddenSlot();
  template <typename T>
  inline void EmitHelper(T x);
  inline void EmitHelper(Instr x, CompactBranchType is_compact_branch);

  // Instruction generation.
  // We have 3 different kind of encoding layout on MIPS.
  // However due to many different types of objects encoded in the same fields
  // we have quite a few aliases for each mode.
  // Using the same structure to refer to Register and FPURegister would spare a
  // few aliases, but mixing both does not look clean to me.
  // Anyway we could surely implement this differently.

  // R-type layouts (register operands; overloads cover GPR, FPU and
  // FPU-control operand combinations).
  void GenInstrRegister(Opcode opcode,
                        Register rs,
                        Register rt,
                        Register rd,
                        uint16_t sa = 0,
                        SecondaryField func = NULLSF);

  void GenInstrRegister(Opcode opcode,
                        Register rs,
                        Register rt,
                        uint16_t msb,
                        uint16_t lsb,
                        SecondaryField func);

  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        FPURegister ft,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

  void GenInstrRegister(Opcode opcode,
                        FPURegister fr,
                        FPURegister ft,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        Register rt,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        Register rt,
                        FPUControlRegister fs,
                        SecondaryField func = NULLSF);

  // I-type layouts (16/21/26-bit immediate variants).
  void GenInstrImmediate(
      Opcode opcode, Register rs, Register rt, int32_t j,
      CompactBranchType is_compact_branch = CompactBranchType::NO);
  void GenInstrImmediate(
      Opcode opcode, Register rs, SecondaryField SF, int32_t j,
      CompactBranchType is_compact_branch = CompactBranchType::NO);
  void GenInstrImmediate(
      Opcode opcode, Register r1, FPURegister r2, int32_t j,
      CompactBranchType is_compact_branch = CompactBranchType::NO);
  void GenInstrImmediate(
      Opcode opcode, Register rs, int32_t offset21,
      CompactBranchType is_compact_branch = CompactBranchType::NO);
  void GenInstrImmediate(Opcode opcode, Register rs, uint32_t offset21);
  void GenInstrImmediate(
      Opcode opcode, int32_t offset26,
      CompactBranchType is_compact_branch = CompactBranchType::NO);


  // J-type layout.
  void GenInstrJump(Opcode opcode,
                     uint32_t address);


  // Labels.
  void print(Label* L);
  void bind_to(Label* L, int pos);
  void next(Label* L, bool is_internal);
  // One trampoline consists of:
  // - space for trampoline slots,
  // - space for labels.
  //
  // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
  // Space for trampoline slots preceeds space for labels. Each label is of one
  // instruction size, so total amount for labels is equal to
  // label_count *  kInstrSize.
  class Trampoline {
   public:
1407 1408 1409 1410 1411 1412 1413
    Trampoline() {
      start_ = 0;
      next_slot_ = 0;
      free_slot_count_ = 0;
      end_ = 0;
    }
    Trampoline(int start, int slot_count) {
1414 1415 1416
      start_ = start;
      next_slot_ = start;
      free_slot_count_ = slot_count;
1417
      end_ = start + slot_count * kTrampolineSlotsSize;
1418 1419 1420 1421 1422 1423 1424 1425
    }
    int start() {
      return start_;
    }
    int end() {
      return end_;
    }
    int take_slot() {
1426 1427 1428 1429 1430
      int trampoline_slot = kInvalidSlotPos;
      if (free_slot_count_ <= 0) {
        // We have run out of space on trampolines.
        // Make sure we fail in debug mode, so we become aware of each case
        // when this happens.
1431
        DCHECK(0);
1432 1433 1434 1435
        // Internal exception will be caught.
      } else {
        trampoline_slot = next_slot_;
        free_slot_count_--;
1436
        next_slot_ += kTrampolineSlotsSize;
1437
      }
1438 1439
      return trampoline_slot;
    }
1440

1441 1442 1443 1444 1445 1446 1447
   private:
    int start_;
    int end_;
    int next_slot_;
    int free_slot_count_;
  };

  // Get a trampoline slot position reachable from 'pos'; see the .cc for
  // the allocation policy.
  int32_t get_trampoline_entry(int32_t pos);
  int unbound_labels_count_;
  // If trampoline is emitted, generated code is becoming large. As this is
  // already a slow case which can possibly break our code generation for the
  // extreme case, we use this information to trigger different mode of
  // branch instruction generation, where we use jump instructions rather
  // than regular branch instructions.
  bool trampoline_emitted_;
  // Sentinel returned by Trampoline::take_slot() when the pool is full.
  static const int kInvalidSlotPos = -1;

  // Internal reference positions, required for unbounded internal reference
  // labels.
  std::set<int> internal_reference_positions_;
  bool is_internal_reference(Label* L) {
    return internal_reference_positions_.find(L->pos()) !=
           internal_reference_positions_.end();
  }

  // Tracks whether the last emitted instruction was a compact branch, so
  // EmitForbiddenSlotInstruction() can pad its forbidden slot.
  void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
  void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
  bool prev_instr_compact_branch_ = false;

  Trampoline trampoline_;
  bool internal_trampoline_exception_;

  friend class RegExpMacroAssemblerMIPS;
  friend class RelocInfo;
  friend class CodePatcher;
  friend class BlockTrampolinePoolScope;
  friend class EnsureSpace;
};


// Helper constructed before emitting code; calls Assembler::CheckBuffer()
// so the buffer is dealt with up front — see CheckBuffer() for the policy.
class EnsureSpace BASE_EMBEDDED {
 public:
  explicit EnsureSpace(Assembler* assembler) {
    assembler->CheckBuffer();
  }
};

}  // namespace internal
}  // namespace v8

#endif  // V8_MIPS_ASSEMBLER_MIPS_H_