// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.


#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
#define V8_MIPS_ASSEMBLER_MIPS_H_

#include <stdio.h>

#include <set>

#include "src/assembler.h"
#include "src/mips/constants-mips.h"

namespace v8 {
namespace internal {

// clang-format off

// X-macro listing all 32 MIPS general-purpose registers, in encoding order
// (code 0 = zero_reg ... code 31 = ra).
#define GENERAL_REGISTERS(V)                              \
  V(zero_reg)  V(at)  V(v0)  V(v1)  V(a0)  V(a1)  V(a2)  V(a3)  \
  V(t0)  V(t1)  V(t2)  V(t3)  V(t4)  V(t5)  V(t6)  V(t7)  \
  V(s0)  V(s1)  V(s2)  V(s3)  V(s4)  V(s5)  V(s6)  V(s7)  V(t8)  V(t9) \
  V(k0)  V(k1)  V(gp)  V(sp)  V(fp)  V(ra)

// Subset of the general registers that the register allocator may use.
#define ALLOCATABLE_GENERAL_REGISTERS(V) \
  V(v0)  V(v1)  V(a0)  V(a1)  V(a2)  V(a3) \
  V(t0)  V(t1)  V(t2)  V(t3)  V(t4)  V(t5)  V(t6) V(s7)

// All 32 FPU registers. Each is 32 bits wide; doubles use an even/odd pair
// (see the FPURegister comments below).
#define DOUBLE_REGISTERS(V)                               \
  V(f0)  V(f1)  V(f2)  V(f3)  V(f4)  V(f5)  V(f6)  V(f7)  \
  V(f8)  V(f9)  V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
  V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
  V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)

// Float and SIMD register files alias the double registers on this target.
#define FLOAT_REGISTERS DOUBLE_REGISTERS
#define SIMD128_REGISTERS DOUBLE_REGISTERS

// Even-numbered double registers available to the allocator. f26/f28/f30 are
// deliberately absent; they are reserved (see the aliases later in this file).
#define ALLOCATABLE_DOUBLE_REGISTERS(V)                   \
  V(f0)  V(f2)  V(f4)  V(f6)  V(f8)  V(f10) V(f12) V(f14) \
  V(f16) V(f18) V(f20) V(f22) V(f24)
// clang-format on

// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
// compatible with int, which has caused code-generation bugs.
//
// 2) We would prefer to use a class instead of a struct but we don't like
// the register initialization to depend on the particular initialization
// order (which appears to be different on OS X, Linux, and Windows for the
// installed versions of C++ we tried). Using a struct permits C-style
// "initialization". Also, the Register objects cannot be const as this
// forces initialization stubs in MSVC, making us dependent on initialization
// order.
//
// 3) By not using an enum, we are possibly preventing the compiler from
// doing certain constant folds, which may significantly reduce the
// code generated for some assembly instructions (because they boil down
// to a few constants). If this is a problem, we could change the code
// such that we use an enum in optimized mode, and the struct in debug
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.


// -----------------------------------------------------------------------------
// Implementation of Register and FPURegister.

struct Register {
100
  static const int kCpRegister = 23;  // cp (s7) is the 23rd register.
101

102 103 104 105 106 107 108 109 110 111
  enum Code {
#define REGISTER_CODE(R) kCode_##R,
    GENERAL_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
        kAfterLast,
    kCode_no_reg = -1
  };

  static const int kNumRegisters = Code::kAfterLast;

112 113 114 115 116 117 118 119 120 121
#if defined(V8_TARGET_LITTLE_ENDIAN)
  static const int kMantissaOffset = 0;
  static const int kExponentOffset = 4;
#elif defined(V8_TARGET_BIG_ENDIAN)
  static const int kMantissaOffset = 4;
  static const int kExponentOffset = 0;
#else
#error Unknown endianness
#endif

122 123

  static Register from_code(int code) {
124 125 126
    DCHECK(code >= 0);
    DCHECK(code < kNumRegisters);
    Register r = {code};
127 128
    return r;
  }
129 130
  bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
  bool is(Register reg) const { return reg_code == reg.reg_code; }
131
  int code() const {
132
    DCHECK(is_valid());
133
    return reg_code;
134
  }
135
  int bit() const {
136
    DCHECK(is_valid());
137
    return 1 << reg_code;
138 139 140
  }

  // Unfortunately we can't make this private in a struct.
141
  int reg_code;
142 143
};

144 145 146 147 148 149 150
// s7: context register
// s3: lithium scratch
// s4: lithium scratch2
#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
const Register no_reg = {Register::kCode_no_reg};
151

152 153 154 155 156

int ToNumber(Register reg);

Register ToRegister(int num);

157 158
static const bool kSimpleFPAliasing = true;

159
// Coprocessor register.
160
struct FPURegister {
161 162 163 164 165 166 167
  enum Code {
#define REGISTER_CODE(R) kCode_##R,
    DOUBLE_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
        kAfterLast,
    kCode_no_reg = -1
  };
168

169
  static const int kMaxNumRegisters = Code::kAfterLast;
170 171 172

  inline static int NumRegisters();

173 174 175
  // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
  // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
  // number of Double regs (64-bit regs, or FPU-reg-pairs).
176

177
  bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
178 179
  bool is(FPURegister reg) const { return reg_code == reg.reg_code; }
  FPURegister low() const {
180
    // Find low reg of a Double-reg pair, which is the reg itself.
181
    DCHECK(reg_code % 2 == 0);  // Specified Double reg must be even.
182
    FPURegister reg;
183
    reg.reg_code = reg_code;
184
    DCHECK(reg.is_valid());
185 186
    return reg;
  }
187
  FPURegister high() const {
188
    // Find high reg of a Doubel-reg pair, which is reg + 1.
189
    DCHECK(reg_code % 2 == 0);  // Specified Double reg must be even.
190
    FPURegister reg;
191
    reg.reg_code = reg_code + 1;
192
    DCHECK(reg.is_valid());
193 194 195
    return reg;
  }

196
  int code() const {
197
    DCHECK(is_valid());
198
    return reg_code;
199
  }
200
  int bit() const {
201
    DCHECK(is_valid());
202 203 204
    return 1 << reg_code;
  }

205 206
  static FPURegister from_code(int code) {
    FPURegister r = {code};
207
    return r;
208
  }
209
  void setcode(int f) {
210
    reg_code = f;
211
    DCHECK(is_valid());
212
  }
213
  // Unfortunately we can't make this private in a struct.
214
  int reg_code;
215 216
};

// A few double registers are reserved: one as a scratch register and one to
// hold 0.0.
//  f28: 0.0
//  f30: scratch register.

// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
// 32-bit registers, f0 through f31. When used as 'double' they are used
// in pairs, starting with the even numbered register. So a double operation
// on f0 really uses f0 and f1.
// (Modern mips hardware also supports 32 64-bit registers, via setting
// (privileged) Status Register FR bit to 1. This is used by the N32 ABI,
// but it is not in common use. Someday we will want to support this in v8.)

// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
typedef FPURegister FloatRegister;

typedef FPURegister DoubleRegister;

// TODO(mips) Define SIMD registers.
typedef FPURegister Simd128Register;

// Named constants for the FPU registers. Comments mark the O32 hard-float
// calling-convention roles where the code above establishes them.
const DoubleRegister no_freg = {-1};

const DoubleRegister f0 = {0};  // Return value in hard float mode.
const DoubleRegister f1 = {1};
const DoubleRegister f2 = {2};
const DoubleRegister f3 = {3};
const DoubleRegister f4 = {4};
const DoubleRegister f5 = {5};
const DoubleRegister f6 = {6};
const DoubleRegister f7 = {7};
const DoubleRegister f8 = {8};
const DoubleRegister f9 = {9};
const DoubleRegister f10 = {10};
const DoubleRegister f11 = {11};
const DoubleRegister f12 = {12};  // Arg 0 in hard float mode.
const DoubleRegister f13 = {13};
const DoubleRegister f14 = {14};  // Arg 1 in hard float mode.
const DoubleRegister f15 = {15};
const DoubleRegister f16 = {16};
const DoubleRegister f17 = {17};
const DoubleRegister f18 = {18};
const DoubleRegister f19 = {19};
const DoubleRegister f20 = {20};
const DoubleRegister f21 = {21};
const DoubleRegister f22 = {22};
const DoubleRegister f23 = {23};
const DoubleRegister f24 = {24};
const DoubleRegister f25 = {25};
const DoubleRegister f26 = {26};
const DoubleRegister f27 = {27};
const DoubleRegister f28 = {28};
const DoubleRegister f29 = {29};
const DoubleRegister f30 = {30};
const DoubleRegister f31 = {31};

273 274
// Register aliases.
// cp is assumed to be a callee saved register.
// Defined using #define instead of "static const Register&" because Clang
// complains otherwise when a compilation unit that includes this header
// doesn't use the variables.
#define kRootRegister s6
#define cp s7
#define kLithiumScratchReg s3
#define kLithiumScratchReg2 s4
#define kLithiumScratchDouble f30
#define kDoubleRegZero f28
// Used on mips32r6 for compare operations.
#define kDoubleCompareReg f26

287 288 289
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
struct FPUControlRegister {
290 291
  bool is_valid() const { return reg_code == kFCSRRegister; }
  bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
292
  int code() const {
293
    DCHECK(is_valid());
294
    return reg_code;
295 296
  }
  int bit() const {
297
    DCHECK(is_valid());
298
    return 1 << reg_code;
299 300
  }
  void setcode(int f) {
301
    reg_code = f;
302
    DCHECK(is_valid());
303 304
  }
  // Unfortunately we can't make this private in a struct.
305
  int reg_code;
306 307
};

308
const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
309
const FPUControlRegister FCSR = { kFCSRRegister };
310 311 312 313 314 315 316 317 318

// -----------------------------------------------------------------------------
// Machine instruction Operands.

// Class Operand represents a shifter operand in data processing instructions.
class Operand BASE_EMBEDDED {
 public:
  // Immediate.
  INLINE(explicit Operand(int32_t immediate,
319
         RelocInfo::Mode rmode = RelocInfo::NONE32));
320 321 322 323 324 325 326 327 328 329 330 331 332
  INLINE(explicit Operand(const ExternalReference& f));
  INLINE(explicit Operand(const char* s));
  INLINE(explicit Operand(Object** opp));
  INLINE(explicit Operand(Context** cpp));
  explicit Operand(Handle<Object> handle);
  INLINE(explicit Operand(Smi* value));

  // Register.
  INLINE(explicit Operand(Register rm));

  // Return true if this is a register operand.
  INLINE(bool is_reg() const);

333
  inline int32_t immediate() const {
334
    DCHECK(!is_reg());
335 336 337
    return imm32_;
  }

338 339 340 341
  Register rm() const { return rm_; }

 private:
  Register rm_;
342
  int32_t imm32_;  // Valid if rm_ == no_reg.
343 344 345 346 347 348 349 350 351 352 353
  RelocInfo::Mode rmode_;

  friend class Assembler;
  friend class MacroAssembler;
};


// On MIPS we have only one adressing mode with base_reg + offset.
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
 public:
plind44@gmail.com's avatar
plind44@gmail.com committed
354 355 356 357 358 359
  // Immediate value attached to offset.
  enum OffsetAddend {
    offset_minus_one = -1,
    offset_zero = 0
  };

360
  explicit MemOperand(Register rn, int32_t offset = 0);
plind44@gmail.com's avatar
plind44@gmail.com committed
361 362
  explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
                      OffsetAddend offset_addend = offset_zero);
363
  int32_t offset() const { return offset_; }
364

365 366 367 368
  bool OffsetIsInt16Encodable() const {
    return is_int16(offset_);
  }

369
 private:
370
  int32_t offset_;
371 372 373 374 375

  friend class Assembler;
};


376
class Assembler : public AssemblerBase {
377 378 379 380 381 382 383 384 385 386 387 388 389 390
 public:
  // Create an assembler. Instructions and relocation information are emitted
  // into a buffer, with the instructions starting from the beginning and the
  // relocation information starting from the end of the buffer. See CodeDesc
  // for a detailed comment on the layout (globals.h).
  //
  // If the provided buffer is NULL, the assembler allocates and grows its own
  // buffer, and buffer_size determines the initial buffer size. The buffer is
  // owned by the assembler and deallocated upon destruction of the assembler.
  //
  // If the provided buffer is not NULL, the assembler uses the provided buffer
  // for code generation and assumes its size to be buffer_size. If the buffer
  // is too small, a fatal error occurs. No deallocation of the buffer is done
  // upon destruction of the assembler.
391
  Assembler(Isolate* isolate, void* buffer, int buffer_size);
392
  virtual ~Assembler() { }
393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412

  // GetCode emits any pending (non-emitted) code and fills the descriptor
  // desc. GetCode() is idempotent; it returns the same result if no other
  // Assembler functions are invoked in between GetCode() calls.
  void GetCode(CodeDesc* desc);

  // Label operations & relative jumps (PPUM Appendix D).
  //
  // Takes a branch opcode (cc) and a label (L) and generates
  // either a backward branch or a forward branch and links it
  // to the label fixup chain. Usage:
  //
  // Label L;    // unbound label
  // j(cc, &L);  // forward branch to unbound label
  // bind(&L);   // bind label to the current pc
  // j(cc, &L);  // backward branch to bound label
  // bind(&L);   // illegal: a label may be bound only once
  //
  // Note: The same Label can be used for forward and backward branches
  // but it may be bound only once.
413
  void bind(Label* L);  // Binds an unbound label L to current code position.
414 415 416

  enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 };

417 418 419
  // Determines if Label is bound and near enough so that branch instruction
  // can be used to reach it, instead of jump instruction.
  bool is_near(Label* L);
420 421
  bool is_near(Label* L, OffsetSize bits);
  bool is_near_branch(Label* L);
422 423 424 425 426 427 428 429 430 431
  inline bool is_near_pre_r6(Label* L) {
    DCHECK(!IsMipsArchVariant(kMips32r6));
    return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
  }
  inline bool is_near_r6(Label* L) {
    DCHECK(IsMipsArchVariant(kMips32r6));
    return pc_offset() - L->pos() < kMaxCompactBranchOffset - 4 * kInstrSize;
  }

  int BranchOffset(Instr instr);
432

433 434
  // Returns the branch offset to the given label from the current code
  // position. Links the label to the current position if it is still unbound.
435
  // Manages the jump elimination optimization if the second parameter is true.
436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453
  int32_t branch_offset_helper(Label* L, OffsetSize bits);
  inline int32_t branch_offset(Label* L) {
    return branch_offset_helper(L, OffsetSize::kOffset16);
  }
  inline int32_t branch_offset21(Label* L) {
    return branch_offset_helper(L, OffsetSize::kOffset21);
  }
  inline int32_t branch_offset26(Label* L) {
    return branch_offset_helper(L, OffsetSize::kOffset26);
  }
  inline int32_t shifted_branch_offset(Label* L) {
    return branch_offset(L) >> 2;
  }
  inline int32_t shifted_branch_offset21(Label* L) {
    return branch_offset21(L) >> 2;
  }
  inline int32_t shifted_branch_offset26(Label* L) {
    return branch_offset26(L) >> 2;
454
  }
455
  uint32_t jump_address(Label* L);
456 457 458 459 460 461 462

  // Puts a labels target address at the given position.
  // The high 8 bits are set to zero.
  void label_at_put(Label* L, int at_offset);

  // Read/Modify the code target address in the branch/call instruction at pc.
  static Address target_address_at(Address pc);
463 464 465
  static void set_target_address_at(
      Isolate* isolate, Address pc, Address target,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
466
  // On MIPS there is no Constant Pool so we skip that parameter.
467
  INLINE(static Address target_address_at(Address pc, Address constant_pool)) {
468 469
    return target_address_at(pc);
  }
470
  INLINE(static void set_target_address_at(
471
      Isolate* isolate, Address pc, Address constant_pool, Address target,
472
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
473
    set_target_address_at(isolate, pc, target, icache_flush_mode);
474
  }
475
  INLINE(static Address target_address_at(Address pc, Code* code));
476 477
  INLINE(static void set_target_address_at(
      Isolate* isolate, Address pc, Code* code, Address target,
478
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
479

480 481 482 483
  // Return the code target address at a call site from the return address
  // of that call in the instruction stream.
  inline static Address target_address_from_return_address(Address pc);

484 485
  static void QuietNaN(HeapObject* nan);

486
  // This sets the branch destination (which gets loaded at the call address).
487 488 489
  // This is for calls and branches within generated code.  The serializer
  // has already deserialized the lui/ori instructions etc.
  inline static void deserialization_set_special_target_at(
490 491
      Isolate* isolate, Address instruction_payload, Code* code,
      Address target) {
492
    set_target_address_at(
493 494
        isolate,
        instruction_payload - kInstructionsFor32BitConstant * kInstrSize, code,
495
        target);
496 497
  }

498 499
  // This sets the internal reference at the pc.
  inline static void deserialization_set_target_internal_reference_at(
500
      Isolate* isolate, Address pc, Address target,
501
      RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
502

503 504 505 506 507 508 509 510 511 512 513 514
  // Size of an instruction.
  static const int kInstrSize = sizeof(Instr);

  // Difference between address of current opcode and target address offset.
  static const int kBranchPCOffset = 4;

  // Here we are patching the address in the LUI/ORI instruction pair.
  // These values are used in the serialization process and must be zero for
  // MIPS platform, as Code, Embedded Object or External-reference pointers
  // are split across two consecutive instructions and don't exist separately
  // in the code, so the serializer should not step forwards in memory after
  // a target is resolved and written.
515
  static const int kSpecialTargetSize = 0;
516

517 518 519 520 521
  // Number of consecutive instructions used to store 32bit constant. This
  // constant is used in RelocInfo::target_address_address() function to tell
  // serializer address of the instruction that follows LUI/ORI instruction
  // pair.
  static const int kInstructionsFor32BitConstant = 2;
522 523 524

  // Distance between the instruction referring to the address of the call
  // target and the return address.
525 526 527
#ifdef _MIPS_ARCH_MIPS32R6
  static const int kCallTargetAddressOffset = 3 * kInstrSize;
#else
528
  static const int kCallTargetAddressOffset = 4 * kInstrSize;
529
#endif
530

531 532
  // Distance between start of patched debug break slot and the emitted address
  // to jump to.
533
  static const int kPatchDebugBreakSlotAddressOffset = 4 * kInstrSize;
534 535 536 537 538

  // Difference between address of current opcode and value read from pc
  // register.
  static const int kPcLoadDelta = 4;

539 540 541
#ifdef _MIPS_ARCH_MIPS32R6
  static const int kDebugBreakSlotInstructions = 3;
#else
542
  static const int kDebugBreakSlotInstructions = 4;
543
#endif
544 545 546
  static const int kDebugBreakSlotLength =
      kDebugBreakSlotInstructions * kInstrSize;

547 548 549 550 551 552 553 554 555 556 557
  // Max offset for instructions with 16-bit offset field
  static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;

  // Max offset for compact branch instructions with 26-bit offset field
  static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;

#ifdef _MIPS_ARCH_MIPS32R6
  static const int kTrampolineSlotsSize = 2 * kInstrSize;
#else
  static const int kTrampolineSlotsSize = 4 * kInstrSize;
#endif
558 559 560 561

  // ---------------------------------------------------------------------------
  // Code generation.

562 563 564 565
  // Insert the smallest number of nop instructions
  // possible to align the pc offset to a multiple
  // of m. m must be a power of 2 (>= 4).
  void Align(int m);
566 567 568
  // Insert the smallest number of zero bytes possible to align the pc offset
  // to a mulitple of m. m must be a power of 2 (>= 2).
  void DataAlign(int m);
569 570 571 572 573 574 575 576 577 578 579 580 581 582
  // Aligns code to something that's optimal for a jump target for the platform.
  void CodeTargetAlign();

  // Different nop operations are used by the code generator to detect certain
  // states of the generated code.
  enum NopMarkerTypes {
    NON_MARKING_NOP = 0,
    DEBUG_BREAK_NOP,
    // IC markers.
    PROPERTY_ACCESS_INLINED,
    PROPERTY_ACCESS_INLINED_CONTEXT,
    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
    // Helper values.
    LAST_CODE_MARKER,
583 584
    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
    // Code aging
585 586
    CODE_AGE_MARKER_NOP = 6,
    CODE_AGE_SEQUENCE_NOP
587 588
  };

589 590 591
  // Type == 0 is the default non-marking nop. For mips this is a
  // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero
  // marking, to avoid conflict with ssnop and ehb instructions.
592
  void nop(unsigned int type = 0) {
593
    DCHECK(type < 32);
594 595
    Register nop_rt_reg = (type == 0) ? zero_reg : at;
    sll(zero_reg, nop_rt_reg, type, true);
596
  }
597 598


599
  // --------Branch-and-jump-instructions----------
600 601
  // We don't use likely variant of instructions.
  void b(int16_t offset);
602
  inline void b(Label* L) { b(shifted_branch_offset(L)); }
603
  void bal(int16_t offset);
604
  inline void bal(Label* L) { bal(shifted_branch_offset(L)); }
605
  void bc(int32_t offset);
606
  inline void bc(Label* L) { bc(shifted_branch_offset26(L)); }
607
  void balc(int32_t offset);
608
  inline void balc(Label* L) { balc(shifted_branch_offset26(L)); }
609 610

  void beq(Register rs, Register rt, int16_t offset);
611 612
  inline void beq(Register rs, Register rt, Label* L) {
    beq(rs, rt, shifted_branch_offset(L));
613 614
  }
  void bgez(Register rs, int16_t offset);
615
  void bgezc(Register rt, int16_t offset);
616 617
  inline void bgezc(Register rt, Label* L) {
    bgezc(rt, shifted_branch_offset(L));
618 619
  }
  void bgeuc(Register rs, Register rt, int16_t offset);
620 621
  inline void bgeuc(Register rs, Register rt, Label* L) {
    bgeuc(rs, rt, shifted_branch_offset(L));
622 623
  }
  void bgec(Register rs, Register rt, int16_t offset);
624 625
  inline void bgec(Register rs, Register rt, Label* L) {
    bgec(rs, rt, shifted_branch_offset(L));
626
  }
627
  void bgezal(Register rs, int16_t offset);
628
  void bgezalc(Register rt, int16_t offset);
629 630
  inline void bgezalc(Register rt, Label* L) {
    bgezalc(rt, shifted_branch_offset(L));
631 632
  }
  void bgezall(Register rs, int16_t offset);
633 634
  inline void bgezall(Register rs, Label* L) {
    bgezall(rs, branch_offset(L) >> 2);
635
  }
636
  void bgtz(Register rs, int16_t offset);
637
  void bgtzc(Register rt, int16_t offset);
638 639
  inline void bgtzc(Register rt, Label* L) {
    bgtzc(rt, shifted_branch_offset(L));
640
  }
641
  void blez(Register rs, int16_t offset);
642
  void blezc(Register rt, int16_t offset);
643 644
  inline void blezc(Register rt, Label* L) {
    blezc(rt, shifted_branch_offset(L));
645
  }
646
  void bltz(Register rs, int16_t offset);
647
  void bltzc(Register rt, int16_t offset);
648 649
  inline void bltzc(Register rt, Label* L) {
    bltzc(rt, shifted_branch_offset(L));
650 651
  }
  void bltuc(Register rs, Register rt, int16_t offset);
652 653
  inline void bltuc(Register rs, Register rt, Label* L) {
    bltuc(rs, rt, shifted_branch_offset(L));
654 655
  }
  void bltc(Register rs, Register rt, int16_t offset);
656 657
  inline void bltc(Register rs, Register rt, Label* L) {
    bltc(rs, rt, shifted_branch_offset(L));
658
  }
659
  void bltzal(Register rs, int16_t offset);
660
  void blezalc(Register rt, int16_t offset);
661 662
  inline void blezalc(Register rt, Label* L) {
    blezalc(rt, shifted_branch_offset(L));
663 664
  }
  void bltzalc(Register rt, int16_t offset);
665 666
  inline void bltzalc(Register rt, Label* L) {
    bltzalc(rt, shifted_branch_offset(L));
667 668
  }
  void bgtzalc(Register rt, int16_t offset);
669 670
  inline void bgtzalc(Register rt, Label* L) {
    bgtzalc(rt, shifted_branch_offset(L));
671 672
  }
  void beqzalc(Register rt, int16_t offset);
673 674
  inline void beqzalc(Register rt, Label* L) {
    beqzalc(rt, shifted_branch_offset(L));
675 676
  }
  void beqc(Register rs, Register rt, int16_t offset);
677 678
  inline void beqc(Register rs, Register rt, Label* L) {
    beqc(rs, rt, shifted_branch_offset(L));
679 680
  }
  void beqzc(Register rs, int32_t offset);
681 682
  inline void beqzc(Register rs, Label* L) {
    beqzc(rs, shifted_branch_offset21(L));
683 684
  }
  void bnezalc(Register rt, int16_t offset);
685 686
  inline void bnezalc(Register rt, Label* L) {
    bnezalc(rt, shifted_branch_offset(L));
687 688
  }
  void bnec(Register rs, Register rt, int16_t offset);
689 690
  inline void bnec(Register rs, Register rt, Label* L) {
    bnec(rs, rt, shifted_branch_offset(L));
691 692
  }
  void bnezc(Register rt, int32_t offset);
693 694
  inline void bnezc(Register rt, Label* L) {
    bnezc(rt, shifted_branch_offset21(L));
695
  }
696
  void bne(Register rs, Register rt, int16_t offset);
697 698
  inline void bne(Register rs, Register rt, Label* L) {
    bne(rs, rt, shifted_branch_offset(L));
699
  }
700
  void bovc(Register rs, Register rt, int16_t offset);
701 702
  inline void bovc(Register rs, Register rt, Label* L) {
    bovc(rs, rt, shifted_branch_offset(L));
703 704
  }
  void bnvc(Register rs, Register rt, int16_t offset);
705 706
  inline void bnvc(Register rs, Register rt, Label* L) {
    bnvc(rs, rt, shifted_branch_offset(L));
707
  }
708 709

  // Never use the int16_t b(l)cond version with a branch offset
710
  // instead of using the Label* version.
711

712
  // Jump targets must be in the current 256 MB-aligned region. i.e. 28 bits.
713 714 715 716
  void j(int32_t target);
  void jal(int32_t target);
  void jalr(Register rs, Register rd = ra);
  void jr(Register target);
717 718
  void jic(Register rt, int16_t offset);
  void jialc(Register rt, int16_t offset);
719 720


721
  // -------Data-processing-instructions---------
722 723 724 725 726 727 728 729

  // Arithmetic.
  void addu(Register rd, Register rs, Register rt);
  void subu(Register rd, Register rs, Register rt);
  void mult(Register rs, Register rt);
  void multu(Register rs, Register rt);
  void div(Register rs, Register rt);
  void divu(Register rs, Register rt);
730 731 732 733
  void div(Register rd, Register rs, Register rt);
  void divu(Register rd, Register rs, Register rt);
  void mod(Register rd, Register rs, Register rt);
  void modu(Register rd, Register rs, Register rt);
734
  void mul(Register rd, Register rs, Register rt);
735 736 737
  void muh(Register rd, Register rs, Register rt);
  void mulu(Register rd, Register rs, Register rt);
  void muhu(Register rd, Register rs, Register rt);
738 739 740 741 742 743 744 745 746 747 748 749 750

  void addiu(Register rd, Register rs, int32_t j);

  // Logical.
  void and_(Register rd, Register rs, Register rt);
  void or_(Register rd, Register rs, Register rt);
  void xor_(Register rd, Register rs, Register rt);
  void nor(Register rd, Register rs, Register rt);

  void andi(Register rd, Register rs, int32_t j);
  void ori(Register rd, Register rs, int32_t j);
  void xori(Register rd, Register rs, int32_t j);
  void lui(Register rd, int32_t j);
751
  void aui(Register rs, Register rt, int32_t j);
752 753

  // Shifts.
754 755 756 757
  // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
  // and may cause problems in normal code. coming_from_nop makes sure this
  // doesn't happen.
  void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
758 759 760 761 762
  void sllv(Register rd, Register rt, Register rs);
  void srl(Register rd, Register rt, uint16_t sa);
  void srlv(Register rd, Register rt, Register rs);
  void sra(Register rt, Register rd, uint16_t sa);
  void srav(Register rt, Register rd, Register rs);
763 764
  void rotr(Register rd, Register rt, uint16_t sa);
  void rotrv(Register rd, Register rt, Register rs);
765

766
  // ------------Memory-instructions-------------
767 768 769

  void lb(Register rd, const MemOperand& rs);
  void lbu(Register rd, const MemOperand& rs);
770 771
  void lh(Register rd, const MemOperand& rs);
  void lhu(Register rd, const MemOperand& rs);
772
  void lw(Register rd, const MemOperand& rs);
773 774
  void lwl(Register rd, const MemOperand& rs);
  void lwr(Register rd, const MemOperand& rs);
775
  void sb(Register rd, const MemOperand& rs);
776
  void sh(Register rd, const MemOperand& rs);
777
  void sw(Register rd, const MemOperand& rs);
778 779
  void swl(Register rd, const MemOperand& rs);
  void swr(Register rd, const MemOperand& rs);
780 781


782 783 784 785 786 787 788 789
  // ---------PC-Relative-instructions-----------

  void addiupc(Register rs, int32_t imm19);
  void lwpc(Register rs, int32_t offset19);
  void auipc(Register rs, int16_t imm16);
  void aluipc(Register rs, int16_t imm16);


790
  // ----------------Prefetch--------------------
plind44@gmail.com's avatar
plind44@gmail.com committed
791 792 793 794

  void pref(int32_t hint, const MemOperand& rs);


795
  // -------------Misc-instructions--------------
796 797

  // Break / Trap instructions.
798 799
  void break_(uint32_t code, bool break_as_stop = false);
  void stop(const char* msg, uint32_t code = kMaxStopCode);
800 801 802 803 804 805 806
  void tge(Register rs, Register rt, uint16_t code);
  void tgeu(Register rs, Register rt, uint16_t code);
  void tlt(Register rs, Register rt, uint16_t code);
  void tltu(Register rs, Register rt, uint16_t code);
  void teq(Register rs, Register rt, uint16_t code);
  void tne(Register rs, Register rt, uint16_t code);

807 808 809
  // Memory barrier instruction.
  void sync();

810 811 812 813 814 815 816 817 818 819
  // Move from HI/LO register.
  void mfhi(Register rd);
  void mflo(Register rd);

  // Set on less than.
  void slt(Register rd, Register rs, Register rt);
  void sltu(Register rd, Register rs, Register rt);
  void slti(Register rd, Register rs, int32_t j);
  void sltiu(Register rd, Register rs, int32_t j);

820 821 822 823 824 825
  // Conditional move.
  void movz(Register rd, Register rs, Register rt);
  void movn(Register rd, Register rs, Register rt);
  void movt(Register rd, Register rs, uint16_t cc = 0);
  void movf(Register rd, Register rs, uint16_t cc = 0);

826
  void sel(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
827 828
  void sel_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void sel_d(FPURegister fd, FPURegister fs, FPURegister ft);
829 830 831 832 833 834
  void seleqz(Register rd, Register rs, Register rt);
  void seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
              FPURegister ft);
  void selnez(Register rd, Register rs, Register rt);
  void selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
              FPURegister ft);
835 836 837 838 839 840 841
  void seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void selnez_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void selnez_s(FPURegister fd, FPURegister fs, FPURegister ft);

  void movz_s(FPURegister fd, FPURegister fs, Register rt);
  void movz_d(FPURegister fd, FPURegister fs, Register rt);
842 843 844 845
  void movt_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
  void movt_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
  void movf_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
  void movf_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
846 847
  void movn_s(FPURegister fd, FPURegister fs, Register rt);
  void movn_d(FPURegister fd, FPURegister fs, Register rt);
848 849 850 851
  // Bit twiddling.
  void clz(Register rd, Register rs);
  void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
  void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
852
  void bitswap(Register rd, Register rt);
853
  void align(Register rd, Register rs, Register rt, uint8_t bp);
854

855 856 857 858
  void wsbh(Register rd, Register rt);
  void seh(Register rd, Register rt);
  void seb(Register rd, Register rt);

859
  // --------Coprocessor-instructions----------------
860 861 862 863 864 865 866 867

  // Load, store, and move.
  void lwc1(FPURegister fd, const MemOperand& src);
  void ldc1(FPURegister fd, const MemOperand& src);

  void swc1(FPURegister fs, const MemOperand& dst);
  void sdc1(FPURegister fs, const MemOperand& dst);

868
  void mtc1(Register rt, FPURegister fs);
869 870
  void mthc1(Register rt, FPURegister fs);

871
  void mfc1(Register rt, FPURegister fs);
872
  void mfhc1(Register rt, FPURegister fs);
873 874 875 876 877

  void ctc1(Register rt, FPUControlRegister fs);
  void cfc1(Register rt, FPUControlRegister fs);

  // Arithmetic.
878
  void add_s(FPURegister fd, FPURegister fs, FPURegister ft);
879
  void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
880
  void sub_s(FPURegister fd, FPURegister fs, FPURegister ft);
881
  void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
882
  void mul_s(FPURegister fd, FPURegister fs, FPURegister ft);
883
  void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
884
  void madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
885
  void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
886 887 888 889 890 891
  void msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
  void msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
  void maddf_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void maddf_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void msubf_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void msubf_d(FPURegister fd, FPURegister fs, FPURegister ft);
892
  void div_s(FPURegister fd, FPURegister fs, FPURegister ft);
893
  void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
894
  void abs_s(FPURegister fd, FPURegister fs);
895 896
  void abs_d(FPURegister fd, FPURegister fs);
  void mov_d(FPURegister fd, FPURegister fs);
897
  void mov_s(FPURegister fd, FPURegister fs);
898
  void neg_s(FPURegister fd, FPURegister fs);
899
  void neg_d(FPURegister fd, FPURegister fs);
900
  void sqrt_s(FPURegister fd, FPURegister fs);
901
  void sqrt_d(FPURegister fd, FPURegister fs);
902 903 904 905
  void rsqrt_s(FPURegister fd, FPURegister fs);
  void rsqrt_d(FPURegister fd, FPURegister fs);
  void recip_d(FPURegister fd, FPURegister fs);
  void recip_s(FPURegister fd, FPURegister fs);
906 907 908 909

  // Conversion.
  void cvt_w_s(FPURegister fd, FPURegister fs);
  void cvt_w_d(FPURegister fd, FPURegister fs);
910 911 912 913 914 915 916 917
  void trunc_w_s(FPURegister fd, FPURegister fs);
  void trunc_w_d(FPURegister fd, FPURegister fs);
  void round_w_s(FPURegister fd, FPURegister fs);
  void round_w_d(FPURegister fd, FPURegister fs);
  void floor_w_s(FPURegister fd, FPURegister fs);
  void floor_w_d(FPURegister fd, FPURegister fs);
  void ceil_w_s(FPURegister fd, FPURegister fs);
  void ceil_w_d(FPURegister fd, FPURegister fs);
918 919 920
  void rint_s(FPURegister fd, FPURegister fs);
  void rint_d(FPURegister fd, FPURegister fs);
  void rint(SecondaryField fmt, FPURegister fd, FPURegister fs);
921 922 923

  void cvt_l_s(FPURegister fd, FPURegister fs);
  void cvt_l_d(FPURegister fd, FPURegister fs);
924 925 926 927 928 929 930 931
  void trunc_l_s(FPURegister fd, FPURegister fs);
  void trunc_l_d(FPURegister fd, FPURegister fs);
  void round_l_s(FPURegister fd, FPURegister fs);
  void round_l_d(FPURegister fd, FPURegister fs);
  void floor_l_s(FPURegister fd, FPURegister fs);
  void floor_l_d(FPURegister fd, FPURegister fs);
  void ceil_l_s(FPURegister fd, FPURegister fs);
  void ceil_l_d(FPURegister fd, FPURegister fs);
932

933 934 935
  void class_s(FPURegister fd, FPURegister fs);
  void class_d(FPURegister fd, FPURegister fs);

936 937 938 939
  void min(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
  void mina(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
  void max(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
  void maxa(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
940 941 942 943 944 945 946 947
  void min_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void min_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void max_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void max_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void mina_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void mina_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void maxa_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void maxa_d(FPURegister fd, FPURegister fs, FPURegister ft);
948

949 950 951 952 953 954 955 956
  void cvt_s_w(FPURegister fd, FPURegister fs);
  void cvt_s_l(FPURegister fd, FPURegister fs);
  void cvt_s_d(FPURegister fd, FPURegister fs);

  void cvt_d_w(FPURegister fd, FPURegister fs);
  void cvt_d_l(FPURegister fd, FPURegister fs);
  void cvt_d_s(FPURegister fd, FPURegister fs);

957 958 959
  // Conditions and branches for MIPSr6.
  void cmp(FPUCondition cond, SecondaryField fmt,
         FPURegister fd, FPURegister ft, FPURegister fs);
960 961
  void cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
  void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
962 963

  void bc1eqz(int16_t offset, FPURegister ft);
964 965
  inline void bc1eqz(Label* L, FPURegister ft) {
    bc1eqz(shifted_branch_offset(L), ft);
966 967
  }
  void bc1nez(int16_t offset, FPURegister ft);
968 969
  inline void bc1nez(Label* L, FPURegister ft) {
    bc1nez(shifted_branch_offset(L), ft);
970 971 972
  }

  // Conditions and branches for non MIPSr6.
973 974
  void c(FPUCondition cond, SecondaryField fmt,
         FPURegister ft, FPURegister fs, uint16_t cc = 0);
975 976
  void c_s(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
  void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
977 978

  void bc1f(int16_t offset, uint16_t cc = 0);
979 980 981
  inline void bc1f(Label* L, uint16_t cc = 0) {
    bc1f(shifted_branch_offset(L), cc);
  }
982
  void bc1t(int16_t offset, uint16_t cc = 0);
983 984 985
  inline void bc1t(Label* L, uint16_t cc = 0) {
    bc1t(shifted_branch_offset(L), cc);
  }
986
  void fcmp(FPURegister src1, const double src2, FPUCondition cond);
987 988

  // Check the code size generated from label to here.
  int SizeOfCodeGeneratedSince(Label* label) {
    return pc_offset() - label->pos();
  }

  // Check the number of instructions generated from label to here.
  int InstructionsGeneratedSince(Label* label) {
    return SizeOfCodeGeneratedSince(label) / kInstrSize;
  }

998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013
  // Class for scoping postponing the trampoline pool generation.
  class BlockTrampolinePoolScope {
   public:
    explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockTrampolinePool();
    }
    ~BlockTrampolinePoolScope() {
      assem_->EndBlockTrampolinePool();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
  };

1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026
  // Class for postponing the assembly buffer growth. Typically used for
  // sequences of instructions that must be emitted as a unit, before
  // buffer growth (and relocation) can occur.
  // This blocking scope is not nestable.
  class BlockGrowBufferScope {
   public:
    explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockGrowBuffer();
    }
    ~BlockGrowBufferScope() {
      assem_->EndBlockGrowBuffer();
    }

1027 1028
   private:
    Assembler* assem_;
1029

1030
    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
1031 1032
  };

1033 1034
  // Debugging.

1035
  // Mark address of a debug break slot.
1036
  void RecordDebugBreakSlot(RelocInfo::Mode mode);
1037

1038 1039
  // Record the AST id of the CallIC being compiled, so that it can be placed
  // in the relocation information.
1040
  void SetRecordedAstId(TypeFeedbackId ast_id) {
1041
    DCHECK(recorded_ast_id_.IsNone());
1042 1043 1044
    recorded_ast_id_ = ast_id;
  }

1045
  TypeFeedbackId RecordedAstId() {
1046
    DCHECK(!recorded_ast_id_.IsNone());
1047 1048 1049
    return recorded_ast_id_;
  }

1050
  void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
1051

1052
  // Record a comment relocation entry that can be used by a disassembler.
1053
  // Use --code-comments to enable.
1054 1055
  void RecordComment(const char* msg);

1056 1057
  // Record a deoptimization reason that can be used by a log or cpu profiler.
  // Use --trace-deopt to enable.
1058 1059
  void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
                         int id);
1060

1061 1062
  static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                       intptr_t pc_delta);
1063

1064 1065 1066 1067
  // Writes a single byte or word of data in the code stream.  Used for
  // inline tables, e.g., jump-tables.
  void db(uint8_t data);
  void dd(uint32_t data);
1068 1069
  void dq(uint64_t data);
  void dp(uintptr_t data) { dd(data); }
1070
  void dd(Label* label);
1071

1072 1073 1074 1075
  // Postpone the generation of the trampoline pool for the specified number of
  // instructions.
  void BlockTrampolinePoolFor(int instructions);

1076 1077 1078 1079 1080 1081 1082 1083 1084 1085
  // Check if there is less than kGap bytes available in the buffer.
  // If this is the case, we need to grow the buffer before emitting
  // an instruction or relocation information.
  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }

  // Get the number of bytes available in the buffer.
  inline int available_space() const { return reloc_info_writer.pos() - pc_; }

  // Read/patch instructions.
  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
1086
  static void instr_at_put(byte* pc, Instr instr) {
1087 1088 1089 1090 1091 1092 1093 1094
    *reinterpret_cast<Instr*>(pc) = instr;
  }
  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
  void instr_at_put(int pos, Instr instr) {
    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
  }

  // Check if an instruction is a branch of some kind.
1095
  static bool IsBranch(Instr instr);
1096 1097
  static bool IsBc(Instr instr);
  static bool IsBzc(Instr instr);
1098 1099
  static bool IsBeq(Instr instr);
  static bool IsBne(Instr instr);
1100 1101 1102 1103
  static bool IsBeqzc(Instr instr);
  static bool IsBnezc(Instr instr);
  static bool IsBeqc(Instr instr);
  static bool IsBnec(Instr instr);
1104
  static bool IsJicOrJialc(Instr instr);
1105

1106 1107 1108 1109 1110
  static bool IsJump(Instr instr);
  static bool IsJ(Instr instr);
  static bool IsLui(Instr instr);
  static bool IsOri(Instr instr);

1111 1112 1113 1114
  static bool IsJal(Instr instr);
  static bool IsJr(Instr instr);
  static bool IsJalr(Instr instr);

1115 1116 1117 1118 1119 1120 1121 1122
  static bool IsNop(Instr instr, unsigned int type);
  static bool IsPop(Instr instr);
  static bool IsPush(Instr instr);
  static bool IsLwRegFpOffset(Instr instr);
  static bool IsSwRegFpOffset(Instr instr);
  static bool IsLwRegFpNegOffset(Instr instr);
  static bool IsSwRegFpNegOffset(Instr instr);

1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135
  static Register GetRtReg(Instr instr);
  static Register GetRsReg(Instr instr);
  static Register GetRdReg(Instr instr);

  static uint32_t GetRt(Instr instr);
  static uint32_t GetRtField(Instr instr);
  static uint32_t GetRs(Instr instr);
  static uint32_t GetRsField(Instr instr);
  static uint32_t GetRd(Instr instr);
  static uint32_t GetRdField(Instr instr);
  static uint32_t GetSa(Instr instr);
  static uint32_t GetSaField(Instr instr);
  static uint32_t GetOpcodeField(Instr instr);
1136 1137
  static uint32_t GetFunction(Instr instr);
  static uint32_t GetFunctionField(Instr instr);
1138 1139
  static uint32_t GetImmediate16(Instr instr);
  static uint32_t GetLabelConst(Instr instr);
1140 1141 1142 1143

  static int32_t GetBranchOffset(Instr instr);
  static bool IsLw(Instr instr);
  static int16_t GetLwOffset(Instr instr);
1144 1145
  static int16_t GetJicOrJialcOffset(Instr instr);
  static int16_t GetLuiOffset(Instr instr);
1146 1147 1148 1149 1150 1151
  static Instr SetLwOffset(Instr instr, int16_t offset);

  static bool IsSw(Instr instr);
  static Instr SetSwOffset(Instr instr, int16_t offset);
  static bool IsAddImmediate(Instr instr);
  static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
1152 1153 1154 1155 1156 1157
  static uint32_t CreateTargetAddress(Instr instr_lui, Instr instr_jic);
  static void UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
                                  int16_t& jic_offset);
  static void UnpackTargetAddressUnsigned(uint32_t address,
                                          uint32_t& lui_offset,
                                          uint32_t& jic_offset);
1158

1159
  static bool IsAndImmediate(Instr instr);
1160
  static bool IsEmittedConstant(Instr instr);
1161

1162
  void CheckTrampolinePool();
1163

1164 1165 1166 1167 1168 1169
  void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
                                          ConstantPoolEntry::Access access,
                                          ConstantPoolEntry::Type type) {
    // No embedded constant pool support.
    UNREACHABLE();
  }
1170

1171
  bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
1172 1173 1174
  static bool IsCompactBranchSupported() {
    return IsMipsArchVariant(kMips32r6);
  }
1175

1176 1177
  inline int UnboundLabelsCount() { return unbound_labels_count_; }

1178
 protected:
1179 1180 1181
  // Load Scaled Address instruction.
  void lsa(Register rd, Register rt, Register rs, uint8_t sa);

1182 1183
  // Helpers.
  void LoadRegPlusOffsetToAt(const MemOperand& src);
1184 1185
  int32_t LoadRegPlusUpperOffsetPartToAt(const MemOperand& src);
  int32_t LoadUpperOffsetForTwoMemoryAccesses(const MemOperand& src);
1186

1187 1188 1189
  // Relocation for a type-recording IC has the AST id added to it.  This
  // member variable is a way to pass the information from the call site to
  // the relocation info.
1190
  TypeFeedbackId recorded_ast_id_;
1191

1192
  int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
1193 1194

  // Decode branch instruction at pos and return branch target pos.
1195
  int target_at(int pos, bool is_internal);
1196 1197

  // Patch branch instruction at pos to branch to given branch target pos.
1198
  void target_at_put(int pos, int target_pos, bool is_internal);
1199 1200

  // Say if we need to relocate with this mode.
1201
  bool MustUseReg(RelocInfo::Mode rmode);
1202 1203 1204 1205

  // Record reloc info for current pc_.
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);

1206 1207 1208 1209 1210 1211 1212 1213 1214
  // Block the emission of the trampoline pool before pc_offset.
  void BlockTrampolinePoolBefore(int pc_offset) {
    if (no_trampoline_pool_before_ < pc_offset)
      no_trampoline_pool_before_ = pc_offset;
  }

  void StartBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_++;
  }
1215

1216 1217 1218 1219 1220 1221 1222 1223
  void EndBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_--;
  }

  bool is_trampoline_pool_blocked() const {
    return trampoline_pool_blocked_nesting_ > 0;
  }

1224 1225 1226 1227
  bool has_exception() const {
    return internal_trampoline_exception_;
  }

1228 1229
  void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi);

1230 1231 1232 1233 1234 1235
  bool is_trampoline_emitted() const {
    return trampoline_emitted_;
  }

  // Temporarily block automatic assembly buffer growth.
  void StartBlockGrowBuffer() {
1236
    DCHECK(!block_buffer_growth_);
1237 1238 1239 1240
    block_buffer_growth_ = true;
  }

  void EndBlockGrowBuffer() {
1241
    DCHECK(block_buffer_growth_);
1242 1243 1244 1245 1246 1247 1248
    block_buffer_growth_ = false;
  }

  bool is_buffer_growth_blocked() const {
    return block_buffer_growth_;
  }

1249 1250 1251 1252 1253 1254
  void EmitForbiddenSlotInstruction() {
    if (IsPrevInstrCompactBranch()) {
      nop();
    }
  }

1255 1256
  inline void CheckTrampolinePoolQuick(int extra_instructions = 0);

1257 1258
  inline void CheckBuffer();

1259
 private:
1260 1261 1262
  inline static void set_target_internal_reference_encoded_at(Address pc,
                                                              Address target);

1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273
  // Buffer size and constant pool distance are checked together at regular
  // intervals of kBufferCheckInterval emitted bytes.
  static const int kBufferCheckInterval = 1*KB/2;

  // Code generation.
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries.
  static const int kGap = 32;

1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289

  // Repeated checking whether the trampoline pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated.
  static const int kCheckConstIntervalInst = 32;
  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;

  int next_buffer_check_;  // pc offset of next buffer check.

  // Emission of the trampoline pool may be blocked in some code sequences.
  int trampoline_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_trampoline_pool_before_;  // Block emission before this pc offset.

  // Keep track of the last emitted pool to guarantee a maximal distance.
  int last_trampoline_pool_end_;  // pc offset of the end of the last pool.

1290 1291 1292
  // Automatic growth of the assembly buffer may be blocked for some sequences.
  bool block_buffer_growth_;  // Block growth when true.

1293 1294 1295 1296 1297 1298 1299 1300
  // Relocation information generation.
  // Each relocation is encoded as a variable size value.
  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;

  // The bound position, before this we cannot do instruction elimination.
  int last_bound_pos_;

1301 1302 1303
  // Readable constants for compact branch handling in emit()
  enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true };

1304 1305
  // Code emission.
  void GrowBuffer();
1306 1307
  inline void emit(Instr x,
                   CompactBranchType is_compact_branch = CompactBranchType::NO);
1308 1309 1310 1311 1312
  inline void emit(uint64_t x);
  inline void CheckForEmitInForbiddenSlot();
  template <typename T>
  inline void EmitHelper(T x);
  inline void EmitHelper(Instr x, CompactBranchType is_compact_branch);
1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328

  // Instruction generation.
  // We have 3 different kind of encoding layout on MIPS.
  // However due to many different types of objects encoded in the same fields
  // we have quite a few aliases for each mode.
  // Using the same structure to refer to Register and FPURegister would spare a
  // few aliases, but mixing both does not look clean to me.
  // Anyway we could surely implement this differently.

  void GenInstrRegister(Opcode opcode,
                        Register rs,
                        Register rt,
                        Register rd,
                        uint16_t sa = 0,
                        SecondaryField func = NULLSF);

1329 1330 1331 1332 1333 1334 1335
  void GenInstrRegister(Opcode opcode,
                        Register rs,
                        Register rt,
                        uint16_t msb,
                        uint16_t lsb,
                        SecondaryField func);

1336 1337 1338 1339 1340 1341 1342
  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        FPURegister ft,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

1343 1344 1345 1346 1347 1348 1349
  void GenInstrRegister(Opcode opcode,
                        FPURegister fr,
                        FPURegister ft,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

1350 1351 1352 1353 1354 1355 1356
  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        Register rt,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

1357 1358 1359 1360 1361 1362
  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        Register rt,
                        FPUControlRegister fs,
                        SecondaryField func = NULLSF);

1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378
  void GenInstrImmediate(
      Opcode opcode, Register rs, Register rt, int32_t j,
      CompactBranchType is_compact_branch = CompactBranchType::NO);
  void GenInstrImmediate(
      Opcode opcode, Register rs, SecondaryField SF, int32_t j,
      CompactBranchType is_compact_branch = CompactBranchType::NO);
  void GenInstrImmediate(
      Opcode opcode, Register r1, FPURegister r2, int32_t j,
      CompactBranchType is_compact_branch = CompactBranchType::NO);
  void GenInstrImmediate(
      Opcode opcode, Register rs, int32_t offset21,
      CompactBranchType is_compact_branch = CompactBranchType::NO);
  void GenInstrImmediate(Opcode opcode, Register rs, uint32_t offset21);
  void GenInstrImmediate(
      Opcode opcode, int32_t offset26,
      CompactBranchType is_compact_branch = CompactBranchType::NO);
1379 1380 1381 1382 1383 1384 1385 1386 1387


  void GenInstrJump(Opcode opcode,
                     uint32_t address);


  // Labels.
  void print(Label* L);
  void bind_to(Label* L, int pos);
1388
  void next(Label* L, bool is_internal);
1389

1390 1391 1392 1393 1394 1395 1396 1397 1398 1399
  // One trampoline consists of:
  // - space for trampoline slots,
  // - space for labels.
  //
  // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
  // Space for trampoline slots preceeds space for labels. Each label is of one
  // instruction size, so total amount for labels is equal to
  // label_count *  kInstrSize.
  class Trampoline {
   public:
1400 1401 1402 1403 1404 1405 1406
    Trampoline() {
      start_ = 0;
      next_slot_ = 0;
      free_slot_count_ = 0;
      end_ = 0;
    }
    Trampoline(int start, int slot_count) {
1407 1408 1409
      start_ = start;
      next_slot_ = start;
      free_slot_count_ = slot_count;
1410
      end_ = start + slot_count * kTrampolineSlotsSize;
1411 1412 1413 1414 1415 1416 1417 1418
    }
    int start() {
      return start_;
    }
    int end() {
      return end_;
    }
    int take_slot() {
1419 1420 1421 1422 1423
      int trampoline_slot = kInvalidSlotPos;
      if (free_slot_count_ <= 0) {
        // We have run out of space on trampolines.
        // Make sure we fail in debug mode, so we become aware of each case
        // when this happens.
1424
        DCHECK(0);
1425 1426 1427 1428
        // Internal exception will be caught.
      } else {
        trampoline_slot = next_slot_;
        free_slot_count_--;
1429
        next_slot_ += kTrampolineSlotsSize;
1430
      }
1431 1432
      return trampoline_slot;
    }
1433

1434 1435 1436 1437 1438 1439 1440
   private:
    int start_;
    int end_;
    int next_slot_;
    int free_slot_count_;
  };

1441 1442 1443 1444 1445 1446 1447 1448
  int32_t get_trampoline_entry(int32_t pos);
  int unbound_labels_count_;
  // If trampoline is emitted, generated code is becoming large. As this is
  // already a slow case which can possibly break our code generation for the
  // extreme case, we use this information to trigger different mode of
  // branch instruction generation, where we use jump instructions rather
  // than regular branch instructions.
  bool trampoline_emitted_;
1449
  static const int kInvalidSlotPos = -1;
1450

1451 1452 1453
  // Internal reference positions, required for unbounded internal reference
  // labels.
  std::set<int> internal_reference_positions_;
1454 1455 1456 1457
  bool is_internal_reference(Label* L) {
    return internal_reference_positions_.find(L->pos()) !=
           internal_reference_positions_.end();
  }
1458

1459 1460 1461 1462
  void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
  void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
  bool prev_instr_compact_branch_ = false;

1463
  Trampoline trampoline_;
1464
  bool internal_trampoline_exception_;
1465

1466 1467
  friend class RegExpMacroAssemblerMIPS;
  friend class RelocInfo;
1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478
  friend class CodePatcher;
  friend class BlockTrampolinePoolScope;
  friend class EnsureSpace;
};


class EnsureSpace BASE_EMBEDDED {
 public:
  // Grows the assembler buffer, if needed, before the next emission.
  explicit EnsureSpace(Assembler* assembler) {
    assembler->CheckBuffer();
  }
};

}  // namespace internal
}  // namespace v8

#endif  // V8_MIPS_ASSEMBLER_MIPS_H_