// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.


#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
#define V8_MIPS_ASSEMBLER_MIPS_H_

#include <stdio.h>

#include <set>

#include "src/assembler.h"
#include "src/compiler.h"
#include "src/mips/constants-mips.h"
#include "src/serialize.h"

namespace v8 {
namespace internal {

// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
// compatible with int, which has caused code-generation bugs.
//
// 2) We would prefer to use a class instead of a struct but we don't like
// the register initialization to depend on the particular initialization
// order (which appears to be different on OS X, Linux, and Windows for the
// installed versions of C++ we tried). Using a struct permits C-style
// "initialization". Also, the Register objects cannot be const as this
// forces initialization stubs in MSVC, making us dependent on initialization
// order.
//
// 3) By not using an enum, we are possibly preventing the compiler from
// doing certain constant folds, which may significantly reduce the
// code generated for some assembly instructions (because they boil down
// to a few constants). If this is a problem, we could change the code
// such that we use an enum in optimized mode, and the struct in debug
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.


// -----------------------------------------------------------------------------
// Implementation of Register and FPURegister.

// Core register.
struct Register {
  static const int kNumRegisters = v8::internal::kNumRegisters;
  static const int kMaxNumAllocatableRegisters = 14;  // v0 through t6 and cp.
  static const int kSizeInBytes = 4;
  static const int kCpRegister = 23;  // cp (s7) is the 23rd register.

#if defined(V8_TARGET_LITTLE_ENDIAN)
  static const int kMantissaOffset = 0;
  static const int kExponentOffset = 4;
#elif defined(V8_TARGET_BIG_ENDIAN)
  static const int kMantissaOffset = 4;
  static const int kExponentOffset = 0;
#else
#error Unknown endianness
#endif
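  // For example, on a little-endian target the low (mantissa) word of a stored
  // double is at byte offset 0 and the high (sign/exponent) word at offset 4;
  // big-endian targets swap the two offsets.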

  inline static int NumAllocatableRegisters();

  static int ToAllocationIndex(Register reg) {
    DCHECK((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) ||
           reg.is(from_code(kCpRegister)));
    return reg.is(from_code(kCpRegister)) ?
           kMaxNumAllocatableRegisters - 1 :  // Return last index for 'cp'.
           reg.code() - 2;  // zero_reg and 'at' are skipped.
  }

  static Register FromAllocationIndex(int index) {
    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
    return index == kMaxNumAllocatableRegisters - 1 ?
           from_code(kCpRegister) :  // Last index is always the 'cp' register.
           from_code(index + 2);  // zero_reg and 'at' are skipped.
  }

  static const char* AllocationIndexToString(int index) {
    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
    const char* const names[] = {
      "v0",
      "v1",
      "a0",
      "a1",
      "a2",
      "a3",
      "t0",
      "t1",
      "t2",
      "t3",
      "t4",
      "t5",
      "t6",
      "s7",
    };
    return names[index];
  }

  static Register from_code(int code) {
    Register r = { code };
    return r;
  }

  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
  bool is(Register reg) const { return code_ == reg.code_; }
  int code() const {
    DCHECK(is_valid());
    return code_;
  }
  int bit() const {
    DCHECK(is_valid());
    return 1 << code_;
  }

  // Unfortunately we can't make this private in a struct.
  int code_;
};
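
// With the mapping above, allocation indices run v0 = 0, v1 = 1, a0 = 2, ...,
// t6 = 12, and cp (s7) takes the last index, 13; zero_reg and at are never
// allocatable.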

#define REGISTER(N, C) \
  const int kRegister_ ## N ## _Code = C; \
  const Register N = { C }

REGISTER(no_reg, -1);
// Always zero.
REGISTER(zero_reg, 0);
// at: Reserved for synthetic instructions.
REGISTER(at, 1);
// v0, v1: Used when returning multiple values from subroutines.
REGISTER(v0, 2);
REGISTER(v1, 3);
// a0 - a3: Used to pass non-FP parameters.
REGISTER(a0, 4);
REGISTER(a1, 5);
REGISTER(a2, 6);
REGISTER(a3, 7);
// t0 - t9: Can be used without reservation, act as temporary registers and are
// allowed to be destroyed by subroutines.
REGISTER(t0, 8);
REGISTER(t1, 9);
REGISTER(t2, 10);
REGISTER(t3, 11);
REGISTER(t4, 12);
REGISTER(t5, 13);
REGISTER(t6, 14);
REGISTER(t7, 15);
// s0 - s7: Subroutine register variables. Subroutines that write to these
// registers must restore their values before exiting so that the caller can
// expect the values to be preserved.
REGISTER(s0, 16);
REGISTER(s1, 17);
REGISTER(s2, 18);
REGISTER(s3, 19);
REGISTER(s4, 20);
REGISTER(s5, 21);
REGISTER(s6, 22);
REGISTER(s7, 23);
REGISTER(t8, 24);
REGISTER(t9, 25);
// k0, k1: Reserved for system calls and interrupt handlers.
REGISTER(k0, 26);
REGISTER(k1, 27);
// gp: Reserved.
REGISTER(gp, 28);
// sp: Stack pointer.
REGISTER(sp, 29);
// fp: Frame pointer.
REGISTER(fp, 30);
// ra: Return address pointer.
REGISTER(ra, 31);

#undef REGISTER

int ToNumber(Register reg);

Register ToRegister(int num);

// Coprocessor register.
struct FPURegister {
  static const int kMaxNumRegisters = v8::internal::kNumFPURegisters;

  // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
  // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
  // number of Double regs (64-bit regs, or FPU-reg-pairs).

  // A few double registers are reserved: one as a scratch register and one to
  // hold 0.0.
  //  f28: 0.0
  //  f30: scratch register.
  static const int kNumReservedRegisters = 2;
  static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 -
      kNumReservedRegisters;
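  // With the usual 32 single-precision FPU registers this yields
  // 32 / 2 - 2 = 14 allocatable double registers.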

  inline static int NumRegisters();
  inline static int NumAllocatableRegisters();

  // TODO(turbofan): Proper support for float32.
  inline static int NumAllocatableAliasedRegisters();

  inline static int ToAllocationIndex(FPURegister reg);
  static const char* AllocationIndexToString(int index);

  static FPURegister FromAllocationIndex(int index) {
    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
    return from_code(index * 2);
  }

  static FPURegister from_code(int code) {
    FPURegister r = { code };
    return r;
  }

  bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; }
  bool is(FPURegister creg) const { return code_ == creg.code_; }
  FPURegister low() const {
    // Find low reg of a Double-reg pair, which is the reg itself.
    DCHECK(code_ % 2 == 0);  // Specified Double reg must be even.
    FPURegister reg;
    reg.code_ = code_;
    DCHECK(reg.is_valid());
    return reg;
  }
  FPURegister high() const {
    // Find high reg of a Double-reg pair, which is reg + 1.
    DCHECK(code_ % 2 == 0);  // Specified Double reg must be even.
    FPURegister reg;
    reg.code_ = code_ + 1;
    DCHECK(reg.is_valid());
    return reg;
  }

  int code() const {
    DCHECK(is_valid());
    return code_;
  }
  int bit() const {
    DCHECK(is_valid());
    return 1 << code_;
  }
  void setcode(int f) {
    code_ = f;
    DCHECK(is_valid());
  }
  // Unfortunately we can't make this private in a struct.
  int code_;
};

// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
// 32-bit registers, f0 through f31. When used as 'double' they are used
// in pairs, starting with the even numbered register. So a double operation
// on f0 really uses f0 and f1.
// (Modern MIPS hardware also supports 32 64-bit registers, via setting
// (privileged) Status Register FR bit to 1. This is used by the N32 ABI,
// but it is not in common use. Someday we will want to support this in v8.)

// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
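// For example, a double add such as add_d(f0, f2, f4) (declared below) reads
// the register pairs {f2,f3} and {f4,f5} and writes the pair {f0,f1}.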
typedef FPURegister DoubleRegister;
typedef FPURegister FloatRegister;

const FPURegister no_freg = { -1 };

const FPURegister f0 = { 0 };  // Return value in hard float mode.
const FPURegister f1 = { 1 };
const FPURegister f2 = { 2 };
const FPURegister f3 = { 3 };
const FPURegister f4 = { 4 };
const FPURegister f5 = { 5 };
const FPURegister f6 = { 6 };
const FPURegister f7 = { 7 };
const FPURegister f8 = { 8 };
const FPURegister f9 = { 9 };
const FPURegister f10 = { 10 };
const FPURegister f11 = { 11 };
const FPURegister f12 = { 12 };  // Arg 0 in hard float mode.
const FPURegister f13 = { 13 };
const FPURegister f14 = { 14 };  // Arg 1 in hard float mode.
const FPURegister f15 = { 15 };
const FPURegister f16 = { 16 };
const FPURegister f17 = { 17 };
const FPURegister f18 = { 18 };
const FPURegister f19 = { 19 };
const FPURegister f20 = { 20 };
const FPURegister f21 = { 21 };
const FPURegister f22 = { 22 };
const FPURegister f23 = { 23 };
const FPURegister f24 = { 24 };
const FPURegister f25 = { 25 };
const FPURegister f26 = { 26 };
const FPURegister f27 = { 27 };
const FPURegister f28 = { 28 };
const FPURegister f29 = { 29 };
const FPURegister f30 = { 30 };
const FPURegister f31 = { 31 };

// Register aliases.
// cp is assumed to be a callee saved register.
// Defined using #define instead of "static const Register&" because Clang
// complains otherwise when a compilation unit that includes this header
// doesn't use the variables.
#define kRootRegister s6
#define cp s7
#define kLithiumScratchReg s3
#define kLithiumScratchReg2 s4
#define kLithiumScratchDouble f30
#define kDoubleRegZero f28
// Used on mips32r6 for compare operations.
#define kDoubleCompareReg f31

// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
struct FPUControlRegister {
  bool is_valid() const { return code_ == kFCSRRegister; }
  bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
  int code() const {
    DCHECK(is_valid());
    return code_;
  }
  int bit() const {
    DCHECK(is_valid());
    return 1 << code_;
  }
  void setcode(int f) {
    code_ = f;
    DCHECK(is_valid());
  }
  // Unfortunately we can't make this private in a struct.
  int code_;
};

const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
const FPUControlRegister FCSR = { kFCSRRegister };


// -----------------------------------------------------------------------------
// Machine instruction Operands.

// Class Operand represents a register or immediate operand in an instruction.
class Operand BASE_EMBEDDED {
 public:
  // Immediate.
  INLINE(explicit Operand(int32_t immediate,
         RelocInfo::Mode rmode = RelocInfo::NONE32));
  INLINE(explicit Operand(const ExternalReference& f));
  INLINE(explicit Operand(const char* s));
  INLINE(explicit Operand(Object** opp));
  INLINE(explicit Operand(Context** cpp));
  explicit Operand(Handle<Object> handle);
  INLINE(explicit Operand(Smi* value));

  // Register.
  INLINE(explicit Operand(Register rm));

  // Return true if this is a register operand.
  INLINE(bool is_reg() const);

  inline int32_t immediate() const {
    DCHECK(!is_reg());
    return imm32_;
  }

  Register rm() const { return rm_; }

 private:
  Register rm_;
  int32_t imm32_;  // Valid if rm_ == no_reg.
  RelocInfo::Mode rmode_;

  friend class Assembler;
  friend class MacroAssembler;
};
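
// Illustrative Operand construction (a sketch; these mirror the constructors
// declared above):
//
//   Operand imm(0x1234);  // Immediate operand; imm.is_reg() is false.
//   Operand reg(t0);      // Register operand; reg.is_reg() is true.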


// On MIPS we have only one addressing mode with base_reg + offset.
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
 public:
  // Immediate value attached to offset.
  enum OffsetAddend {
    offset_minus_one = -1,
    offset_zero = 0
  };

  explicit MemOperand(Register rn, int32_t offset = 0);
  explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
                      OffsetAddend offset_addend = offset_zero);
  int32_t offset() const { return offset_; }

  bool OffsetIsInt16Encodable() const {
    return is_int16(offset_);
  }

 private:
  int32_t offset_;

  friend class Assembler;
};
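
// Illustrative use of MemOperand (a sketch; it uses the load/store mnemonics
// declared on Assembler below, and 'assm' stands for an Assembler instance):
//
//   assm.lw(t0, MemOperand(sp, 16));  // t0 <- word at sp + 16.
//   assm.sw(t0, MemOperand(a0, 0));   // Store t0 at the address held in a0.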


class Assembler : public AssemblerBase {
 public:
  // Create an assembler. Instructions and relocation information are emitted
  // into a buffer, with the instructions starting from the beginning and the
  // relocation information starting from the end of the buffer. See CodeDesc
  // for a detailed comment on the layout (globals.h).
  //
  // If the provided buffer is NULL, the assembler allocates and grows its own
  // buffer, and buffer_size determines the initial buffer size. The buffer is
  // owned by the assembler and deallocated upon destruction of the assembler.
  //
  // If the provided buffer is not NULL, the assembler uses the provided buffer
  // for code generation and assumes its size to be buffer_size. If the buffer
  // is too small, a fatal error occurs. No deallocation of the buffer is done
  // upon destruction of the assembler.
  Assembler(Isolate* isolate, void* buffer, int buffer_size);
  virtual ~Assembler() { }

  // GetCode emits any pending (non-emitted) code and fills the descriptor
  // desc. GetCode() is idempotent; it returns the same result if no other
  // Assembler functions are invoked in between GetCode() calls.
  void GetCode(CodeDesc* desc);
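
  // Minimal usage sketch (an illustration only, not part of this interface;
  // assumes an initialized Isolate* named 'isolate'):
  //
  //   Assembler assm(isolate, NULL, 0);  // Assembler-owned, growable buffer.
  //   Label loop;
  //   assm.bind(&loop);
  //   assm.addiu(a0, a0, -1);
  //   assm.bne(a0, zero_reg, &loop);     // Backward branch to the bound label.
  //   assm.nop();                        // Branch delay slot.
  //   CodeDesc desc;
  //   assm.GetCode(&desc);               // desc describes the emitted code.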

  // Label operations & relative jumps (PPUM Appendix D).
  //
  // Takes a branch opcode (cc) and a label (L) and generates
  // either a backward branch or a forward branch and links it
  // to the label fixup chain. Usage:
  //
  // Label L;    // unbound label
  // j(cc, &L);  // forward branch to unbound label
  // bind(&L);   // bind label to the current pc
  // j(cc, &L);  // backward branch to bound label
  // bind(&L);   // illegal: a label may be bound only once
  //
  // Note: The same Label can be used for forward and backward branches
  // but it may be bound only once.
  void bind(Label* L);  // Binds an unbound label L to current code position.
  // Determines if Label is bound and near enough so that a branch instruction
  // can be used to reach it, instead of a jump instruction.
  bool is_near(Label* L);

  // Returns the branch offset to the given label from the current code
  // position. Links the label to the current position if it is still unbound.
  // Manages the jump elimination optimization if the second parameter is true.
  int32_t branch_offset(Label* L, bool jump_elimination_allowed);
  int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed);
  int32_t branch_offset21(Label* L, bool jump_elimination_allowed);
  int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed);
  int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
    int32_t o = branch_offset(L, jump_elimination_allowed);
    DCHECK((o & 3) == 0);   // Assert the offset is aligned.
    return o >> 2;
  }
  int32_t shifted_branch_offset_compact(Label* L,
      bool jump_elimination_allowed) {
    int32_t o = branch_offset_compact(L, jump_elimination_allowed);
    DCHECK((o & 3) == 0);   // Assert the offset is aligned.
    return o >> 2;
  }
  uint32_t jump_address(Label* L);

  // Puts a label's target address at the given position.
  // The high 8 bits are set to zero.
  void label_at_put(Label* L, int at_offset);

  // Read/Modify the code target address in the branch/call instruction at pc.
  static Address target_address_at(Address pc);
  static void set_target_address_at(Address pc,
                                    Address target,
                                    ICacheFlushMode icache_flush_mode =
                                        FLUSH_ICACHE_IF_NEEDED);
  // On MIPS there is no Constant Pool so we skip that parameter.
  INLINE(static Address target_address_at(Address pc,
                                          ConstantPoolArray* constant_pool)) {
    return target_address_at(pc);
  }
  INLINE(static void set_target_address_at(Address pc,
                                           ConstantPoolArray* constant_pool,
                                           Address target,
                                           ICacheFlushMode icache_flush_mode =
                                               FLUSH_ICACHE_IF_NEEDED)) {
    set_target_address_at(pc, target, icache_flush_mode);
  }
  INLINE(static Address target_address_at(Address pc, Code* code)) {
    ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
    return target_address_at(pc, constant_pool);
  }
  INLINE(static void set_target_address_at(Address pc,
                                           Code* code,
                                           Address target,
                                           ICacheFlushMode icache_flush_mode =
                                               FLUSH_ICACHE_IF_NEEDED)) {
    ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
    set_target_address_at(pc, constant_pool, target, icache_flush_mode);
  }

  // Return the code target address at a call site from the return address
  // of that call in the instruction stream.
  inline static Address target_address_from_return_address(Address pc);

  // Return the code target address of the patch debug break slot
  inline static Address break_address_from_return_address(Address pc);

  static void JumpLabelToJumpRegister(Address pc);

  static void QuietNaN(HeapObject* nan);

  // This sets the branch destination (which gets loaded at the call address).
  // This is for calls and branches within generated code.  The serializer
  // has already deserialized the lui/ori instructions etc.
  inline static void deserialization_set_special_target_at(
      Address instruction_payload, Code* code, Address target) {
    set_target_address_at(
        instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
        code,
        target);
  }

  // Size of an instruction.
  static const int kInstrSize = sizeof(Instr);

  // Difference between address of current opcode and target address offset.
  static const int kBranchPCOffset = 4;

  // Here we are patching the address in the LUI/ORI instruction pair.
  // These values are used in the serialization process and must be zero for
  // MIPS platform, as Code, Embedded Object or External-reference pointers
  // are split across two consecutive instructions and don't exist separately
  // in the code, so the serializer should not step forwards in memory after
  // a target is resolved and written.
  static const int kSpecialTargetSize = 0;

  // Number of consecutive instructions used to store a 32-bit constant.
  // Before the jump optimization, this constant was used in
  // RelocInfo::target_address_address() to tell the serializer the address of
  // the instruction that follows the LUI/ORI instruction pair. Now, with the
  // jump optimization, where the jump-through-register instruction that
  // usually follows the LUI/ORI pair is substituted with J/JAL, this constant
  // equals 3 instructions (LUI+ORI+J/JAL/JR/JALR).
  static const int kInstructionsFor32BitConstant = 3;
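  // As an illustration, the sequence this constant describes looks like
  // (a sketch, with 'imm' standing for the 32-bit constant):
  //   lui(at, (imm >> 16) & 0xffff);  // Load the upper half into 'at'.
  //   ori(at, at, imm & 0xffff);      // Or in the lower half.
  //   jr(at);                         // Optional jump through the register.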

  // Distance between the instruction referring to the address of the call
  // target and the return address.
  static const int kCallTargetAddressOffset = 4 * kInstrSize;

  // Distance between start of patched return sequence and the emitted address
  // to jump to.
  static const int kPatchReturnSequenceAddressOffset = 0;

  // Distance between start of patched debug break slot and the emitted address
  // to jump to.
  static const int kPatchDebugBreakSlotAddressOffset =  0 * kInstrSize;

  // Difference between address of current opcode and value read from pc
  // register.
  static const int kPcLoadDelta = 4;

  static const int kPatchDebugBreakSlotReturnOffset = 4 * kInstrSize;

  // Number of instructions used for the JS return sequence. The constant is
  // used by the debugger to patch the JS return sequence.
  static const int kJSReturnSequenceInstructions = 7;
  static const int kJSReturnSequenceLength =
      kJSReturnSequenceInstructions * kInstrSize;
  static const int kDebugBreakSlotInstructions = 4;
  static const int kDebugBreakSlotLength =
      kDebugBreakSlotInstructions * kInstrSize;


  // ---------------------------------------------------------------------------
  // Code generation.

  // Insert the smallest number of nop instructions
  // possible to align the pc offset to a multiple
  // of m. m must be a power of 2 (>= 4).
  void Align(int m);
  // Aligns code to something that's optimal for a jump target for the platform.
  void CodeTargetAlign();

  // Different nop operations are used by the code generator to detect certain
  // states of the generated code.
  enum NopMarkerTypes {
    NON_MARKING_NOP = 0,
    DEBUG_BREAK_NOP,
    // IC markers.
    PROPERTY_ACCESS_INLINED,
    PROPERTY_ACCESS_INLINED_CONTEXT,
    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
    // Helper values.
    LAST_CODE_MARKER,
    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
    // Code aging
    CODE_AGE_MARKER_NOP = 6,
    CODE_AGE_SEQUENCE_NOP
  };

  // Type == 0 is the default non-marking nop. For mips this is a
  // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero
  // marking, to avoid conflict with ssnop and ehb instructions.
  void nop(unsigned int type = 0) {
    DCHECK(type < 32);
    Register nop_rt_reg = (type == 0) ? zero_reg : at;
    sll(zero_reg, nop_rt_reg, type, true);
  }
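  // For example, nop(DEBUG_BREAK_NOP) emits sll(zero_reg, at, DEBUG_BREAK_NOP),
  // which IsNop(instr, DEBUG_BREAK_NOP) (declared below) recognizes, while a
  // plain nop() is the architectural no-op sll(zero_reg, zero_reg, 0).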


  // --------Branch-and-jump-instructions----------
  // We don't use likely variant of instructions.
  void b(int16_t offset);
  void b(Label* L) { b(branch_offset(L, false)>>2); }
  void bal(int16_t offset);
  void bal(Label* L) { bal(branch_offset(L, false)>>2); }

  void beq(Register rs, Register rt, int16_t offset);
  void beq(Register rs, Register rt, Label* L) {
    beq(rs, rt, branch_offset(L, false) >> 2);
  }
  void bgez(Register rs, int16_t offset);
  void bgezc(Register rt, int16_t offset);
  void bgezc(Register rt, Label* L) {
    bgezc(rt, branch_offset_compact(L, false)>>2);
  }
  void bgeuc(Register rs, Register rt, int16_t offset);
  void bgeuc(Register rs, Register rt, Label* L) {
    bgeuc(rs, rt, branch_offset_compact(L, false)>>2);
  }
  void bgec(Register rs, Register rt, int16_t offset);
  void bgec(Register rs, Register rt, Label* L) {
    bgec(rs, rt, branch_offset_compact(L, false)>>2);
  }
  void bgezal(Register rs, int16_t offset);
  void bgezalc(Register rt, int16_t offset);
  void bgezalc(Register rt, Label* L) {
    bgezalc(rt, branch_offset_compact(L, false)>>2);
  }
  void bgezall(Register rs, int16_t offset);
  void bgezall(Register rs, Label* L) {
    bgezall(rs, branch_offset(L, false)>>2);
  }
  void bgtz(Register rs, int16_t offset);
  void bgtzc(Register rt, int16_t offset);
  void bgtzc(Register rt, Label* L) {
    bgtzc(rt, branch_offset_compact(L, false)>>2);
  }
  void blez(Register rs, int16_t offset);
  void blezc(Register rt, int16_t offset);
  void blezc(Register rt, Label* L) {
    blezc(rt, branch_offset_compact(L, false)>>2);
  }
  void bltz(Register rs, int16_t offset);
  void bltzc(Register rt, int16_t offset);
  void bltzc(Register rt, Label* L) {
    bltzc(rt, branch_offset_compact(L, false)>>2);
  }
  void bltuc(Register rs, Register rt, int16_t offset);
  void bltuc(Register rs, Register rt, Label* L) {
    bltuc(rs, rt, branch_offset_compact(L, false)>>2);
  }
  void bltc(Register rs, Register rt, int16_t offset);
  void bltc(Register rs, Register rt, Label* L) {
    bltc(rs, rt, branch_offset_compact(L, false)>>2);
  }
  void bltzal(Register rs, int16_t offset);
  void blezalc(Register rt, int16_t offset);
  void blezalc(Register rt, Label* L) {
    blezalc(rt, branch_offset_compact(L, false)>>2);
  }
  void bltzalc(Register rt, int16_t offset);
  void bltzalc(Register rt, Label* L) {
    bltzalc(rt, branch_offset_compact(L, false)>>2);
  }
  void bgtzalc(Register rt, int16_t offset);
  void bgtzalc(Register rt, Label* L) {
    bgtzalc(rt, branch_offset_compact(L, false)>>2);
  }
  void beqzalc(Register rt, int16_t offset);
  void beqzalc(Register rt, Label* L) {
    beqzalc(rt, branch_offset_compact(L, false)>>2);
  }
  void beqc(Register rs, Register rt, int16_t offset);
  void beqc(Register rs, Register rt, Label* L) {
    beqc(rs, rt, branch_offset_compact(L, false)>>2);
  }
  void beqzc(Register rs, int32_t offset);
  void beqzc(Register rs, Label* L) {
    beqzc(rs, branch_offset21_compact(L, false)>>2);
  }
  void bnezalc(Register rt, int16_t offset);
  void bnezalc(Register rt, Label* L) {
    bnezalc(rt, branch_offset_compact(L, false)>>2);
  }
  void bnec(Register rs, Register rt, int16_t offset);
  void bnec(Register rs, Register rt, Label* L) {
    bnec(rs, rt, branch_offset_compact(L, false)>>2);
  }
  void bnezc(Register rt, int32_t offset);
  void bnezc(Register rt, Label* L) {
    bnezc(rt, branch_offset21_compact(L, false)>>2);
  }
  void bne(Register rs, Register rt, int16_t offset);
  void bne(Register rs, Register rt, Label* L) {
    bne(rs, rt, branch_offset(L, false)>>2);
  }
  void bovc(Register rs, Register rt, int16_t offset);
  void bovc(Register rs, Register rt, Label* L) {
    bovc(rs, rt, branch_offset_compact(L, false)>>2);
  }
  void bnvc(Register rs, Register rt, int16_t offset);
  void bnvc(Register rs, Register rt, Label* L) {
    bnvc(rs, rt, branch_offset_compact(L, false)>>2);
  }

  // Never use the int16_t b(l)cond version with a branch offset; use the
  // Label* version instead.

  // Jump targets must be in the current 256 MB-aligned region, i.e. 28 bits.
  void j(int32_t target);
  void jal(int32_t target);
  void jalr(Register rs, Register rd = ra);
  void jr(Register target);
  void j_or_jr(int32_t target, Register rs);
  void jal_or_jalr(int32_t target, Register rs);


  // -------Data-processing-instructions---------

  // Arithmetic.
  void addu(Register rd, Register rs, Register rt);
  void subu(Register rd, Register rs, Register rt);
  void mult(Register rs, Register rt);
  void multu(Register rs, Register rt);
  void div(Register rs, Register rt);
  void divu(Register rs, Register rt);
  void div(Register rd, Register rs, Register rt);
  void divu(Register rd, Register rs, Register rt);
  void mod(Register rd, Register rs, Register rt);
  void modu(Register rd, Register rs, Register rt);
  void mul(Register rd, Register rs, Register rt);
  void muh(Register rd, Register rs, Register rt);
  void mulu(Register rd, Register rs, Register rt);
  void muhu(Register rd, Register rs, Register rt);

  void addiu(Register rd, Register rs, int32_t j);

  // Logical.
  void and_(Register rd, Register rs, Register rt);
  void or_(Register rd, Register rs, Register rt);
  void xor_(Register rd, Register rs, Register rt);
  void nor(Register rd, Register rs, Register rt);

  void andi(Register rd, Register rs, int32_t j);
  void ori(Register rd, Register rs, int32_t j);
  void xori(Register rd, Register rs, int32_t j);
  void lui(Register rd, int32_t j);
  void aui(Register rs, Register rt, int32_t j);

  // Shifts.
  // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
  // and may cause problems in normal code. coming_from_nop makes sure this
  // doesn't happen.
  void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
  void sllv(Register rd, Register rt, Register rs);
  void srl(Register rd, Register rt, uint16_t sa);
  void srlv(Register rd, Register rt, Register rs);
  void sra(Register rt, Register rd, uint16_t sa);
  void srav(Register rt, Register rd, Register rs);
  void rotr(Register rd, Register rt, uint16_t sa);
  void rotrv(Register rd, Register rt, Register rs);


  // ------------Memory-instructions-------------

  void lb(Register rd, const MemOperand& rs);
  void lbu(Register rd, const MemOperand& rs);
  void lh(Register rd, const MemOperand& rs);
  void lhu(Register rd, const MemOperand& rs);
  void lw(Register rd, const MemOperand& rs);
  void lwl(Register rd, const MemOperand& rs);
  void lwr(Register rd, const MemOperand& rs);
  void sb(Register rd, const MemOperand& rs);
  void sh(Register rd, const MemOperand& rs);
  void sw(Register rd, const MemOperand& rs);
  void swl(Register rd, const MemOperand& rs);
  void swr(Register rd, const MemOperand& rs);


  // ----------------Prefetch--------------------

  void pref(int32_t hint, const MemOperand& rs);


  // -------------Misc-instructions--------------

  // Break / Trap instructions.
  void break_(uint32_t code, bool break_as_stop = false);
  void stop(const char* msg, uint32_t code = kMaxStopCode);
  void tge(Register rs, Register rt, uint16_t code);
  void tgeu(Register rs, Register rt, uint16_t code);
  void tlt(Register rs, Register rt, uint16_t code);
  void tltu(Register rs, Register rt, uint16_t code);
  void teq(Register rs, Register rt, uint16_t code);
  void tne(Register rs, Register rt, uint16_t code);

  // Move from HI/LO register.
  void mfhi(Register rd);
  void mflo(Register rd);

  // Set on less than.
  void slt(Register rd, Register rs, Register rt);
  void sltu(Register rd, Register rs, Register rt);
  void slti(Register rd, Register rs, int32_t j);
  void sltiu(Register rd, Register rs, int32_t j);

  // Conditional move.
  void movz(Register rd, Register rs, Register rt);
  void movn(Register rd, Register rs, Register rt);
  void movt(Register rd, Register rs, uint16_t cc = 0);
  void movf(Register rd, Register rs, uint16_t cc = 0);

  void sel(SecondaryField fmt, FPURegister fd, FPURegister ft,
      FPURegister fs, uint8_t sel);
  void seleqz(Register rs, Register rt, Register rd);
  void seleqz(SecondaryField fmt, FPURegister fd, FPURegister ft,
      FPURegister fs);
  void selnez(Register rs, Register rt, Register rd);
  void selnez(SecondaryField fmt, FPURegister fd, FPURegister ft,
      FPURegister fs);

  // Bit twiddling.
  void clz(Register rd, Register rs);
  void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
  void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);

  // --------Coprocessor-instructions----------------

  // Load, store, and move.
  void lwc1(FPURegister fd, const MemOperand& src);
  void ldc1(FPURegister fd, const MemOperand& src);

  void swc1(FPURegister fs, const MemOperand& dst);
  void sdc1(FPURegister fs, const MemOperand& dst);

  void mtc1(Register rt, FPURegister fs);
  void mthc1(Register rt, FPURegister fs);

  void mfc1(Register rt, FPURegister fs);
  void mfhc1(Register rt, FPURegister fs);

  void ctc1(Register rt, FPUControlRegister fs);
  void cfc1(Register rt, FPUControlRegister fs);

  // Arithmetic.
  void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
  void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void abs_d(FPURegister fd, FPURegister fs);
  void mov_d(FPURegister fd, FPURegister fs);
  void neg_d(FPURegister fd, FPURegister fs);
  void sqrt_d(FPURegister fd, FPURegister fs);

  // Conversion.
  void cvt_w_s(FPURegister fd, FPURegister fs);
  void cvt_w_d(FPURegister fd, FPURegister fs);
  void trunc_w_s(FPURegister fd, FPURegister fs);
  void trunc_w_d(FPURegister fd, FPURegister fs);
  void round_w_s(FPURegister fd, FPURegister fs);
  void round_w_d(FPURegister fd, FPURegister fs);
  void floor_w_s(FPURegister fd, FPURegister fs);
  void floor_w_d(FPURegister fd, FPURegister fs);
  void ceil_w_s(FPURegister fd, FPURegister fs);
  void ceil_w_d(FPURegister fd, FPURegister fs);

  void cvt_l_s(FPURegister fd, FPURegister fs);
  void cvt_l_d(FPURegister fd, FPURegister fs);
  void trunc_l_s(FPURegister fd, FPURegister fs);
  void trunc_l_d(FPURegister fd, FPURegister fs);
  void round_l_s(FPURegister fd, FPURegister fs);
  void round_l_d(FPURegister fd, FPURegister fs);
  void floor_l_s(FPURegister fd, FPURegister fs);
  void floor_l_d(FPURegister fd, FPURegister fs);
  void ceil_l_s(FPURegister fd, FPURegister fs);
  void ceil_l_d(FPURegister fd, FPURegister fs);
  void min(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
  void mina(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
  void max(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
  void maxa(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);

  void cvt_s_w(FPURegister fd, FPURegister fs);
  void cvt_s_l(FPURegister fd, FPURegister fs);
  void cvt_s_d(FPURegister fd, FPURegister fs);

  void cvt_d_w(FPURegister fd, FPURegister fs);
  void cvt_d_l(FPURegister fd, FPURegister fs);
  void cvt_d_s(FPURegister fd, FPURegister fs);

  // Conditions and branches for MIPSr6.
  void cmp(FPUCondition cond, SecondaryField fmt,
         FPURegister fd, FPURegister ft, FPURegister fs);

  void bc1eqz(int16_t offset, FPURegister ft);
  void bc1eqz(Label* L, FPURegister ft) {
    bc1eqz(branch_offset(L, false)>>2, ft);
  }
  void bc1nez(int16_t offset, FPURegister ft);
  void bc1nez(Label* L, FPURegister ft) {
    bc1nez(branch_offset(L, false)>>2, ft);
  }

  // Conditions and branches for non MIPSr6.
  void c(FPUCondition cond, SecondaryField fmt,
         FPURegister ft, FPURegister fs, uint16_t cc = 0);

  void bc1f(int16_t offset, uint16_t cc = 0);
  void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
  void bc1t(int16_t offset, uint16_t cc = 0);
  void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
  void fcmp(FPURegister src1, const double src2, FPUCondition cond);

  // Check the code size generated from label to here.
  int SizeOfCodeGeneratedSince(Label* label) {
    return pc_offset() - label->pos();
  }

  // Check the number of instructions generated from label to here.
  int InstructionsGeneratedSince(Label* label) {
    return SizeOfCodeGeneratedSince(label) / kInstrSize;
  }

  // Class for scoping postponing the trampoline pool generation.
  class BlockTrampolinePoolScope {
   public:
    explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockTrampolinePool();
    }
    ~BlockTrampolinePoolScope() {
      assem_->EndBlockTrampolinePool();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
  };

  // Class for postponing the assembly buffer growth. Typically used for
  // sequences of instructions that must be emitted as a unit, before
  // buffer growth (and relocation) can occur.
  // This blocking scope is not nestable.
  class BlockGrowBufferScope {
   public:
    explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockGrowBuffer();
    }
    ~BlockGrowBufferScope() {
      assem_->EndBlockGrowBuffer();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
  };

  // Debugging.

  // Mark address of the ExitJSFrame code.
  void RecordJSReturn();

  // Mark address of a debug break slot.
  void RecordDebugBreakSlot();

  // Record the AST id of the CallIC being compiled, so that it can be placed
  // in the relocation information.
  void SetRecordedAstId(TypeFeedbackId ast_id) {
    DCHECK(recorded_ast_id_.IsNone());
    recorded_ast_id_ = ast_id;
  }

  TypeFeedbackId RecordedAstId() {
    DCHECK(!recorded_ast_id_.IsNone());
    return recorded_ast_id_;
  }

  void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }

  // Record a comment relocation entry that can be used by a disassembler.
  // Use --code-comments to enable.
  void RecordComment(const char* msg);

  // Record a deoptimization reason that can be used by a log or cpu profiler.
  // Use --trace-deopt to enable.
  void RecordDeoptReason(const int reason, const SourcePosition position);


  static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                       intptr_t pc_delta);

  // Writes a single byte or word of data in the code stream.  Used for
  // inline tables, e.g., jump-tables.
  void db(uint8_t data);
  void dd(uint32_t data);
  void dd(Label* label);

  // Emits the address of the code stub's first instruction.
  void emit_code_stub_address(Code* stub);

  PositionsRecorder* positions_recorder() { return &positions_recorder_; }

  // Postpone the generation of the trampoline pool for the specified number of
  // instructions.
  void BlockTrampolinePoolFor(int instructions);

  // Check if there is less than kGap bytes available in the buffer.
  // If this is the case, we need to grow the buffer before emitting
  // an instruction or relocation information.
  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }

  // Get the number of bytes available in the buffer.
  inline int available_space() const { return reloc_info_writer.pos() - pc_; }

  // Read/patch instructions.
  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
  static void instr_at_put(byte* pc, Instr instr) {
    *reinterpret_cast<Instr*>(pc) = instr;
  }
  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
  void instr_at_put(int pos, Instr instr) {
    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
  }

  // Check if an instruction is a branch of some kind.
  static bool IsBranch(Instr instr);
  static bool IsBeq(Instr instr);
  static bool IsBne(Instr instr);
  static bool IsJump(Instr instr);
  static bool IsJ(Instr instr);
  static bool IsLui(Instr instr);
  static bool IsOri(Instr instr);

  static bool IsJal(Instr instr);
  static bool IsJr(Instr instr);
  static bool IsJalr(Instr instr);

  static bool IsNop(Instr instr, unsigned int type);
  static bool IsPop(Instr instr);
  static bool IsPush(Instr instr);
  static bool IsLwRegFpOffset(Instr instr);
  static bool IsSwRegFpOffset(Instr instr);
  static bool IsLwRegFpNegOffset(Instr instr);
  static bool IsSwRegFpNegOffset(Instr instr);

  static Register GetRtReg(Instr instr);
  static Register GetRsReg(Instr instr);
  static Register GetRdReg(Instr instr);

  static uint32_t GetRt(Instr instr);
  static uint32_t GetRtField(Instr instr);
  static uint32_t GetRs(Instr instr);
  static uint32_t GetRsField(Instr instr);
  static uint32_t GetRd(Instr instr);
  static uint32_t GetRdField(Instr instr);
  static uint32_t GetSa(Instr instr);
  static uint32_t GetSaField(Instr instr);
  static uint32_t GetOpcodeField(Instr instr);
  static uint32_t GetFunction(Instr instr);
  static uint32_t GetFunctionField(Instr instr);
  static uint32_t GetImmediate16(Instr instr);
  static uint32_t GetLabelConst(Instr instr);

  static int32_t GetBranchOffset(Instr instr);
  static bool IsLw(Instr instr);
  static int16_t GetLwOffset(Instr instr);
  static Instr SetLwOffset(Instr instr, int16_t offset);

  static bool IsSw(Instr instr);
  static Instr SetSwOffset(Instr instr, int16_t offset);
  static bool IsAddImmediate(Instr instr);
  static Instr SetAddImmediateOffset(Instr instr, int16_t offset);

  static bool IsAndImmediate(Instr instr);
  static bool IsEmittedConstant(Instr instr);

  void CheckTrampolinePool();

  // Allocate a constant pool of the correct size for the generated code.
  Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);

  // Generate the constant pool for the generated code.
  void PopulateConstantPool(ConstantPoolArray* constant_pool);

 protected:
  // Relocation for a type-recording IC has the AST id added to it.  This
  // member variable is a way to pass the information from the call site to
  // the relocation info.
  TypeFeedbackId recorded_ast_id_;

  int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }

  // Decode branch instruction at pos and return branch target pos.
  int target_at(int pos, bool is_internal);

  // Patch branch instruction at pos to branch to given branch target pos.
  void target_at_put(int pos, int target_pos, bool is_internal);

  // Returns true if we need to relocate with this mode.
  bool MustUseReg(RelocInfo::Mode rmode);

  // Record reloc info for current pc_.
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);

  // Block the emission of the trampoline pool before pc_offset.
  void BlockTrampolinePoolBefore(int pc_offset) {
    if (no_trampoline_pool_before_ < pc_offset)
      no_trampoline_pool_before_ = pc_offset;
  }

  void StartBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_++;
  }
  void EndBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_--;
  }

  bool is_trampoline_pool_blocked() const {
    return trampoline_pool_blocked_nesting_ > 0;
  }

  bool has_exception() const {
    return internal_trampoline_exception_;
  }

  void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi);

  bool is_trampoline_emitted() const {
    return trampoline_emitted_;
  }

  // Temporarily block automatic assembly buffer growth.
  void StartBlockGrowBuffer() {
    DCHECK(!block_buffer_growth_);
    block_buffer_growth_ = true;
  }

  void EndBlockGrowBuffer() {
    DCHECK(block_buffer_growth_);
    block_buffer_growth_ = false;
  }

  bool is_buffer_growth_blocked() const {
    return block_buffer_growth_;
  }

 private:
  // Buffer size and constant pool distance are checked together at regular
  // intervals of kBufferCheckInterval emitted bytes.
  static const int kBufferCheckInterval = 1*KB/2;

  // Code generation.
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries.
  static const int kGap = 32;


  // Repeated checking whether the trampoline pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated.
  static const int kCheckConstIntervalInst = 32;
  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;

  int next_buffer_check_;  // pc offset of next buffer check.

  // Emission of the trampoline pool may be blocked in some code sequences.
  int trampoline_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_trampoline_pool_before_;  // Block emission before this pc offset.

  // Keep track of the last emitted pool to guarantee a maximal distance.
  int last_trampoline_pool_end_;  // pc offset of the end of the last pool.

  // Automatic growth of the assembly buffer may be blocked for some sequences.
  bool block_buffer_growth_;  // Block growth when true.

  // Relocation information generation.
  // Each relocation is encoded as a variable size value.
  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;

  // The bound position, before this we cannot do instruction elimination.
  int last_bound_pos_;

  // Code emission.
  inline void CheckBuffer();
  void GrowBuffer();
  inline void emit(Instr x);
  inline void CheckTrampolinePoolQuick();

  // Instruction generation.
  // We have 3 different kinds of encoding layout on MIPS.
  // However, due to the many different types of objects encoded in the same
  // fields, we have quite a few aliases for each mode.
  // Using the same structure to refer to Register and FPURegister would spare a
  // few aliases, but mixing both does not look clean to me.
  // Anyway we could surely implement this differently.

  void GenInstrRegister(Opcode opcode,
                        Register rs,
                        Register rt,
                        Register rd,
                        uint16_t sa = 0,
                        SecondaryField func = NULLSF);

  void GenInstrRegister(Opcode opcode,
                        Register rs,
                        Register rt,
                        uint16_t msb,
                        uint16_t lsb,
                        SecondaryField func);

  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        FPURegister ft,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

  void GenInstrRegister(Opcode opcode,
                        FPURegister fr,
                        FPURegister ft,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        Register rt,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        Register rt,
                        FPUControlRegister fs,
                        SecondaryField func = NULLSF);


  void GenInstrImmediate(Opcode opcode,
                         Register rs,
                         Register rt,
                         int32_t  j);
  void GenInstrImmediate(Opcode opcode,
                         Register rs,
                         SecondaryField SF,
                         int32_t  j);
  void GenInstrImmediate(Opcode opcode,
                         Register r1,
                         FPURegister r2,
                         int32_t  j);


  void GenInstrJump(Opcode opcode,
                     uint32_t address);

  // Helpers.
  void LoadRegPlusOffsetToAt(const MemOperand& src);

  // Labels.
  void print(Label* L);
  void bind_to(Label* L, int pos);
  void next(Label* L, bool is_internal);

  // One trampoline consists of:
  // - space for trampoline slots,
  // - space for labels.
  //
  // Space for trampoline slots is equal to slot_count * kTrampolineSlotsSize.
  // Space for trampoline slots precedes space for labels. Each label is of one
  // instruction size, so total amount for labels is equal to
  // label_count * kInstrSize.
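  // For example, a trampoline created with 16 slots reserves
  // 16 * kTrampolineSlotsSize = 16 * 4 * kInstrSize = 256 bytes for slots.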
  class Trampoline {
   public:
    Trampoline() {
      start_ = 0;
      next_slot_ = 0;
      free_slot_count_ = 0;
      end_ = 0;
    }
    Trampoline(int start, int slot_count) {
      start_ = start;
      next_slot_ = start;
      free_slot_count_ = slot_count;
      end_ = start + slot_count * kTrampolineSlotsSize;
    }
    int start() {
      return start_;
    }
    int end() {
      return end_;
    }
    int take_slot() {
      int trampoline_slot = kInvalidSlotPos;
      if (free_slot_count_ <= 0) {
        // We have run out of space on trampolines.
        // Make sure we fail in debug mode, so we become aware of each case
        // when this happens.
        DCHECK(0);
        // Internal exception will be caught.
      } else {
        trampoline_slot = next_slot_;
        free_slot_count_--;
        next_slot_ += kTrampolineSlotsSize;
      }
      return trampoline_slot;
    }

   private:
    int start_;
    int end_;
    int next_slot_;
    int free_slot_count_;
  };

  int32_t get_trampoline_entry(int32_t pos);
  int unbound_labels_count_;
  // If a trampoline is emitted, the generated code is becoming large. As this
  // is already a slow case which can possibly break our code generation for
  // the extreme case, we use this information to trigger a different mode of
  // branch instruction generation, where we use jump instructions rather
  // than regular branch instructions.
  bool trampoline_emitted_;
  static const int kTrampolineSlotsSize = 4 * kInstrSize;
  static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
  static const int kInvalidSlotPos = -1;

  // Internal reference positions, required for unbound internal reference
  // labels.
  std::set<int> internal_reference_positions_;

  Trampoline trampoline_;
  bool internal_trampoline_exception_;

  friend class RegExpMacroAssemblerMIPS;
  friend class RelocInfo;
  friend class CodePatcher;
  friend class BlockTrampolinePoolScope;

  PositionsRecorder positions_recorder_;
  friend class PositionsRecorder;
  friend class EnsureSpace;
};


class EnsureSpace BASE_EMBEDDED {
 public:
  explicit EnsureSpace(Assembler* assembler) {
    assembler->CheckBuffer();
  }
};

} }  // namespace v8::internal

#endif  // V8_MIPS_ASSEMBLER_MIPS_H_