// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.

#include "src/ppc/assembler-ppc.h"

#if V8_TARGET_ARCH_PPC

#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/ppc/assembler-ppc-inl.h"
#include "src/string-constants.h"

namespace v8 {
namespace internal {

// Get the CPU features enabled by the build.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
  return answer;
}


void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();
  icache_line_size_ = 128;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

// Detect whether the frim instruction is supported (POWER5+).
// For now we will just check for processors we know do not
// support it.
#ifndef USE_SIMULATOR
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.part() == base::CPU::PPC_POWER9) {
    supported_ |= (1u << MODULO);
  }
#if V8_TARGET_ARCH_PPC64
  if (cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << FPR_GPR_MOV);
  }
#endif
  if (cpu.part() == base::CPU::PPC_POWER6 ||
      cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << LWSYNC);
  }
  if (cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << ISELECT);
    supported_ |= (1u << VSX);
  }
#if V8_OS_LINUX
  if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
    // Assume support
    supported_ |= (1u << FPU);
  }
  if (cpu.icache_line_size() != base::CPU::UNKNOWN_CACHE_LINE_SIZE) {
    icache_line_size_ = cpu.icache_line_size();
  }
#elif V8_OS_AIX
  // Assume FP support and default cache line size
  supported_ |= (1u << FPU);
#endif
#else  // Simulator
  supported_ |= (1u << FPU);
  supported_ |= (1u << LWSYNC);
  supported_ |= (1u << ISELECT);
  supported_ |= (1u << VSX);
  supported_ |= (1u << MODULO);
#if V8_TARGET_ARCH_PPC64
  supported_ |= (1u << FPR_GPR_MOV);
#endif
#endif
}


void CpuFeatures::PrintTarget() {
  const char* ppc_arch = nullptr;

#if V8_TARGET_ARCH_PPC64
  ppc_arch = "ppc64";
#else
  ppc_arch = "ppc";
#endif

  printf("target %s\n", ppc_arch);
}


void CpuFeatures::PrintFeatures() {
  printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
}


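// Maps an architectural register number to its Register object.  Note the
// platform aliases used here: sp is r1, ip is r12, and fp is r31.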
Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {r0,  sp,  r2,  r3,  r4,  r5,  r6,  r7,
                                 r8,  r9,  r10, r11, ip,  r13, r14, r15,
                                 r16, r17, r18, r19, r20, r21, r22, r23,
                                 r24, r25, r26, r27, r28, r29, r30, fp};
  return kRegisters[num];
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo

const int RelocInfo::kApplyMask =
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);

bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially
  // coded.  Being specially coded on PPC means that it is a lis/ori
  // instruction sequence or is a constant pool entry, and these are
  // always the case inside code objects.
  return true;
}


bool RelocInfo::IsInConstantPool() {
  if (FLAG_enable_embedded_constant_pool && constant_pool_ != kNullAddress) {
    return Assembler::IsConstantPoolLoadStart(pc_);
  }
  return false;
}

uint32_t RelocInfo::wasm_call_tag() const {
  DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
  return static_cast<uint32_t>(
      Assembler::target_address_at(pc_, constant_pool_));
}

// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-ppc-inl.h for inlined constructors

Operand::Operand(Handle<HeapObject> handle) {
  rm_ = no_reg;
  value_.immediate = static_cast<intptr_t>(handle.address());
  rmode_ = RelocInfo::EMBEDDED_OBJECT;
}

Operand Operand::EmbeddedNumber(double value) {
  int32_t smi;
  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
  Operand result(0, RelocInfo::EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(value);
  return result;
}

Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
  Operand result(0, RelocInfo::EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(str);
  return result;
}

MemOperand::MemOperand(Register rn, int32_t offset)
    : ra_(rn), offset_(offset), rb_(no_reg) {}

MemOperand::MemOperand(Register ra, Register rb)
    : ra_(ra), offset_(0), rb_(rb) {}

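// Allocate any heap objects that were requested while assembling (when no
// isolate was available) and patch each object's address over the
// placeholder mov sequence recorded at the request's offset.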
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
  DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
  for (auto& request : heap_object_requests_) {
    Handle<HeapObject> object;
    switch (request.kind()) {
      case HeapObjectRequest::kHeapNumber: {
        object =
            isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
        break;
      }
      case HeapObjectRequest::kStringConstant: {
        const StringConstantBase* str = request.string();
        CHECK_NOT_NULL(str);
        object = str->AllocateStringConstant(isolate);
        break;
      }
    }
    Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
    Address constant_pool = kNullAddress;
    set_target_address_at(pc, constant_pool, object.address(),
                          SKIP_ICACHE_FLUSH);
  }
}

// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

Assembler::Assembler(const AssemblerOptions& options,
                     std::unique_ptr<AssemblerBuffer> buffer)
    : AssemblerBase(options, std::move(buffer)),
      constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
  reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);

  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  constant_pool_entry_sharing_blocked_nesting_ = 0;
  next_trampoline_check_ = kMaxInt;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;
  optimizable_cmpi_pos_ = -1;
  trampoline_emitted_ = FLAG_force_long_branches;
  tracked_branch_count_ = 0;
  relocations_.reserve(128);
}

void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
                        SafepointTableBuilder* safepoint_table_builder,
                        int handler_table_offset) {
  // Emit constant pool if necessary.
  int constant_pool_size = EmitConstantPool();

  EmitRelocations();

  int code_comments_size = WriteCodeComments();

  AllocateAndInstallRequestedHeapObjects(isolate);

  // Set up code descriptor.
  // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
  // this point to make CodeDesc initialization less fiddly.

  const int instruction_size = pc_offset();
  const int code_comments_offset = instruction_size - code_comments_size;
  const int constant_pool_offset = code_comments_offset - constant_pool_size;
  const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
                                        ? constant_pool_offset
                                        : handler_table_offset;
  const int safepoint_table_offset =
      (safepoint_table_builder == kNoSafepointTable)
          ? handler_table_offset2
          : safepoint_table_builder->GetCodeOffset();
  const int reloc_info_offset =
      static_cast<int>(reloc_info_writer.pos() - buffer_->start());
  CodeDesc::Initialize(desc, this, safepoint_table_offset,
                       handler_table_offset2, constant_pool_offset,
                       code_comments_offset, reloc_info_offset);
}

void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
  DCHECK_EQ(pc_offset() & (kInstrSize - 1), 0);
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() { Align(8); }


Condition Assembler::GetCondition(Instr instr) {
  switch (instr & kCondMask) {
    case BT:
      return eq;
    case BF:
      return ne;
    default:
      UNIMPLEMENTED();
  }
  return al;
}


bool Assembler::IsLis(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr) == r0;
}


bool Assembler::IsLi(Instr instr) {
  return ((instr & kOpcodeMask) == ADDI) && GetRA(instr) == r0;
}


bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }


bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }


bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }


Register Assembler::GetRA(Instr instr) {
  return Register::from_code(Instruction::RAValue(instr));
}


Register Assembler::GetRB(Instr instr) {
  return Register::from_code(Instruction::RBValue(instr));
}


#if V8_TARGET_ARCH_PPC64
// This code assumes a FIXED_SEQUENCE for 64-bit loads (lis/ori)
bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
                                   Instr instr4, Instr instr5) {
  // Check the instructions are indeed a five part load (into r12)
  // 3d800000       lis     r12, 0
  // 618c0000       ori     r12, r12, 0
  // 798c07c6       rldicr  r12, r12, 32, 31
  // 658c00c3       oris    r12, r12, 195
  // 618ccd40       ori     r12, r12, 52544
  return (((instr1 >> 16) == 0x3D80) && ((instr2 >> 16) == 0x618C) &&
          (instr3 == 0x798C07C6) && ((instr4 >> 16) == 0x658C) &&
          ((instr5 >> 16) == 0x618C));
}
#else
// This code assumes a FIXED_SEQUENCE for 32-bit loads (lis/ori)
bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
  // Check the instruction is indeed a two part load (into r12)
  // 3d802553       lis     r12, 9555
  // 618c5000       ori   r12, r12, 20480
  return (((instr1 >> 16) == 0x3D80) && ((instr2 >> 16) == 0x618C));
}
#endif


bool Assembler::IsCmpRegister(Instr instr) {
  return (((instr & kOpcodeMask) == EXT2) &&
          ((EXT2 | (instr & kExt2OpcodeMask)) == CMP));
}


bool Assembler::IsRlwinm(Instr instr) {
  return ((instr & kOpcodeMask) == RLWINMX);
}


bool Assembler::IsAndi(Instr instr) { return ((instr & kOpcodeMask) == ANDIx); }


#if V8_TARGET_ARCH_PPC64
bool Assembler::IsRldicl(Instr instr) {
  return (((instr & kOpcodeMask) == EXT5) &&
          ((EXT5 | (instr & kExt5OpcodeMask)) == RLDICL));
}
#endif


bool Assembler::IsCmpImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == CMPI);
}


bool Assembler::IsCrSet(Instr instr) {
  return (((instr & kOpcodeMask) == EXT1) &&
          ((EXT1 | (instr & kExt1OpcodeMask)) == CREQV));
}


Register Assembler::GetCmpImmediateRegister(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return GetRA(instr);
}


int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return instr & kOff16Mask;
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.


// The link chain is terminated by a negative code position (must be aligned)
const int kEndOfChain = -4;
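// A linked instruction stores the offset back to the previous reference to
// the same label; the first reference links to itself (offset 0), which
// target_at() reports as kEndOfChain.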


// Dummy opcodes for unbound label mov instructions or jump table entries.
enum {
  kUnboundMovLabelOffsetOpcode = 0 << 26,
  kUnboundAddLabelOffsetOpcode = 1 << 26,
  kUnboundAddLabelLongOffsetOpcode = 2 << 26,
  kUnboundMovLabelAddrOpcode = 3 << 26,
  kUnboundJumpTableEntryOpcode = 4 << 26
};
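// Each of these markers stores the word-scaled link to the previous
// reference to the label in its low 26 bits; any operands (e.g. the
// destination register) occupy the following word(s) and are consumed by
// target_at_put() when the label is bound.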

int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  // Check which type of branch this is: a 16- or 26-bit offset.
  uint32_t opcode = instr & kOpcodeMask;
  int link;
  switch (opcode) {
    case BX:
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case BCX:
      link = SIGN_EXT_IMM16((instr & kImm16Mask));
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundAddLabelLongOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link <<= 2;
      break;
    default:
      DCHECK(false);
      return -1;
  }

  if (link == 0) return kEndOfChain;
  return pos + link;
}


void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
  Instr instr = instr_at(pos);
  uint32_t opcode = instr & kOpcodeMask;

  if (is_branch != nullptr) {
    *is_branch = (opcode == BX || opcode == BCX);
  }

  switch (opcode) {
    case BX: {
      int imm26 = target_pos - pos;
      CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
      if (imm26 == kInstrSize && !(instr & kLKMask)) {
        // Branch to next instr without link.
        instr = ORI;  // nop: ori, 0,0,0
      } else {
        instr &= ((~kImm26Mask) | kAAMask | kLKMask);
        instr |= (imm26 & kImm26Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case BCX: {
      int imm16 = target_pos - pos;
      CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
      if (imm16 == kInstrSize && !(instr & kLKMask)) {
        // Branch to next instr without link.
        instr = ORI;  // nop: ori, 0,0,0
      } else {
        instr &= ((~kImm16Mask) | kAAMask | kLKMask);
        instr |= (imm16 & kImm16Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case kUnboundMovLabelOffsetOpcode: {
      // Load the position of the label relative to the generated code object
      // pointer in a register.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
      PatchingAssembler patcher(options(),
                                reinterpret_cast<byte*>(buffer_start_ + pos),
                                2);
      patcher.bitwise_mov32(dst, offset);
      break;
    }
    case kUnboundAddLabelLongOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode: {
      // dst = base + position + immediate
      Instr operands = instr_at(pos + kInstrSize);
      Register dst = Register::from_code((operands >> 27) & 0x1F);
      Register base = Register::from_code((operands >> 22) & 0x1F);
      int32_t delta = (opcode == kUnboundAddLabelLongOffsetOpcode)
                          ? static_cast<int32_t>(instr_at(pos + 2 * kInstrSize))
                          : (SIGN_EXT_IMM22(operands & kImm22Mask));
      int32_t offset = target_pos + delta;
      PatchingAssembler patcher(
          options(), reinterpret_cast<byte*>(buffer_start_ + pos),
          2 + static_cast<int32_t>(opcode == kUnboundAddLabelLongOffsetOpcode));
      patcher.bitwise_add32(dst, base, offset);
      if (opcode == kUnboundAddLabelLongOffsetOpcode) patcher.nop();
      break;
    }
    case kUnboundMovLabelAddrOpcode: {
      // Load the address of the label in a register.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      PatchingAssembler patcher(options(),
                                reinterpret_cast<byte*>(buffer_start_ + pos),
                                kMovInstructionsNoConstantPool);
      // Keep internal references relative until EmitRelocations.
      patcher.bitwise_mov(dst, target_pos);
      break;
    }
    case kUnboundJumpTableEntryOpcode: {
      PatchingAssembler patcher(options(),
                                reinterpret_cast<byte*>(buffer_start_ + pos),
                                kPointerSize / kInstrSize);
      // Keep internal references relative until EmitRelocations.
      patcher.dp(target_pos);
      break;
    }
    default:
      DCHECK(false);
      break;
  }
}


int Assembler::max_reach_from(int pos) {
  Instr instr = instr_at(pos);
  uint32_t opcode = instr & kOpcodeMask;

  // Check which type of branch this is: a 16- or 26-bit offset.
  switch (opcode) {
    case BX:
      return 26;
    case BCX:
      return 16;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      return 0;  // no limit on reach
  }

  DCHECK(false);
  return 0;
}


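// Fix up every reference on L's chain to point at pos.  A branch whose
// encoded reach (16 or 26 bits) cannot span the distance is instead
// redirected through a trampoline slot: an unconditional branch, emitted
// within reach of the referring site, that jumps on to the real target.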
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  int32_t trampoline_pos = kInvalidSlotPos;
  bool is_branch = false;
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int32_t offset = pos - fixup_pos;
    int maxReach = max_reach_from(fixup_pos);
    next(L);  // call next before overwriting link with target at fixup_pos
    if (maxReach && !is_intn(offset, maxReach)) {
      if (trampoline_pos == kInvalidSlotPos) {
        trampoline_pos = get_trampoline_entry();
        CHECK_NE(trampoline_pos, kInvalidSlotPos);
        target_at_put(trampoline_pos, pos);
      }
      target_at_put(fixup_pos, trampoline_pos);
    } else {
      target_at_put(fixup_pos, pos, &is_branch);
    }
  }
  L->bind_to(pos);

  if (!trampoline_emitted_ && is_branch) {
    UntrackBranch();
  }

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}


void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK_GE(link, 0);
    L->link_to(link);
  }
}


bool Assembler::is_near(Label* L, Condition cond) {
  DCHECK(L->is_bound());
  if (!L->is_bound()) return false;

  int maxReach = ((cond == al) ? 26 : 16);
  int offset = L->pos() - pc_offset();

  return is_intn(offset, maxReach);
}


void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
                       DoubleRegister frb, RCBit r) {
  emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
}


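// D-form layout: opcode(6) | RT(5) | RA(5) | D(16).  The multiplications
// by B21 and B16 below place the register fields at bits 21 and 16; the
// 16-bit displacement or immediate occupies the low halfword.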
void Assembler::d_form(Instr instr, Register rt, Register ra,
                       const intptr_t val, bool signed_disp) {
  if (signed_disp) {
    if (!is_int16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
    }
    CHECK(is_int16(val));
  } else {
    if (!is_uint16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
             ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
             val, val, is_uint16(val), kImm16Mask);
    }
    CHECK(is_uint16(val));
  }
  emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
}

void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
                        OEBit o, RCBit r) {
  emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
}

void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
                        int maskbit, RCBit r) {
  int sh0_4 = shift & 0x1F;
  int sh5 = (shift >> 5) & 0x1;
  int m0_4 = maskbit & 0x1F;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
       m5 * B5 | sh5 * B1 | r);
}


void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
                         int maskbit, RCBit r) {
  int m0_4 = maskbit & 0x1F;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
       m5 * B5 | r);
}


// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry() {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    trampoline_entry = trampoline_.take_slot();

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}


int Assembler::link(Label* L) {
  int position;
  if (L->is_bound()) {
    position = L->pos();
  } else {
    if (L->is_linked()) {
      position = L->pos();  // L's link
    } else {
      // was: target_pos = kEndOfChain;
      // However, using self to mark the first reference
      // should avoid most instances of branch offset overflow.  See
      // target_at() for where this is converted back to kEndOfChain.
      position = pc_offset();
    }
    L->link_to(pc_offset());
  }

  return position;
}


// Branch instructions.


void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | bo | condition_bit * B16 | BCLRX | lk);
}


void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | bo | condition_bit * B16 | BCCTRX | lk);
}


// Pseudo op - branch to link register
void Assembler::blr() { bclr(BA, 0, LeaveLK); }


// Pseudo op - branch to count register -- used for "jump"
void Assembler::bctr() { bcctr(BA, 0, LeaveLK); }


void Assembler::bctrl() { bcctr(BA, 0, SetLK); }


void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
  int imm16 = branch_offset;
  CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
  emit(BCX | bo | condition_bit * B16 | (imm16 & kImm16Mask) | lk);
}


void Assembler::b(int branch_offset, LKBit lk) {
  int imm26 = branch_offset;
  CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
  emit(BX | (imm26 & kImm26Mask) | lk);
}


void Assembler::xori(Register dst, Register src, const Operand& imm) {
  d_form(XORI, src, dst, imm.immediate(), false);
}


void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
  d_form(XORIS, rs, ra, imm.immediate(), false);
}


void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1F;
  mb &= 0x1F;
  me &= 0x1F;
  emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}


void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
                      RCBit rc) {
  mb &= 0x1F;
  me &= 0x1F;
  emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
       me << 1 | rc);
}


void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1F;
  mb &= 0x1F;
  me &= 0x1F;
  emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}


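// Shift pseudo-ops expressed as rotate-and-mask: slwi by n rotates left by
// n and masks off the n vacated low-order bits, while srwi by n rotates
// left by 32 - n (i.e. right by n) and masks off the n high-order bits.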
void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, val.immediate(), 0, 31 - val.immediate(), rc);
}


void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 32 - val.immediate(), val.immediate(), 31, rc);
}


void Assembler::clrrwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 0, 0, 31 - val.immediate(), rc);
}


void Assembler::clrlwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 0, val.immediate(), 31, rc);
}


void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
  rlwnm(ra, rs, rb, 0, 31, r);
}


void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, sh, 0, 31, r);
}


void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, 32 - sh, 0, 31, r);
}


void Assembler::subi(Register dst, Register src, const Operand& imm) {
  addi(dst, src, Operand(-(imm.immediate())));
}

void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
}

void Assembler::adde(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDEX, dst, src1, src2, o, r);
}

void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
  // a special xo_form
  emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
}


void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
}

void Assembler::subc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
}

void Assembler::sube(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFEX, dst, src2, src1, o, r);
}

void Assembler::subfic(Register dst, Register src, const Operand& imm) {
  d_form(SUBFIC, dst, src, imm.immediate(), true);
}


void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
}


// Multiply low word
void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
}


// Multiply hi word
void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
}


// Multiply hi word unsigned
void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
}


// Divide word
void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
}


// Divide word unsigned
void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
}


void Assembler::addi(Register dst, Register src, const Operand& imm) {
  DCHECK(src != r0);  // use li instead to show intent
  d_form(ADDI, dst, src, imm.immediate(), true);
}


void Assembler::addis(Register dst, Register src, const Operand& imm) {
  DCHECK(src != r0);  // use lis instead to show intent
  d_form(ADDIS, dst, src, imm.immediate(), true);
}


void Assembler::addic(Register dst, Register src, const Operand& imm) {
  d_form(ADDIC, dst, src, imm.immediate(), true);
}


void Assembler::andi(Register ra, Register rs, const Operand& imm) {
  d_form(ANDIx, rs, ra, imm.immediate(), false);
}


void Assembler::andis(Register ra, Register rs, const Operand& imm) {
  d_form(ANDISx, rs, ra, imm.immediate(), false);
}


void Assembler::ori(Register ra, Register rs, const Operand& imm) {
  d_form(ORI, rs, ra, imm.immediate(), false);
}


void Assembler::oris(Register dst, Register src, const Operand& imm) {
  d_form(ORIS, src, dst, imm.immediate(), false);
}


void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.immediate();
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}


void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.immediate();
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}


void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.immediate();
  int L = 0;
  int pos = pc_offset();
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;

  // For cmpwi against 0, save position and cr for later examination
  // of potential optimization.
  if (imm16 == 0 && pos > 0 && last_bound_pos_ != pos) {
    optimizable_cmpi_pos_ = pos;
    cmpi_cr_ = cr;
  }
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}


void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.immediate();
  int L = 0;
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}


void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
  emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       cb * B6);
}
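// isel computes rt = (CR bit cb is set) ? ra : rb, with ra == r0 reading
// as the literal value 0 rather than the contents of r0.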


// Pseudo op - load immediate
void Assembler::li(Register dst, const Operand& imm) {
  d_form(ADDI, dst, r0, imm.immediate(), true);
}


void Assembler::lis(Register dst, const Operand& imm) {
  d_form(ADDIS, dst, r0, imm.immediate(), true);
}


// Pseudo op - move register
void Assembler::mr(Register dst, Register src) {
  // actually or(dst, src, src)
  orx(dst, src, src);
}


void Assembler::lbz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LBZ, dst, src.ra(), src.offset(), true);
}


void Assembler::lhz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LHZ, dst, src.ra(), src.offset(), true);
}


void Assembler::lwz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LWZ, dst, src.ra(), src.offset(), true);
}


void Assembler::lwzu(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LWZU, dst, src.ra(), src.offset(), true);
}


void Assembler::lha(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LHA, dst, src.ra(), src.offset(), true);
}


void Assembler::lwa(Register dst, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
#else
  lwz(dst, src);
#endif
}

void Assembler::stb(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STB, dst, src.ra(), src.offset(), true);
}


void Assembler::sth(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STH, dst, src.ra(), src.offset(), true);
}


void Assembler::stw(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STW, dst, src.ra(), src.offset(), true);
}


void Assembler::stwu(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STWU, dst, src.ra(), src.offset(), true);
}


void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
  emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
}


#if V8_TARGET_ARCH_PPC64
// 64-bit specific instructions
void Assembler::ld(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
}


void Assembler::ldu(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
}


void Assembler::std(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
}


void Assembler::stdu(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
}


void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
}


void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
}


void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
  mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
}


void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
  md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
}


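// 64-bit shift pseudo-ops via rotate-and-mask: sldi by n is rldicr with
// me = 63 - n (clearing the n vacated low bits); srdi by n is rldicl with
// sh = 64 - n and mb = n (rotating right by n and clearing the n high bits).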
void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicr(dst, src, val.immediate(), 63 - val.immediate(), rc);
}


void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicl(dst, src, 64 - val.immediate(), val.immediate(), rc);
}


void Assembler::clrrdi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicr(dst, src, 0, 63 - val.immediate(), rc);
}


void Assembler::clrldi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicl(dst, src, 0, val.immediate(), rc);
}


void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
}


void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
  int sh0_4 = sh & 0x1F;
  int sh5 = (sh >> 5) & 0x1;

  emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
       sh5 * B1 | r);
}


void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
  rldcl(ra, rs, rb, 0, r);
}


void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, sh, 0, r);
}


void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, 64 - sh, 0, r);
}


void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
}


void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}


void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
}
#endif


// Function descriptor for AIX.
// Code address skips the function descriptor "header".
// TOC and static chain are ignored and set to 0.
void Assembler::function_descriptor() {
  if (ABI_USES_FUNCTION_DESCRIPTORS) {
    Label instructions;
    DCHECK_EQ(pc_offset(), 0);
    emit_label_addr(&instructions);
    dp(0);
    dp(0);
    bind(&instructions);
  }
}


int Assembler::instructions_required_for_mov(Register dst,
                                             const Operand& src) const {
  bool canOptimize =
      !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
    if (ConstantPoolAccessIsInOverflow()) {
      return kMovInstructionsConstantPool + 1;
    }
    return kMovInstructionsConstantPool;
  }
  DCHECK(!canOptimize);
  return kMovInstructionsNoConstantPool;
}


bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
                                          bool canOptimize) const {
  if (!FLAG_enable_embedded_constant_pool || !is_constant_pool_available()) {
    // If there is no constant pool available, we must use a mov
    // immediate sequence.
    return false;
  }
  intptr_t value = src.immediate();
#if V8_TARGET_ARCH_PPC64
  bool allowOverflow = !((canOptimize && is_int32(value)) || dst == r0);
#else
  bool allowOverflow = !(canOptimize || dst == r0);
#endif
  if (canOptimize && is_int16(value)) {
    // Prefer a single-instruction load-immediate.
    return false;
  }
  if (!allowOverflow && ConstantPoolAccessIsInOverflow()) {
    // Prefer non-relocatable two-instruction bitwise-mov32 over
    // overflow sequence.
    return false;
  }

  return true;
}


void Assembler::EnsureSpaceFor(int space_needed) {
  if (buffer_space() <= (kGap + space_needed)) {
    GrowBuffer(space_needed);
  }
}


bool Operand::must_output_reloc_info(const Assembler* assembler) const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
    if (assembler != nullptr && assembler->predictable_code_size()) return true;
    return assembler->options().record_reloc_info_for_serialization;
  } else if (RelocInfo::IsNone(rmode_)) {
    return false;
  }
  return true;
}


// Primarily used for loading constants.  This should really move to the
// macro-assembler, as it is really a pseudo instruction.
// Some usages of this intend for a FIXED_SEQUENCE to be used.
// TODO: break this dependency so we can optimize mov() in general and only
// use the generic version when we require a fixed sequence.
void Assembler::mov(Register dst, const Operand& src) {
  intptr_t value;
  if (src.IsHeapObjectRequest()) {
    RequestHeapObject(src.heap_object_request());
    value = 0;
  } else {
    value = src.immediate();
  }
  bool relocatable = src.must_output_reloc_info(this);
  bool canOptimize;

  canOptimize =
      !(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));

  if (!src.IsHeapObjectRequest() &&
      use_constant_pool_for_mov(dst, src, canOptimize)) {
    DCHECK(is_constant_pool_available());
    if (relocatable) {
      RecordRelocInfo(src.rmode_);
    }
    ConstantPoolEntry::Access access = ConstantPoolAddEntry(src.rmode_, value);
#if V8_TARGET_ARCH_PPC64
    if (access == ConstantPoolEntry::OVERFLOWED) {
      addis(dst, kConstantPoolRegister, Operand::Zero());
      ld(dst, MemOperand(dst, 0));
    } else {
      ld(dst, MemOperand(kConstantPoolRegister, 0));
    }
#else
    if (access == ConstantPoolEntry::OVERFLOWED) {
      addis(dst, kConstantPoolRegister, Operand::Zero());
      lwz(dst, MemOperand(dst, 0));
    } else {
      lwz(dst, MemOperand(kConstantPoolRegister, 0));
    }
#endif
    return;
  }

  if (canOptimize) {
    if (is_int16(value)) {
      li(dst, Operand(value));
    } else {
      uint16_t u16;
#if V8_TARGET_ARCH_PPC64
      if (is_int32(value)) {
#endif
        lis(dst, Operand(value >> 16));
#if V8_TARGET_ARCH_PPC64
      } else {
        if (is_int48(value)) {
          li(dst, Operand(value >> 32));
        } else {
          lis(dst, Operand(value >> 48));
          u16 = ((value >> 32) & 0xFFFF);
          if (u16) {
            ori(dst, dst, Operand(u16));
          }
        }
        sldi(dst, dst, Operand(32));
        u16 = ((value >> 16) & 0xFFFF);
        if (u16) {
          oris(dst, dst, Operand(u16));
        }
      }
#endif
      u16 = (value & 0xFFFF);
      if (u16) {
        ori(dst, dst, Operand(u16));
      }
    }
    return;
  }

  DCHECK(!canOptimize);
  if (relocatable) {
    RecordRelocInfo(src.rmode_);
  }
  bitwise_mov(dst, value);
}


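// Emit the full-width, fixed-length load-immediate sequence: on PPC64 the
// five-instruction lis/ori/sldi/oris/ori pattern that Is64BitLoadIntoR12
// matches above, on 32-bit the two-instruction lis/ori pair.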
void Assembler::bitwise_mov(Register dst, intptr_t value) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
#if V8_TARGET_ARCH_PPC64
    int32_t hi_32 = static_cast<int32_t>(value >> 32);
    int32_t lo_32 = static_cast<int32_t>(value);
    int hi_word = static_cast<int>(hi_32 >> 16);
    int lo_word = static_cast<int>(hi_32 & 0xFFFF);
    lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
    ori(dst, dst, Operand(lo_word));
    sldi(dst, dst, Operand(32));
    hi_word = static_cast<int>(((lo_32 >> 16) & 0xFFFF));
    lo_word = static_cast<int>(lo_32 & 0xFFFF);
    oris(dst, dst, Operand(hi_word));
    ori(dst, dst, Operand(lo_word));
#else
    int hi_word = static_cast<int>(value >> 16);
    int lo_word = static_cast<int>(value & 0xFFFF);
    lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
    ori(dst, dst, Operand(lo_word));
#endif
}


void Assembler::bitwise_mov32(Register dst, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  int hi_word = static_cast<int>(value >> 16);
  int lo_word = static_cast<int>(value & 0xFFFF);
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
}


void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (is_int16(value)) {
    addi(dst, src, Operand(value));
    nop();
  } else {
    int hi_word = static_cast<int>(value >> 16);
    int lo_word = static_cast<int>(value & 0xFFFF);
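    // addic sign-extends its 16-bit immediate, so when bit 15 of lo_word is
    // set it contributes lo_word - 0x10000; incrementing hi_word adds the
    // 0x10000 back.  Example: value 0x12348765 gives addis 0x1235 and
    // addic 0x8765 (-0x789B): 0x12350000 - 0x789B = 0x12348765.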
    if (lo_word & 0x8000) hi_word++;
    addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
    addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
  }
}


void Assembler::mov_label_offset(Register dst, Label* label) {
  int position = link(label);
  if (label->is_bound()) {
    // Load the position of the label relative to the generated code object.
    mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.  Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a 2 instruction mov sequence that will load the
    // destination register with the position of the label from the
    // beginning of the code.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
    emit(dst.code());
  }
}


void Assembler::add_label_offset(Register dst, Register base, Label* label,
                                 int delta) {
  int position = link(label);
  if (label->is_bound()) {
    // dst = base + position + delta
    position += delta;
    bitwise_add32(dst, base, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.  Encode the operands in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));
    BlockTrampolinePoolScope block_trampoline_pool(this);

    emit((is_int22(delta) ? kUnboundAddLabelOffsetOpcode
                          : kUnboundAddLabelLongOffsetOpcode) |
         (link & kImm26Mask));
    emit(dst.code() * B27 | base.code() * B22 | (delta & kImm22Mask));

    if (!is_int22(delta)) {
      emit(delta);
    }
  }
}


void Assembler::mov_label_addr(Register dst, Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    bitwise_mov(dst, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.  Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a multi-instruction mov sequence that will load the
    // destination register with the address of the label.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
    emit(dst.code());
    DCHECK_GE(kMovInstructionsNoConstantPool, 2);
    for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
  }
}


void Assembler::emit_label_addr(Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    dp(position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, the instruction(s) will be patched
    // as a jump table entry containing the label address.  target_at extracts
    // the link and target_at_put patches the instruction(s).
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));
#if V8_TARGET_ARCH_PPC64
    nop();
#endif
  }
}


// Special register instructions
void Assembler::crxor(int bt, int ba, int bb) {
  emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
}


void Assembler::creqv(int bt, int ba, int bb) {
  emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
}


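// Special-purpose register moves.  mtspr/mfspr encode the SPR number with
// its two 5-bit halves swapped, so for SPR numbers below 32 the encoded
// field is simply the number shifted left by 5: LR (SPR 8) -> 256,
// CTR (SPR 9) -> 288, XER (SPR 1) -> 32, matching the constants below.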
void Assembler::mflr(Register dst) {
  emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11);  // Ignore RC bit
}


void Assembler::mtlr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11);  // Ignore RC bit
}


void Assembler::mtctr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11);  // Ignore RC bit
}


void Assembler::mtxer(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
}


void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
  DCHECK_LT(static_cast<int>(bit), 32);
  int bf = cr.code();
  int bfa = bit / CRWIDTH;
  emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
}


void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }


#if V8_TARGET_ARCH_PPC64
void Assembler::mffprd(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
}


void Assembler::mffprwz(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
}


void Assembler::mtfprd(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
}


void Assembler::mtfprwz(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
}


void Assembler::mtfprwa(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
}
#endif


// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-ppc.h.
void Assembler::stop(const char* msg, Condition cond, int32_t code,
                     CRegister cr) {
  if (cond != al) {
    Label skip;
    b(NegateCondition(cond), &skip, cr);
    bkpt(0);
    bind(&skip);
  } else {
    bkpt(0);
  }
}

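// 0x7D821008 decodes as "twge r2, r2": the trap condition r2 >= r2 always
// holds, so this is an unconditional trap used as a breakpoint.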
void Assembler::bkpt(uint32_t imm16) { emit(0x7D821008); }

void Assembler::dcbf(Register ra, Register rb) {
  emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
}


void Assembler::sync() { emit(EXT2 | SYNC); }


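// lwsync is sync with L = 1 (the 1 * B21 below): a lighter-weight barrier
// that orders load-load, load-store, and store-store, but not store-load.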
void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }


void Assembler::icbi(Register ra, Register rb) {
  emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
}


void Assembler::isync() { emit(EXT1 | ISYNC); }


// Floating point support

void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(ra != r0);
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(ra != r0);
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FSUB, frt, fra, frb, rc);
}


void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FADD, frt, fra, frb, rc);
}


void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, RCBit rc) {
  emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
       rc);
}


void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FDIV, frt, fra, frb, rc);
}


void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
                      CRegister cr) {
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
}


void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
                    RCBit rc) {
  emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
}


void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
}


void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT3 | FCFIDUS | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT3 | FCFIDS | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc);
}


// fsel: frt = (fra >= 0.0) ? frc : frb; a NaN in fra selects frb.
void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}


void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
  DCHECK_LT(static_cast<int>(bit), 32);
  int bt = bit;
  emit(EXT4 | MTFSB0 | bt * B21 | rc);
}


void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
  DCHECK_LT(static_cast<int>(bit), 32);
  int bt = bit;
  emit(EXT4 | MTFSB1 | bt * B21 | rc);
}


void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
  emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
}


void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
  emit(EXT4 | MFFS | frt.code() * B21 | rc);
}


void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
                      RCBit rc) {
  emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
}


void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}


void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}

// Pseudo instructions.
void Assembler::nop(int type) {
  Register reg = r0;
  switch (type) {
    case NON_MARKING_NOP:
      reg = r0;
      break;
    case GROUP_ENDING_NOP:
      reg = r2;
      break;
    case DEBUG_BREAK_NOP:
      reg = r3;
      break;
    default:
      UNIMPLEMENTED();
  }

  ori(reg, reg, Operand::Zero());
}


bool Assembler::IsNop(Instr instr, int type) {
  int reg = 0;
  switch (type) {
    case NON_MARKING_NOP:
      reg = 0;
      break;
    case GROUP_ENDING_NOP:
      reg = 2;
      break;
    case DEBUG_BREAK_NOP:
      reg = 3;
      break;
    default:
      UNIMPLEMENTED();
  }
  return instr == (ORI | reg * B21 | reg * B16);
}
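
// For reference (ORI has primary opcode 24, i.e. 0x60000000), the nop
// flavors above encode as:
//   ori r0, r0, 0  ->  0x60000000  (NON_MARKING_NOP)
//   ori r2, r2, 0  ->  0x60420000  (GROUP_ENDING_NOP)
//   ori r3, r3, 0  ->  0x60630000  (DEBUG_BREAK_NOP)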


void Assembler::GrowBuffer(int needed) {
  DCHECK_EQ(buffer_start_, buffer_->start());

  // Compute new buffer size.
  int old_size = buffer_->size();
  int new_size = std::min(2 * old_size, old_size + 1 * MB);
  int space = buffer_space() + (new_size - old_size);
  new_size += (space < needed) ? needed - space : 0;
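  // E.g. a 256 KB buffer grows to 512 KB, but an 8 MB buffer grows only to
  // 9 MB: doubling is capped at +1 MB, and "needed" can push it further.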

  // Some internal data structures overflow for very large buffers, so
  // kMaximalBufferSize must be kept small enough to avoid that.
  if (new_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  // Set up new buffer.
  std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
  DCHECK_EQ(new_size, new_buffer->size());
  byte* new_start = new_buffer->start();

  // Copy the data.
  intptr_t pc_delta = new_start - buffer_start_;
  intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
  size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
  MemMove(new_start, buffer_start_, pc_offset());
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          reloc_size);

  // Switch buffers.
  buffer_ = std::move(new_buffer);
  buffer_start_ = new_start;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.
}


void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}


void Assembler::dd(uint32_t data) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}


void Assembler::dq(uint64_t value) {
  CheckBuffer();
  *reinterpret_cast<uint64_t*>(pc_) = value;
  pc_ += sizeof(uint64_t);
}


void Assembler::dp(uintptr_t data) {
  CheckBuffer();
  *reinterpret_cast<uintptr_t*>(pc_) = data;
  pc_ += sizeof(uintptr_t);
}


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  if (!ShouldRecordRelocInfo(rmode)) return;
  DeferredRelocInfo rinfo(pc_offset(), rmode, data);
  relocations_.push_back(rinfo);
}


void Assembler::EmitRelocations() {
  EnsureSpaceFor(relocations_.size() * kMaxRelocSize);

  for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
       it != relocations_.end(); it++) {
    RelocInfo::Mode rmode = it->rmode();
    Address pc = reinterpret_cast<Address>(buffer_start_) + it->position();
    RelocInfo rinfo(pc, rmode, it->data(), Code());

    // Fix up internal references now that they are guaranteed to be bound.
    if (RelocInfo::IsInternalReference(rmode)) {
      // Jump table entry
      intptr_t pos = static_cast<intptr_t>(Memory<Address>(pc));
      Memory<Address>(pc) = reinterpret_cast<Address>(buffer_start_) + pos;
    } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
      // mov sequence
      intptr_t pos = static_cast<intptr_t>(target_address_at(pc, kNullAddress));
      set_target_address_at(pc, kNullAddress,
                            reinterpret_cast<Address>(buffer_start_) + pos,
                            SKIP_ICACHE_FLUSH);
    }

    reloc_info_writer.Write(&rinfo);
  }
}


void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
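
// Illustrative use of BlockTrampolinePoolFor (a sketch, not from the
// original source): keep the next two emitted instructions contiguous so a
// patcher can rely on finding them back to back:
//   assembler.BlockTrampolinePoolFor(2);
//   // ... emit the two-instruction sequence ...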


void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if (trampoline_pool_blocked_nesting_ > 0) return;
  if (pc_offset() < no_trampoline_pool_before_) {
    next_trampoline_check_ = no_trampoline_pool_before_;
    return;
  }

  DCHECK(!trampoline_emitted_);
  if (tracked_branch_count_ > 0) {
    int size = tracked_branch_count_ * kInstrSize;

    // As we are only going to emit trampoline once, we need to prevent any
    // further emission.
    trampoline_emitted_ = true;
    next_trampoline_check_ = kMaxInt;

    // First we emit jump, then we emit trampoline pool.
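    // Resulting layout (sketch): one branch that skips the pool, followed
    // by one placeholder branch per tracked branch; each placeholder
    // initially targets the first instruction after the pool and is
    // retargeted when a far branch takes the slot.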
    b(size + kInstrSize, LeaveLK);
    for (int i = size; i > 0; i -= kInstrSize) {
      b(i, LeaveLK);
    }

    trampoline_ = Trampoline(pc_offset() - size, tracked_branch_count_);
  }
}

PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
                                     byte* address, int instructions)
    : Assembler(options, ExternalAssemblerBuffer(
                             address, instructions * kInstrSize + kGap)) {
  DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}

PatchingAssembler::~PatchingAssembler() {
  // Check that the code was patched as expected.
  DCHECK_EQ(pc_, buffer_start_ + buffer_->size() - kGap);
  DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC