// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/instructions-arm64.h"

namespace v8 {
namespace internal {

bool Instruction::IsLoad() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) != 0;
  } else {
21
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
22 23 24 25 26 27 28 29 30 31
    switch (op) {
      case LDRB_w:
      case LDRH_w:
      case LDR_w:
      case LDR_x:
      case LDRSB_w:
      case LDRSB_x:
      case LDRSH_w:
      case LDRSH_x:
      case LDRSW_x:
32 33
      case LDR_b:
      case LDR_h:
34
      case LDR_s:
35 36 37
      case LDR_d:
      case LDR_q:
        return true;
38 39 40 41 42 43 44 45 46 47 48 49 50 51
      default: return false;
    }
  }
}


bool Instruction::IsStore() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) == 0;
  } else {
52
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
53 54 55 56 57
    switch (op) {
      case STRB_w:
      case STRH_w:
      case STR_w:
      case STR_x:
58 59
      case STR_b:
      case STR_h:
60
      case STR_s:
61 62 63
      case STR_d:
      case STR_q:
        return true;
64 65 66 67 68 69 70 71 72
      default: return false;
    }
  }
}


static uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
73
  DCHECK_LE(width, 64);
74
  rotate &= 63;
75
  return ((value & ((1ULL << rotate) - 1ULL)) << (width - rotate)) |
76 77 78 79 80 81 82
         (value >> rotate);
}


static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
83
  DCHECK((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
84
         (width == 32));
85
  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
86
  uint64_t result = value & ((1ULL << width) - 1ULL);
87 88 89 90 91 92 93 94 95 96 97
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}


// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are not
// met.
uint64_t Instruction::ImmLogical() {
  // Destination width selects whether the pattern is repeated across 32 or
  // 64 bits.
  unsigned reg_size = SixtyFourBits() ? kXRegSizeInBits : kWRegSizeInBits;
  int32_t n = BitN();
  int32_t imm_s = ImmSetBits();
  int32_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  // (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //

  if (n == 1) {
    // 64-bit element: imm_s == 0x3F would mean all bits set, which is
    // unencodable (it would be the zero/failure value after rotation).
    if (imm_s == 0x3F) {
      return 0;
    }
    // Build the run of (imm_s + 1) low set bits, then rotate.
    uint64_t bits = (1ULL << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    // Elements narrower than 64 bits: the top bits of imm_s encode the
    // element width (see the table above). "11111x" is reserved.
    if ((imm_s >> 1) == 0x1F) {
      return 0;
    }
    // Scan from the widest element (32) down to the narrowest (2); the
    // first clear width bit in imm_s identifies the element size.
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        // All low s bits set would again encode an all-ones element, which
        // is not a valid logical immediate.
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (1ULL << ((imm_s & mask) + 1)) - 1;
        // Rotate within the element, then replicate across the register.
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  UNREACHABLE();
}

146 147
uint32_t Instruction::ImmNEONabcdefgh() const {
  return ImmNEONabc() << 5 | ImmNEONdefgh();
148
}
149

150 151 152
float Instruction::ImmFP32() { return Imm8ToFP32(ImmFP()); }

// Decodes the 8-bit FP immediate field as a double-precision value.
double Instruction::ImmFP64() {
  return Imm8ToFP64(ImmFP());
}
// Decodes the NEON abcdefgh immediate field as a single-precision value.
float Instruction::ImmNEONFP32() const {
  return Imm8ToFP32(ImmNEONabcdefgh());
}
// Decodes the NEON abcdefgh immediate field as a double-precision value.
double Instruction::ImmNEONFP64() const {
  return Imm8ToFP64(ImmNEONabcdefgh());
}

160 161
unsigned CalcLSDataSize(LoadStoreOp op) {
  DCHECK_EQ(static_cast<unsigned>(LSSize_offset + LSSize_width),
162
            kInstrSize * 8);
163 164 165 166 167 168 169 170 171 172
  unsigned size = static_cast<Instr>(op) >> LSSize_offset;
  if ((op & LSVector_mask) != 0) {
    // Vector register memory operations encode the access size in the "size"
    // and "opc" fields.
    if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
      size = kQRegSizeLog2;
    }
  }
  return size;
}
173

174 175 176
unsigned CalcLSPairDataSize(LoadStorePairOp op) {
  static_assert(kXRegSize == kDRegSize, "X and D registers must be same size.");
  static_assert(kWRegSize == kSRegSize, "W and S registers must be same size.");
177
  switch (op) {
178 179 180
    case STP_q:
    case LDP_q:
      return kQRegSizeLog2;
181 182 183
    case STP_x:
    case LDP_x:
    case STP_d:
184 185 186 187
    case LDP_d:
      return kXRegSizeLog2;
    default:
      return kWRegSizeLog2;
188 189 190 191
  }
}


192 193
int64_t Instruction::ImmPCOffset() {
  int64_t offset;
194 195 196 197 198 199
  if (IsPCRelAddressing()) {
    // PC-relative addressing. Only ADR is supported.
    offset = ImmPCRel();
  } else if (BranchType() != UnknownBranchType) {
    // All PC-relative branches.
    // Relative branch offsets are instruction-size-aligned.
200
    offset = ImmBranch() << kInstrSizeLog2;
201 202
  } else if (IsUnresolvedInternalReference()) {
    // Internal references are always word-aligned.
203
    offset = ImmUnresolvedInternalReference() << kInstrSizeLog2;
204 205
  } else {
    // Load literal (offset from PC).
206
    DCHECK(IsLdrLiteral());
207 208
    // The offset is always shifted by 2 bits, even for loads to 64-bits
    // registers.
209
    offset = ImmLLiteral() << kInstrSizeLog2;
210 211 212 213 214 215
  }
  return offset;
}


// Returns the instruction that this PC-relative instruction refers to.
Instruction* Instruction::ImmPCOffsetTarget() {
  const int64_t byte_offset = ImmPCOffset();
  return InstructionAtOffset(byte_offset);
}


220
bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
221
                                     ptrdiff_t offset) {
222 223 224 225 226
  return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
}


// Returns true if `target` is close enough for this branch to encode.
// NOTE(review): the distance is passed in bytes here, whereas
// SetBranchImmTarget checks the distance scaled to instructions — confirm
// the units are intentional.
bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
  const ptrdiff_t distance = DistanceTo(target);
  return IsValidImmPCOffset(BranchType(), distance);
}

230
void Instruction::SetImmPCOffsetTarget(const AssemblerOptions& options,
231
                                       Instruction* target) {
232
  if (IsPCRelAddressing()) {
233
    SetPCRelImmTarget(options, target);
234 235
  } else if (BranchType() != UnknownBranchType) {
    SetBranchImmTarget(target);
236
  } else if (IsUnresolvedInternalReference()) {
237
    SetUnresolvedInternalReferenceImmTarget(options, target);
238
  } else {
239
    // Load literal (offset from PC).
240 241 242 243
    SetImmLLiteral(target);
  }
}

244
void Instruction::SetPCRelImmTarget(const AssemblerOptions& options,
245
                                    Instruction* target) {
246
  // ADRP is not supported, so 'this' must point to an ADR instruction.
247
  DCHECK(IsAdr());
248

249
  ptrdiff_t target_offset = DistanceTo(target);
250 251
  Instr imm;
  if (Instruction::IsValidPCRelOffset(target_offset)) {
252
    imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
253 254
    SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
  } else {
255
    PatchingAssembler patcher(options, reinterpret_cast<byte*>(this),
256
                              PatchingAssembler::kAdrFarPatchableNInstrs);
257
    patcher.PatchAdrFar(target_offset);
258
  }
259 260 261 262
}


// Rewrites this branch so that it targets `target`, preserving every
// non-immediate bit of the encoding.
void Instruction::SetBranchImmTarget(Instruction* target) {
  DCHECK(IsAligned(DistanceTo(target), kInstrSize));
  DCHECK(
      IsValidImmPCOffset(BranchType(), DistanceTo(target) >> kInstrSizeLog2));
  const int imm_offset = static_cast<int>(DistanceTo(target) >> kInstrSizeLog2);

  // Pick the encoder and immediate-field mask for this branch flavour.
  Instr encoded_imm = 0;
  uint32_t field_mask = 0;
  switch (BranchType()) {
    case CondBranchType:
      encoded_imm = Assembler::ImmCondBranch(imm_offset);
      field_mask = ImmCondBranch_mask;
      break;
    case UncondBranchType:
      encoded_imm = Assembler::ImmUncondBranch(imm_offset);
      field_mask = ImmUncondBranch_mask;
      break;
    case CompareBranchType:
      encoded_imm = Assembler::ImmCmpBranch(imm_offset);
      field_mask = ImmCmpBranch_mask;
      break;
    case TestBranchType:
      encoded_imm = Assembler::ImmTestBranch(imm_offset);
      field_mask = ImmTestBranch_mask;
      break;
    default:
      UNREACHABLE();
  }
  SetInstructionBits(Mask(~field_mask) | encoded_imm);
}

295
void Instruction::SetUnresolvedInternalReferenceImmTarget(
296
    const AssemblerOptions& options, Instruction* target) {
297
  DCHECK(IsUnresolvedInternalReference());
298 299
  DCHECK(IsAligned(DistanceTo(target), kInstrSize));
  DCHECK(is_int32(DistanceTo(target) >> kInstrSizeLog2));
300
  int32_t target_offset =
301
      static_cast<int32_t>(DistanceTo(target) >> kInstrSizeLog2);
302 303 304
  uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
  uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);

305
  PatchingAssembler patcher(options, reinterpret_cast<byte*>(this), 2);
306 307 308 309 310
  patcher.brk(high16);
  patcher.brk(low16);
}


311
void Instruction::SetImmLLiteral(Instruction* source) {
312
  DCHECK(IsLdrLiteral());
313
  DCHECK(IsAligned(DistanceTo(source), kInstrSize));
314 315 316
  DCHECK(Assembler::IsImmLLiteral(DistanceTo(source)));
  Instr imm = Assembler::ImmLLiteral(
      static_cast<int>(DistanceTo(source) >> kLoadLiteralScaleLog2));
317 318 319 320 321 322 323 324
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}


// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
325
// instructions-arm64-inl.h to work around this.
326 327 328
bool InstructionSequence::IsInlineData() const {
  // Inline data is encoded as a single movz instruction which writes to xzr
  // (x31).
329
  return IsMovz() && SixtyFourBits() && (Rd() == kZeroRegCode);
330 331 332 333 334 335 336
  // TODO(all): If we extend ::InlineData() to support bigger data, we need
  // to update this method too.
}


// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
337
// instructions-arm64-inl.h to work around this.
338
uint64_t InstructionSequence::InlineData() const {
339
  DCHECK(IsInlineData());
340 341 342 343 344 345
  uint64_t payload = ImmMoveWide();
  // TODO(all): If we extend ::InlineData() to support bigger data, we need
  // to update this method too.
  return payload;
}

346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376
NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr) {
  instrbits_ = instr->InstructionBits();
  SetFormatMaps(IntegerFormatMap());
}

// Decodes NEON formats using one caller-supplied map for all slots.
NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
                                     const NEONFormatMap* format) {
  SetFormatMaps(format);
  instrbits_ = instr->InstructionBits();
}

// Decodes NEON formats using two caller-supplied maps; the third slot
// falls back to the second map.
NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
                                     const NEONFormatMap* format0,
                                     const NEONFormatMap* format1) {
  SetFormatMaps(format0, format1);
  instrbits_ = instr->InstructionBits();
}

// Decodes NEON formats using three caller-supplied maps, one per slot.
NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
                                     const NEONFormatMap* format0,
                                     const NEONFormatMap* format1,
                                     const NEONFormatMap* format2) {
  SetFormatMaps(format0, format1, format2);
  instrbits_ = instr->InstructionBits();
}

// Installs up to three format maps. Omitted maps inherit the previous
// slot's map, so a single map can drive all three substitution slots.
void NEONFormatDecoder::SetFormatMaps(const NEONFormatMap* format0,
                                      const NEONFormatMap* format1,
                                      const NEONFormatMap* format2) {
  DCHECK_NOT_NULL(format0);
  formats_[0] = format0;
  formats_[1] = (format1 != nullptr) ? format1 : formats_[0];
  formats_[2] = (format2 != nullptr) ? format2 : formats_[1];
}

// Replaces the format map used for substitution slot `index`.
// `format` must be non-null; to inherit another slot's map, use
// SetFormatMaps() instead.
void NEONFormatDecoder::SetFormatMap(unsigned index,
                                     const NEONFormatMap* format) {
  DCHECK_LT(index, arraysize(formats_));
  DCHECK_NOT_NULL(format);
  formats_[index] = format;
}

// Substitutes the placeholder form for all three format slots in `string`.
const char* NEONFormatDecoder::SubstitutePlaceholders(const char* string) {
  return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
}

// Expands `string` (a printf-style format) with the textual substitution of
// each of the three format slots, using the given modes. Returns a pointer
// into the decoder's internal buffer.
const char* NEONFormatDecoder::Substitute(const char* string,
                                          SubstitutionMode mode0,
                                          SubstitutionMode mode1,
                                          SubstitutionMode mode2) {
  const char* sub0 = GetSubstitute(0, mode0);
  const char* sub1 = GetSubstitute(1, mode1);
  const char* sub2 = GetSubstitute(2, mode2);
  snprintf(form_buffer_, sizeof(form_buffer_), string, sub0, sub1, sub2);
  return form_buffer_;
}

// Appends "2" to `mnemonic` for Q-form (second-half) instructions; returns
// the mnemonic unchanged otherwise.
const char* NEONFormatDecoder::Mnemonic(const char* mnemonic) {
  if ((instrbits_ & NEON_Q) == 0) return mnemonic;
  snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
  return mne_buffer_;
}

// Returns the vector format selected by the map in slot `format_index`.
VectorFormat NEONFormatDecoder::GetVectorFormat(int format_index) {
  const NEONFormatMap* map = formats_[format_index];
  return GetVectorFormat(map);
}

// Translates the NEONFormat decoded through `format_map` into the
// corresponding VectorFormat.
VectorFormat NEONFormatDecoder::GetVectorFormat(
    const NEONFormatMap* format_map) {
  // Table indexed by NEONFormat enumerators.
  static const VectorFormat vform[] = {
      kFormatUndefined, kFormat8B, kFormat16B, kFormat4H, kFormat8H,
      kFormat2S,        kFormat4S, kFormat1D,  kFormat2D, kFormatB,
      kFormatH,         kFormatS,  kFormatD};
  const NEONFormat format = GetNEONFormat(format_map);
  DCHECK_LT(format, arraysize(vform));
  return vform[format];
}

const char* NEONFormatDecoder::GetSubstitute(int index, SubstitutionMode mode) {
  if (mode == kFormat) {
    return NEONFormatAsString(GetNEONFormat(formats_[index]));
  }
  DCHECK_EQ(mode, kPlaceholder);
  return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
}

// Looks up the NEONFormat encoded by the instruction bits that
// `format_map` selects.
NEONFormat NEONFormatDecoder::GetNEONFormat(const NEONFormatMap* format_map) {
  const uint8_t map_index = PickBits(format_map->bits);
  return format_map->map[map_index];
}

// Returns the lower-case textual name of `format` (e.g. "8b", "2d").
const char* NEONFormatDecoder::NEONFormatAsString(NEONFormat format) {
  // Table indexed by NEONFormat enumerators.
  static const char* kNames[] = {
      "undefined", "8b", "16b", "4h", "8h", "2s", "4s",
      "1d",        "2d", "b",   "h",  "s",  "d"};
  DCHECK_LT(format, arraysize(kNames));
  return kNames[format];
}

// Returns the disassembler placeholder for a scalar format ('B/'H/'S/'D);
// only scalar formats and NF_UNDEF are expected here.
const char* NEONFormatDecoder::NEONFormatAsPlaceholder(NEONFormat format) {
  DCHECK((format == NF_B) || (format == NF_H) || (format == NF_S) ||
         (format == NF_D) || (format == NF_UNDEF));
  // Table indexed by NEONFormat enumerators; vector formats have no
  // placeholder and map to "undefined".
  static const char* kPlaceholders[] = {
      "undefined", "undefined", "undefined", "undefined", "undefined",
      "undefined", "undefined", "undefined", "undefined", "'B",
      "'H",        "'S",        "'D"};
  return kPlaceholders[format];
}

// Builds an integer from the instruction bits whose positions are listed in
// `bits` (most significant first); the list is terminated by a zero entry.
uint8_t NEONFormatDecoder::PickBits(const uint8_t bits[]) {
  uint8_t result = 0;
  for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
    if (bits[b] == 0) break;
    result <<= 1;
    // Use an unsigned literal: `1 << 31` with a signed int shifts into the
    // sign bit, which is not valid for bit position 31.
    result |= ((instrbits_ & (1U << bits[b])) == 0) ? 0 : 1;
  }
  return result;
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64