// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#include "src/codegen/arm64/instructions-arm64.h"

#include "src/codegen/arm64/assembler-arm64-inl.h"

namespace v8 {
namespace internal {

bool Instruction::IsLoad() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) != 0;
  } else {
21
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
22 23 24 25 26 27 28 29 30 31
    switch (op) {
      case LDRB_w:
      case LDRH_w:
      case LDR_w:
      case LDR_x:
      case LDRSB_w:
      case LDRSB_x:
      case LDRSH_w:
      case LDRSH_x:
      case LDRSW_x:
32 33
      case LDR_b:
      case LDR_h:
34
      case LDR_s:
35 36 37
      case LDR_d:
      case LDR_q:
        return true;
38 39
      default:
        return false;
40 41 42 43 44 45 46 47 48 49 50 51
    }
  }
}

bool Instruction::IsStore() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) == 0;
  } else {
52
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
53 54 55 56 57
    switch (op) {
      case STRB_w:
      case STRH_w:
      case STR_w:
      case STR_x:
58 59
      case STR_b:
      case STR_h:
60
      case STR_s:
61 62 63
      case STR_d:
      case STR_q:
        return true;
64 65
      default:
        return false;
66 67 68 69
    }
  }
}

70
static uint64_t RotateRight(uint64_t value, unsigned int rotate,
71
                            unsigned int width) {
72
  DCHECK_LE(width, 64);
73
  rotate &= 63;
74
  if (rotate == 0) return value;
75
  return ((value & ((1ULL << rotate) - 1ULL)) << (width - rotate)) |
76 77 78
         (value >> rotate);
}

79
static uint64_t RepeatBitsAcrossReg(unsigned reg_size, uint64_t value,
80
                                    unsigned width) {
81
  DCHECK((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
82
         (width == 32));
83
  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
84
  uint64_t result = value & ((1ULL << width) - 1ULL);
85 86 87 88 89 90 91 92 93 94
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}

// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are not
// met.
uint64_t Instruction::ImmLogical() {
95
  unsigned reg_size = SixtyFourBits() ? kXRegSizeInBits : kWRegSizeInBits;
96 97 98
  int32_t n = BitN();
  int32_t imm_s = ImmSetBits();
  int32_t imm_r = ImmRotate();
99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  // (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //

  if (n == 1) {
    if (imm_s == 0x3F) {
      return 0;
    }
121
    uint64_t bits = (1ULL << (imm_s + 1)) - 1;
122 123 124 125 126 127 128 129 130 131 132
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1F) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
133
        uint64_t bits = (1ULL << ((imm_s & mask) + 1)) - 1;
134 135
        return RepeatBitsAcrossReg(
            reg_size, RotateRight(bits, imm_r & mask, width), width);
136 137 138 139 140 141
      }
    }
  }
  UNREACHABLE();
}

142 143
uint32_t Instruction::ImmNEONabcdefgh() const {
  return ImmNEONabc() << 5 | ImmNEONdefgh();
144
}
145

146 147 148
float Instruction::ImmFP32() { return Imm8ToFP32(ImmFP()); }

// Decodes the 8-bit FP immediate field into a double.
double Instruction::ImmFP64() { return Imm8ToFP64(ImmFP()); }

150
float Instruction::ImmNEONFP32() const { return Imm8ToFP32(ImmNEONabcdefgh()); }
151

152 153
double Instruction::ImmNEONFP64() const {
  return Imm8ToFP64(ImmNEONabcdefgh());
154
}
155

156 157
unsigned CalcLSDataSize(LoadStoreOp op) {
  DCHECK_EQ(static_cast<unsigned>(LSSize_offset + LSSize_width),
158
            kInstrSize * 8);
159 160 161 162 163 164 165 166 167 168
  unsigned size = static_cast<Instr>(op) >> LSSize_offset;
  if ((op & LSVector_mask) != 0) {
    // Vector register memory operations encode the access size in the "size"
    // and "opc" fields.
    if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
      size = kQRegSizeLog2;
    }
  }
  return size;
}
169

170 171 172
unsigned CalcLSPairDataSize(LoadStorePairOp op) {
  static_assert(kXRegSize == kDRegSize, "X and D registers must be same size.");
  static_assert(kWRegSize == kSRegSize, "W and S registers must be same size.");
173
  switch (op) {
174 175 176
    case STP_q:
    case LDP_q:
      return kQRegSizeLog2;
177 178 179
    case STP_x:
    case LDP_x:
    case STP_d:
180 181 182 183
    case LDP_d:
      return kXRegSizeLog2;
    default:
      return kWRegSizeLog2;
184 185 186
  }
}

187 188
int64_t Instruction::ImmPCOffset() {
  int64_t offset;
189 190 191 192 193 194
  if (IsPCRelAddressing()) {
    // PC-relative addressing. Only ADR is supported.
    offset = ImmPCRel();
  } else if (BranchType() != UnknownBranchType) {
    // All PC-relative branches.
    // Relative branch offsets are instruction-size-aligned.
195
    offset = ImmBranch() * kInstrSize;
196 197
  } else if (IsUnresolvedInternalReference()) {
    // Internal references are always word-aligned.
198
    offset = ImmUnresolvedInternalReference() * kInstrSize;
199 200
  } else {
    // Load literal (offset from PC).
201
    DCHECK(IsLdrLiteral());
202 203
    // The offset is always shifted by 2 bits, even for loads to 64-bits
    // registers.
204
    offset = ImmLLiteral() * kInstrSize;
205 206 207 208 209
  }
  return offset;
}

// Returns the instruction this PC-relative instruction targets.
Instruction* Instruction::ImmPCOffsetTarget() {
  return InstructionAtOffset(ImmPCOffset());
}

213
bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
214
                                     ptrdiff_t offset) {
215 216
  DCHECK_EQ(offset % kInstrSize, 0);
  return is_intn(offset / kInstrSize, ImmBranchRangeBitwidth(branch_type));
217 218 219
}

// Returns true if |target| is reachable by this branch's immediate field.
bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
  return IsValidImmPCOffset(BranchType(), DistanceTo(target));
}

223
void Instruction::SetImmPCOffsetTarget(const AssemblerOptions& options,
224
                                       Instruction* target) {
225
  if (IsPCRelAddressing()) {
226
    SetPCRelImmTarget(options, target);
227 228
  } else if (BranchType() != UnknownBranchType) {
    SetBranchImmTarget(target);
229
  } else if (IsUnresolvedInternalReference()) {
230
    SetUnresolvedInternalReferenceImmTarget(options, target);
231
  } else {
232
    // Load literal (offset from PC).
233 234 235 236
    SetImmLLiteral(target);
  }
}

237
void Instruction::SetPCRelImmTarget(const AssemblerOptions& options,
238
                                    Instruction* target) {
239
  // ADRP is not supported, so 'this' must point to an ADR instruction.
240
  DCHECK(IsAdr());
241

242
  ptrdiff_t target_offset = DistanceTo(target);
243 244
  Instr imm;
  if (Instruction::IsValidPCRelOffset(target_offset)) {
245
    imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
246 247
    SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
  } else {
248
    PatchingAssembler patcher(options, reinterpret_cast<byte*>(this),
249
                              PatchingAssembler::kAdrFarPatchableNInstrs);
250
    patcher.PatchAdrFar(target_offset);
251
  }
252 253 254
}

// Points this branch instruction at |target|, which must already be in range.
// Selects the immediate encoder and mask appropriate to the branch type.
void Instruction::SetBranchImmTarget(Instruction* target) {
  DCHECK(IsAligned(DistanceTo(target), kInstrSize));
  DCHECK(IsValidImmPCOffset(BranchType(), DistanceTo(target)));
  int offset = static_cast<int>(DistanceTo(target) >> kInstrSizeLog2);
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  switch (BranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default:
      UNREACHABLE();
  }
  // Replace only the immediate field; all other bits are preserved.
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}

287
void Instruction::SetUnresolvedInternalReferenceImmTarget(
288
    const AssemblerOptions& options, Instruction* target) {
289
  DCHECK(IsUnresolvedInternalReference());
290 291
  DCHECK(IsAligned(DistanceTo(target), kInstrSize));
  DCHECK(is_int32(DistanceTo(target) >> kInstrSizeLog2));
292
  int32_t target_offset =
293
      static_cast<int32_t>(DistanceTo(target) >> kInstrSizeLog2);
294 295 296
  uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
  uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);

297
  PatchingAssembler patcher(options, reinterpret_cast<byte*>(this), 2);
298 299 300 301
  patcher.brk(high16);
  patcher.brk(low16);
}

302
void Instruction::SetImmLLiteral(Instruction* source) {
303
  DCHECK(IsLdrLiteral());
304
  DCHECK(IsAligned(DistanceTo(source), kInstrSize));
305 306 307
  DCHECK(Assembler::IsImmLLiteral(DistanceTo(source)));
  Instr imm = Assembler::ImmLLiteral(
      static_cast<int>(DistanceTo(source) >> kLoadLiteralScaleLog2));
308 309 310 311 312
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}

313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343
NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr) {
  instrbits_ = instr->InstructionBits();
  SetFormatMaps(IntegerFormatMap());
}

// Decodes |instr| using one caller-supplied format map for every operand.
NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
                                     const NEONFormatMap* format) {
  SetFormatMaps(format);
  instrbits_ = instr->InstructionBits();
}

// Decodes |instr| with distinct format maps for the first two operands; the
// second map also covers any remaining operands.
NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
                                     const NEONFormatMap* format0,
                                     const NEONFormatMap* format1) {
  SetFormatMaps(format0, format1);
  instrbits_ = instr->InstructionBits();
}

// Decodes |instr| with distinct format maps for the first three operands.
NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
                                     const NEONFormatMap* format0,
                                     const NEONFormatMap* format1,
                                     const NEONFormatMap* format2) {
  SetFormatMaps(format0, format1, format2);
  instrbits_ = instr->InstructionBits();
}

// Installs the per-operand format maps. An omitted (nullptr) map defaults to
// the preceding operand's map; only the first map is mandatory.
void NEONFormatDecoder::SetFormatMaps(const NEONFormatMap* format0,
                                      const NEONFormatMap* format1,
                                      const NEONFormatMap* format2) {
  DCHECK_NOT_NULL(format0);
  formats_[0] = format0;
  formats_[1] = (format1 == nullptr) ? formats_[0] : format1;
  formats_[2] = (format2 == nullptr) ? formats_[1] : format2;
  // Support four parameters form (i.e. ld4r)
  // to avoid using positional arguments in DisassemblingDecoder.
  // See: https://crbug.com/v8/10365
  formats_[3] = formats_[2];
}

// Overrides the format map for the single operand at |index|.
void NEONFormatDecoder::SetFormatMap(unsigned index,
                                     const NEONFormatMap* format) {
  DCHECK_LT(index, arraysize(formats_));
  DCHECK_NOT_NULL(format);
  formats_[index] = format;
}
358

359
const char* NEONFormatDecoder::SubstitutePlaceholders(const char* string) {
360 361
  return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder,
                    kPlaceholder);
362 363 364 365 366
}

const char* NEONFormatDecoder::Substitute(const char* string,
                                          SubstitutionMode mode0,
                                          SubstitutionMode mode1,
367 368
                                          SubstitutionMode mode2,
                                          SubstitutionMode mode3) {
369
  snprintf(form_buffer_, sizeof(form_buffer_), string, GetSubstitute(0, mode0),
370 371
           GetSubstitute(1, mode1), GetSubstitute(2, mode2),
           GetSubstitute(3, mode3));
372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435
  return form_buffer_;
}

// Returns |mnemonic| unchanged, or with a "2" suffix (via an internal buffer)
// when the instruction's Q bit is set.
const char* NEONFormatDecoder::Mnemonic(const char* mnemonic) {
  const bool q_bit_set = (instrbits_ & NEON_Q) != 0;
  if (!q_bit_set) return mnemonic;
  snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
  return mne_buffer_;
}

// Returns the vector format decoded for operand |format_index|.
VectorFormat NEONFormatDecoder::GetVectorFormat(int format_index) {
  return GetVectorFormat(formats_[format_index]);
}

// Translates the NEONFormat decoded from |format_map| into the corresponding
// VectorFormat enumerator.
VectorFormat NEONFormatDecoder::GetVectorFormat(
    const NEONFormatMap* format_map) {
  // Lookup table indexed by the decoded NEONFormat value.
  static const VectorFormat vform[] = {
      kFormatUndefined, kFormat8B, kFormat16B, kFormat4H, kFormat8H,
      kFormat2S,        kFormat4S, kFormat1D,  kFormat2D, kFormatB,
      kFormatH,         kFormatS,  kFormatD};
  DCHECK_LT(GetNEONFormat(format_map), arraysize(vform));
  return vform[GetNEONFormat(format_map)];
}

const char* NEONFormatDecoder::GetSubstitute(int index, SubstitutionMode mode) {
  if (mode == kFormat) {
    return NEONFormatAsString(GetNEONFormat(formats_[index]));
  }
  DCHECK_EQ(mode, kPlaceholder);
  return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
}

// Decodes the NEONFormat for |format_map| by sampling the instruction bits
// the map lists and using the packed value as an index into the map table.
NEONFormat NEONFormatDecoder::GetNEONFormat(const NEONFormatMap* format_map) {
  return format_map->map[PickBits(format_map->bits)];
}

// Returns the textual spelling for |format| (e.g. "8b", "2d"), indexed by the
// NEONFormat value.
const char* NEONFormatDecoder::NEONFormatAsString(NEONFormat format) {
  static const char* formats[] = {"undefined", "8b", "16b", "4h", "8h",
                                  "2s",        "4s", "1d",  "2d", "b",
                                  "h",         "s",  "d"};
  DCHECK_LT(format, arraysize(formats));
  return formats[format];
}

// Returns the placeholder token ("'B", "'H", "'S" or "'D") for a scalar
// |format|; only scalar formats and NF_UNDEF are valid here.
const char* NEONFormatDecoder::NEONFormatAsPlaceholder(NEONFormat format) {
  DCHECK((format == NF_B) || (format == NF_H) || (format == NF_S) ||
         (format == NF_D) || (format == NF_UNDEF));
  // Vector-format slots map to "undefined"; they are excluded by the DCHECK.
  static const char* formats[] = {
      "undefined", "undefined", "undefined", "undefined", "undefined",
      "undefined", "undefined", "undefined", "undefined", "'B",
      "'H",        "'S",        "'D"};
  return formats[format];
}

// Samples the instruction bits whose positions are listed in |bits|
// (zero-terminated, at most kNEONFormatMaxBits entries) and packs them, in
// list order, into the low bits of the result.
uint8_t NEONFormatDecoder::PickBits(const uint8_t bits[]) {
  uint8_t packed = 0;
  unsigned i = 0;
  while ((i < kNEONFormatMaxBits) && (bits[i] != 0)) {
    packed = static_cast<uint8_t>(packed << 1);
    if ((instrbits_ & (1 << bits[i])) != 0) packed |= 1;
    i++;
  }
  return packed;
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64