// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_
#define V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_

#include <type_traits>

#include "src/base/memory.h"
#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"

namespace v8 {
namespace internal {

bool CpuFeatures::SupportsOptimizer() { return true; }

bool CpuFeatures::SupportsWasmSimd128() { return true; }

void RelocInfo::apply(intptr_t delta) {
  // On arm64 only internal references and immediate branches need extra work.
  if (RelocInfo::IsInternalReference(rmode_)) {
    // Absolute code pointer inside code object moves with the code object.
    intptr_t internal_ref = ReadUnalignedValue<intptr_t>(pc_);
    internal_ref += delta;  // Relocate entry.
    WriteUnalignedValue<intptr_t>(pc_, internal_ref);
  } else {
    Instruction* instr = reinterpret_cast<Instruction*>(pc_);
    if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) {
      Address old_target =
          reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
      Address new_target = old_target - delta;
      instr->SetBranchImmTarget(reinterpret_cast<Instruction*>(new_target));
    }
  }
}

inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
  return (reg_size_ == other.reg_size_) && (reg_type_ == other.reg_type_);
}

inline bool CPURegister::IsZero() const {
  DCHECK(is_valid());
  return IsRegister() && (code() == kZeroRegCode);
}

inline bool CPURegister::IsSP() const {
  DCHECK(is_valid());
  return IsRegister() && (code() == kSPRegInternalCode);
}

inline void CPURegList::Combine(const CPURegList& other) {
  DCHECK(other.type() == type_);
  DCHECK(other.RegisterSizeInBits() == size_);
  list_ |= other.list();
}
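
// CPURegList is a bit set over register codes: bit |code| is set when the
// corresponding register is in the list, so Combine and Remove are bitwise
// OR and AND-NOT. Combining {x0, x1} with {x2}, for example, leaves
// list_ == 0b111.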

inline void CPURegList::Remove(const CPURegList& other) {
  if (other.type() == type_) {
    list_ &= ~other.list();
  }
}

inline void CPURegList::Combine(const CPURegister& other) {
  DCHECK(other.type() == type_);
  DCHECK(other.SizeInBits() == size_);
  Combine(other.code());
}

inline void CPURegList::Remove(const CPURegister& other1,
                               const CPURegister& other2,
                               const CPURegister& other3,
                               const CPURegister& other4) {
  if (!other1.IsNone() && (other1.type() == type_)) Remove(other1.code());
  if (!other2.IsNone() && (other2.type() == type_)) Remove(other2.code());
  if (!other3.IsNone() && (other3.type() == type_)) Remove(other3.code());
  if (!other4.IsNone() && (other4.type() == type_)) Remove(other4.code());
}

inline void CPURegList::Combine(int code) {
  DCHECK(CPURegister::Create(code, size_, type_).is_valid());
  list_ |= (1ULL << code);
  DCHECK(is_valid());
}

inline void CPURegList::Remove(int code) {
  DCHECK(CPURegister::Create(code, size_, type_).is_valid());
  list_ &= ~(1ULL << code);
}

inline Register Register::XRegFromCode(unsigned code) {
  if (code == kSPRegInternalCode) {
    return sp;
  } else {
    DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
    return Register::Create(code, kXRegSizeInBits);
  }
}

inline Register Register::WRegFromCode(unsigned code) {
  if (code == kSPRegInternalCode) {
    return wsp;
  } else {
    DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
    return Register::Create(code, kWRegSizeInBits);
  }
}
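
// Encoding value 31 names either the zero register or the stack pointer
// depending on context, so V8 tracks sp/wsp with the out-of-band code
// kSPRegInternalCode; the helpers above map that code back to sp and wsp.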

inline VRegister VRegister::BRegFromCode(unsigned code) {
  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
  return VRegister::Create(code, kBRegSizeInBits);
}

inline VRegister VRegister::HRegFromCode(unsigned code) {
  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
  return VRegister::Create(code, kHRegSizeInBits);
}

inline VRegister VRegister::SRegFromCode(unsigned code) {
  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
  return VRegister::Create(code, kSRegSizeInBits);
}

inline VRegister VRegister::DRegFromCode(unsigned code) {
  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
  return VRegister::Create(code, kDRegSizeInBits);
}

inline VRegister VRegister::QRegFromCode(unsigned code) {
  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
  return VRegister::Create(code, kQRegSizeInBits);
}

inline VRegister VRegister::VRegFromCode(unsigned code) {
  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
  return VRegister::Create(code, kVRegSizeInBits);
}

inline Register CPURegister::W() const {
  DCHECK(IsRegister());
  return Register::WRegFromCode(code());
}

inline Register CPURegister::Reg() const {
  DCHECK(IsRegister());
  return Register::Create(code(), reg_size_);
}

inline VRegister CPURegister::VReg() const {
  DCHECK(IsVRegister());
  return VRegister::Create(code(), reg_size_);
}

inline Register CPURegister::X() const {
  DCHECK(IsRegister());
  return Register::XRegFromCode(code());
}

inline VRegister CPURegister::V() const {
  DCHECK(IsVRegister());
  return VRegister::VRegFromCode(code());
}

inline VRegister CPURegister::B() const {
  DCHECK(IsVRegister());
  return VRegister::BRegFromCode(code());
}

inline VRegister CPURegister::H() const {
  DCHECK(IsVRegister());
  return VRegister::HRegFromCode(code());
}

inline VRegister CPURegister::S() const {
  DCHECK(IsVRegister());
  return VRegister::SRegFromCode(code());
}

inline VRegister CPURegister::D() const {
  DCHECK(IsVRegister());
  return VRegister::DRegFromCode(code());
}

inline VRegister CPURegister::Q() const {
  DCHECK(IsVRegister());
  return VRegister::QRegFromCode(code());
}

// Immediate.
// Default initializer is for int types
template <typename T>
struct ImmediateInitializer {
  static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NONE; }
  static inline int64_t immediate_for(T t) {
    STATIC_ASSERT(sizeof(T) <= 8);
    STATIC_ASSERT(std::is_integral<T>::value || std::is_enum<T>::value);
    return t;
  }
};

template <>
struct ImmediateInitializer<Smi> {
  static inline RelocInfo::Mode rmode_for(Smi t) { return RelocInfo::NONE; }
  static inline int64_t immediate_for(Smi t) {
    return static_cast<int64_t>(t.ptr());
  }
};

template <>
struct ImmediateInitializer<ExternalReference> {
  static inline RelocInfo::Mode rmode_for(ExternalReference t) {
    return RelocInfo::EXTERNAL_REFERENCE;
  }
  static inline int64_t immediate_for(ExternalReference t) {
    return static_cast<int64_t>(t.address());
  }
};
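
// These traits let the templated Immediate constructors below derive both the
// 64-bit payload and the relocation mode from the argument type, avoiding a
// separate constructor overload per type.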

template <typename T>
Immediate::Immediate(Handle<T> handle, RelocInfo::Mode mode)
    : value_(static_cast<intptr_t>(handle.address())), rmode_(mode) {
  DCHECK(RelocInfo::IsEmbeddedObjectMode(mode));
}

template <typename T>
Immediate::Immediate(T t)
    : value_(ImmediateInitializer<T>::immediate_for(t)),
      rmode_(ImmediateInitializer<T>::rmode_for(t)) {}

template <typename T>
Immediate::Immediate(T t, RelocInfo::Mode rmode)
    : value_(ImmediateInitializer<T>::immediate_for(t)), rmode_(rmode) {
  STATIC_ASSERT(std::is_integral<T>::value);
}

template <typename T>
Operand::Operand(T t) : immediate_(t), reg_(NoReg) {}

template <typename T>
Operand::Operand(T t, RelocInfo::Mode rmode)
    : immediate_(t, rmode), reg_(NoReg) {}

Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
    : immediate_(0),
      reg_(reg),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  DCHECK(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
  DCHECK(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
  DCHECK_IMPLIES(reg.IsSP(), shift_amount == 0);
}

Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
    : immediate_(0),
      reg_(reg),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  DCHECK(reg.is_valid());
  DCHECK_LE(shift_amount, 4);
  DCHECK(!reg.IsSP());

  // Extend modes SXTX and UXTX require a 64-bit register.
  DCHECK(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}
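
// Illustrative examples: Operand(x1, LSL, 3) is the shifted-register form
// "x1, lsl #3" (as in "add x0, x2, x1, lsl #3"), while Operand(w1, UXTW) is
// the extended-register form "w1, uxtw".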

bool Operand::IsHeapObjectRequest() const {
  DCHECK_IMPLIES(heap_object_request_.has_value(), reg_ == NoReg);
  DCHECK_IMPLIES(heap_object_request_.has_value(),
                 immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT ||
                     immediate_.rmode() == RelocInfo::CODE_TARGET);
  return heap_object_request_.has_value();
}

HeapObjectRequest Operand::heap_object_request() const {
  DCHECK(IsHeapObjectRequest());
  return *heap_object_request_;
}

bool Operand::IsImmediate() const {
  return reg_ == NoReg && !IsHeapObjectRequest();
}

bool Operand::IsShiftedRegister() const {
  return reg_.is_valid() && (shift_ != NO_SHIFT);
}

bool Operand::IsExtendedRegister() const {
  return reg_.is_valid() && (extend_ != NO_EXTEND);
}

bool Operand::IsZero() const {
  if (IsImmediate()) {
    return ImmediateValue() == 0;
  } else {
    return reg().IsZero();
  }
}

Operand Operand::ToExtendedRegister() const {
  DCHECK(IsShiftedRegister());
  DCHECK((shift_ == LSL) && (shift_amount_ <= 4));
  return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
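
// A shifted-register operand "reg, LSL #imm" with imm <= 4 is equivalent to
// an extended-register operand; ToExtendedRegister performs that conversion
// for instructions that only accept the extended form.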

Operand Operand::ToW() const {
  if (IsShiftedRegister()) {
    DCHECK(reg_.Is64Bits());
    return Operand(reg_.W(), shift(), shift_amount());
  } else if (IsExtendedRegister()) {
    DCHECK(reg_.Is64Bits());
    return Operand(reg_.W(), extend(), shift_amount());
  }
  DCHECK(IsImmediate());
  return *this;
}

Immediate Operand::immediate_for_heap_object_request() const {
  DCHECK((heap_object_request().kind() == HeapObjectRequest::kHeapNumber &&
          immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT) ||
         (heap_object_request().kind() == HeapObjectRequest::kStringConstant &&
          immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT));
  return immediate_;
}

Immediate Operand::immediate() const {
  DCHECK(IsImmediate());
  return immediate_;
}

int64_t Operand::ImmediateValue() const {
  DCHECK(IsImmediate());
  return immediate_.value();
}

RelocInfo::Mode Operand::ImmediateRMode() const {
  DCHECK(IsImmediate() || IsHeapObjectRequest());
  return immediate_.rmode();
}

Register Operand::reg() const {
  DCHECK(IsShiftedRegister() || IsExtendedRegister());
  return reg_;
}

Shift Operand::shift() const {
  DCHECK(IsShiftedRegister());
  return shift_;
}

Extend Operand::extend() const {
  DCHECK(IsExtendedRegister());
  return extend_;
}

unsigned Operand::shift_amount() const {
  DCHECK(IsShiftedRegister() || IsExtendedRegister());
  return shift_amount_;
}

MemOperand::MemOperand()
    : base_(NoReg),
      regoffset_(NoReg),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {}

MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
    : base_(base),
      regoffset_(NoReg),
      offset_(offset),
      addrmode_(addrmode),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {
  DCHECK(base.Is64Bits() && !base.IsZero());
}

MemOperand::MemOperand(Register base, Register regoffset, Extend extend,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  DCHECK(base.Is64Bits() && !base.IsZero());
  DCHECK(!regoffset.IsSP());
  DCHECK((extend == UXTW) || (extend == SXTW) || (extend == SXTX));

  // SXTX extend mode requires a 64-bit offset register.
  DCHECK(regoffset.Is64Bits() || (extend != SXTX));
}

MemOperand::MemOperand(Register base, Register regoffset, Shift shift,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  DCHECK(base.Is64Bits() && !base.IsZero());
  DCHECK(regoffset.Is64Bits() && !regoffset.IsSP());
  DCHECK(shift == LSL);
}

MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
    : base_(base), regoffset_(NoReg), addrmode_(addrmode) {
  DCHECK(base.Is64Bits() && !base.IsZero());

  if (offset.IsImmediate()) {
    offset_ = offset.ImmediateValue();
  } else if (offset.IsShiftedRegister()) {
    DCHECK((addrmode == Offset) || (addrmode == PostIndex));

    regoffset_ = offset.reg();
    shift_ = offset.shift();
    shift_amount_ = offset.shift_amount();

    extend_ = NO_EXTEND;
    offset_ = 0;

    // These assertions match those in the shifted-register constructor.
    DCHECK(regoffset_.Is64Bits() && !regoffset_.IsSP());
    DCHECK(shift_ == LSL);
  } else {
    DCHECK(offset.IsExtendedRegister());
    DCHECK(addrmode == Offset);

    regoffset_ = offset.reg();
    extend_ = offset.extend();
    shift_amount_ = offset.shift_amount();

    shift_ = NO_SHIFT;
    offset_ = 0;

    // These assertions match those in the extended-register constructor.
    DCHECK(!regoffset_.IsSP());
    DCHECK((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
    DCHECK((regoffset_.Is64Bits() || (extend_ != SXTX)));
  }
}
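
// Illustrative addressing modes produced by these constructors:
//   MemOperand(x0, 8)            -> [x0, #8]          (immediate offset)
//   MemOperand(x0, x1, LSL, 3)   -> [x0, x1, lsl #3]  (register offset)
//   MemOperand(x0, 8, PreIndex)  -> [x0, #8]!         (pre-index)
//   MemOperand(x0, 8, PostIndex) -> [x0], #8          (post-index)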

bool MemOperand::IsImmediateOffset() const {
  return (addrmode_ == Offset) && regoffset_ == NoReg;
}

bool MemOperand::IsRegisterOffset() const {
  return (addrmode_ == Offset) && regoffset_ != NoReg;
}

bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }

bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; }

void Assembler::Unreachable() { debug("UNREACHABLE", __LINE__, BREAK); }

Address Assembler::target_pointer_address_at(Address pc) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  DCHECK(instr->IsLdrLiteralX() || instr->IsLdrLiteralW());
  return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
}

// Read/Modify the code target address in the branch/call instruction at pc.
Address Assembler::target_address_at(Address pc, Address constant_pool) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsLdrLiteralX()) {
    return Memory<Address>(target_pointer_address_at(pc));
  } else {
    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
    return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
  }
}
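
// A code target is referenced either directly by a pc-relative branch (B/BL)
// or indirectly through a constant-pool entry loaded with an LDR-literal
// instruction; the accessors above and the setters below handle both shapes.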

Tagged_t Assembler::target_compressed_address_at(Address pc,
                                                 Address constant_pool) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  CHECK(instr->IsLdrLiteralW());
  return Memory<Tagged_t>(target_pointer_address_at(pc));
}

Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsLdrLiteralX()) {
    return Handle<Code>(reinterpret_cast<Address*>(
        Assembler::target_address_at(pc, 0 /* unused */)));
  } else {
    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
    DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0);
    return Handle<Code>::cast(
        GetEmbeddedObject(instr->ImmPCOffset() >> kInstrSizeLog2));
  }
}

AssemblerBase::EmbeddedObjectIndex
Assembler::embedded_object_index_referenced_from(Address pc) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsLdrLiteralX()) {
    STATIC_ASSERT(sizeof(EmbeddedObjectIndex) == sizeof(intptr_t));
    return Memory<EmbeddedObjectIndex>(target_pointer_address_at(pc));
  } else {
    DCHECK(instr->IsLdrLiteralW());
    return Memory<uint32_t>(target_pointer_address_at(pc));
  }
}

void Assembler::set_embedded_object_index_referenced_from(
    Address pc, EmbeddedObjectIndex data) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsLdrLiteralX()) {
    Memory<EmbeddedObjectIndex>(target_pointer_address_at(pc)) = data;
  } else {
    DCHECK(instr->IsLdrLiteralW());
    DCHECK(is_uint32(data));
    WriteUnalignedValue<uint32_t>(target_pointer_address_at(pc),
                                  static_cast<uint32_t>(data));
  }
}

Handle<HeapObject> Assembler::target_object_handle_at(Address pc) {
  return GetEmbeddedObject(
      Assembler::embedded_object_index_referenced_from(pc));
}
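
// Constant-pool slots for embedded objects hold an EmbeddedObjectIndex into
// the assembler's embedded-object list while code is being assembled: a
// 64-bit slot for full pointers (LDR-literal X) and a 32-bit slot for
// compressed pointers (LDR-literal W).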

Address Assembler::runtime_entry_at(Address pc) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsLdrLiteralX()) {
    return Assembler::target_address_at(pc, 0 /* unused */);
  } else {
    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
    return instr->ImmPCOffset() + options().code_range_start;
  }
}

int Assembler::deserialization_special_target_size(Address location) {
  Instruction* instr = reinterpret_cast<Instruction*>(location);
  if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) {
    return kSpecialTargetSize;
  } else {
    DCHECK_EQ(instr->InstructionBits(), 0);
    return kSystemPointerSize;
  }
}

void Assembler::deserialization_set_special_target_at(Address location,
                                                      Code code,
                                                      Address target) {
  Instruction* instr = reinterpret_cast<Instruction*>(location);
  if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) {
    if (target == 0) {
      // We are simply wiping the target out for serialization. Set the offset
      // to zero instead.
      target = location;
    }
    instr->SetBranchImmTarget(reinterpret_cast<Instruction*>(target));
    FlushInstructionCache(location, kInstrSize);
  } else {
    DCHECK_EQ(instr->InstructionBits(), 0);
    Memory<Address>(location) = target;
    // Intuitively, we would think it is necessary to always flush the
    // instruction cache after patching a target address in the code. However,
    // in this case, only the constant pool contents change. The instruction
    // accessing the constant pool remains unchanged, so a flush is not
    // required.
  }
}

void Assembler::deserialization_set_target_internal_reference_at(
    Address pc, Address target, RelocInfo::Mode mode) {
  WriteUnalignedValue<Address>(pc, target);
}

void Assembler::set_target_address_at(Address pc, Address constant_pool,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsLdrLiteralX()) {
    Memory<Address>(target_pointer_address_at(pc)) = target;
    // Intuitively, we would think it is necessary to always flush the
    // instruction cache after patching a target address in the code. However,
    // in this case, only the constant pool contents change. The instruction
    // accessing the constant pool remains unchanged, so a flush is not
    // required.
  } else {
    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
    if (target == 0) {
      // We are simply wiping the target out for serialization. Set the offset
      // to zero instead.
      target = pc;
    }
    instr->SetBranchImmTarget(reinterpret_cast<Instruction*>(target));
    if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
      FlushInstructionCache(pc, kInstrSize);
    }
  }
}

void Assembler::set_target_compressed_address_at(
    Address pc, Address constant_pool, Tagged_t target,
    ICacheFlushMode icache_flush_mode) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  CHECK(instr->IsLdrLiteralW());
  Memory<Tagged_t>(target_pointer_address_at(pc)) = target;
}

int RelocInfo::target_address_size() {
  if (IsCodedSpecially()) {
    return Assembler::kSpecialTargetSize;
  } else {
    Instruction* instr = reinterpret_cast<Instruction*>(pc_);
    DCHECK(instr->IsLdrLiteralX() || instr->IsLdrLiteralW());
    return instr->IsLdrLiteralW() ? kTaggedSize : kSystemPointerSize;
  }
}

Address RelocInfo::target_address() {
  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
  return Assembler::target_address_at(pc_, constant_pool_);
}

Address RelocInfo::target_address_address() {
  DCHECK(HasTargetAddressAddress());
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  // Read the address of the word containing the target_address in an
  // instruction stream.
  // The only architecture-independent user of this function is the serializer.
  // The serializer uses it to find out how many raw bytes of instruction to
  // output before the next target.
  // For an instruction like B/BL, where the target bits are mixed into the
  // instruction bits, the size of the target will be zero, indicating that the
  // serializer should not step forward in memory after a target is resolved
  // and written.
  // For LDR literal instructions, we can skip up to the constant pool entry
  // address. We make sure that RelocInfo is ordered by the
  // target_address_address so that we do not skip over any relocatable
  // instruction sequences.
  if (instr->IsLdrLiteralX()) {
    return constant_pool_entry_address();
  } else {
    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
    return pc_;
  }
}

Address RelocInfo::constant_pool_entry_address() {
  DCHECK(IsInConstantPool());
  return Assembler::target_pointer_address_at(pc_);
}

HeapObject RelocInfo::target_object() {
  DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
  if (IsDataEmbeddedObject(rmode_)) {
    return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
  } else if (IsCompressedEmbeddedObject(rmode_)) {
    CHECK(!host_.is_null());
    return HeapObject::cast(Object(DecompressTaggedAny(
        host_.address(),
        Assembler::target_compressed_address_at(pc_, constant_pool_))));
  } else {
    return HeapObject::cast(
        Object(Assembler::target_address_at(pc_, constant_pool_)));
  }
}

HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
  if (IsCompressedEmbeddedObject(rmode_)) {
    return HeapObject::cast(Object(DecompressTaggedAny(
        isolate,
        Assembler::target_compressed_address_at(pc_, constant_pool_))));
  } else {
    return target_object();
  }
}

Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
  if (IsDataEmbeddedObject(rmode_)) {
    return Handle<HeapObject>::cast(ReadUnalignedValue<Handle<Object>>(pc_));
  } else if (IsEmbeddedObjectMode(rmode_)) {
    return origin->target_object_handle_at(pc_);
  } else {
    DCHECK(IsCodeTarget(rmode_));
    return origin->code_target_object_handle_at(pc_);
  }
}

void RelocInfo::set_target_object(Heap* heap, HeapObject target,
                                  WriteBarrierMode write_barrier_mode,
                                  ICacheFlushMode icache_flush_mode) {
  DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
  if (IsDataEmbeddedObject(rmode_)) {
    WriteUnalignedValue(pc_, target.ptr());
    // No need to flush icache since no instructions were changed.
  } else if (IsCompressedEmbeddedObject(rmode_)) {
    Assembler::set_target_compressed_address_at(
        pc_, constant_pool_, CompressTagged(target.ptr()), icache_flush_mode);
  } else {
    DCHECK(IsFullEmbeddedObject(rmode_));
    Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
                                     icache_flush_mode);
  }
  if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
      !FLAG_disable_write_barriers) {
    WriteBarrierForCode(host(), this, target);
  }
}

Address RelocInfo::target_external_reference() {
  DCHECK(rmode_ == EXTERNAL_REFERENCE);
  return Assembler::target_address_at(pc_, constant_pool_);
}

void RelocInfo::set_target_external_reference(
    Address target, ICacheFlushMode icache_flush_mode) {
  DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
  Assembler::set_target_address_at(pc_, constant_pool_, target,
                                   icache_flush_mode);
}

Address RelocInfo::target_internal_reference() {
  DCHECK(rmode_ == INTERNAL_REFERENCE);
  return ReadUnalignedValue<Address>(pc_);
}

Address RelocInfo::target_internal_reference_address() {
  DCHECK(rmode_ == INTERNAL_REFERENCE);
  return pc_;
}

Address RelocInfo::target_runtime_entry(Assembler* origin) {
  DCHECK(IsRuntimeEntry(rmode_));
  return origin->runtime_entry_at(pc_);
}

void RelocInfo::set_target_runtime_entry(Address target,
                                         WriteBarrierMode write_barrier_mode,
                                         ICacheFlushMode icache_flush_mode) {
  DCHECK(IsRuntimeEntry(rmode_));
  if (target_address() != target) {
    set_target_address(target, write_barrier_mode, icache_flush_mode);
  }
}

Address RelocInfo::target_off_heap_target() {
  DCHECK(IsOffHeapTarget(rmode_));
  return Assembler::target_address_at(pc_, constant_pool_);
}

void RelocInfo::WipeOut() {
  DCHECK(IsEmbeddedObjectMode(rmode_) || IsCodeTarget(rmode_) ||
         IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
         IsInternalReference(rmode_) || IsOffHeapTarget(rmode_));
  if (IsInternalReference(rmode_)) {
    WriteUnalignedValue<Address>(pc_, kNullAddress);
  } else if (IsCompressedEmbeddedObject(rmode_)) {
    Assembler::set_target_compressed_address_at(pc_, constant_pool_,
                                                kNullAddress);
  } else {
    Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
  }
}

LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
  DCHECK(rt.is_valid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDR_x : LDR_w;
  } else {
    DCHECK(rt.IsVRegister());
    switch (rt.SizeInBits()) {
      case kBRegSizeInBits:
        return LDR_b;
      case kHRegSizeInBits:
        return LDR_h;
      case kSRegSizeInBits:
        return LDR_s;
      case kDRegSizeInBits:
        return LDR_d;
      default:
        DCHECK(rt.IsQ());
        return LDR_q;
    }
  }
}

LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
  DCHECK(rt.is_valid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STR_x : STR_w;
  } else {
    DCHECK(rt.IsVRegister());
    switch (rt.SizeInBits()) {
      case kBRegSizeInBits:
        return STR_b;
      case kHRegSizeInBits:
        return STR_h;
      case kSRegSizeInBits:
        return STR_s;
      case kDRegSizeInBits:
        return STR_d;
      default:
        DCHECK(rt.IsQ());
        return STR_q;
    }
  }
}

LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
                                         const CPURegister& rt2) {
  DCHECK_EQ(STP_w | LoadStorePairLBit, LDP_w);
  return static_cast<LoadStorePairOp>(StorePairOpFor(rt, rt2) |
                                      LoadStorePairLBit);
}

LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
                                          const CPURegister& rt2) {
  DCHECK(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STP_x : STP_w;
  } else {
    DCHECK(rt.IsVRegister());
    switch (rt.SizeInBits()) {
      case kSRegSizeInBits:
        return STP_s;
      case kDRegSizeInBits:
        return STP_d;
      default:
        DCHECK(rt.IsQ());
        return STP_q;
    }
  }
}

LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
  } else {
    DCHECK(rt.IsVRegister());
    return rt.Is64Bits() ? LDR_d_lit : LDR_s_lit;
  }
}

int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
  DCHECK_EQ(kStartOfLabelLinkChain, 0);
  int offset = LinkAndGetByteOffsetTo(label);
  DCHECK(IsAligned(offset, kInstrSize));
  return offset >> kInstrSizeLog2;
}

Instr Assembler::Flags(FlagsUpdate S) {
  if (S == SetFlags) {
    return 1 << FlagsUpdate_offset;
  } else if (S == LeaveFlags) {
    return 0 << FlagsUpdate_offset;
  }
  UNREACHABLE();
}

Instr Assembler::Cond(Condition cond) { return cond << Condition_offset; }

Instr Assembler::ImmPCRelAddress(int imm21) {
  CHECK(is_int21(imm21));
  Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
  Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
  Instr immlo = imm << ImmPCRelLo_offset;
  return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
}

Instr Assembler::ImmUncondBranch(int imm26) {
  CHECK(is_int26(imm26));
  return truncate_to_int26(imm26) << ImmUncondBranch_offset;
}

Instr Assembler::ImmCondBranch(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmCondBranch_offset;
}

Instr Assembler::ImmCmpBranch(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmCmpBranch_offset;
}

Instr Assembler::ImmTestBranch(int imm14) {
  CHECK(is_int14(imm14));
  return truncate_to_int14(imm14) << ImmTestBranch_offset;
}

Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
  DCHECK(is_uint6(bit_pos));
  // Subtract five from the shift offset, as we need bit 5 from bit_pos.
  unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
  unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
  b5 &= ImmTestBranchBit5_mask;
  b40 &= ImmTestBranchBit40_mask;
  return b5 | b40;
}
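
// TBZ/TBNZ store the tested bit number split across two fields: b40 holds
// bits 4:0 and b5 (instruction bit 31) holds bit 5.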

Instr Assembler::SF(Register rd) {
  return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
}

Instr Assembler::ImmAddSub(int imm) {
  DCHECK(IsImmAddSub(imm));
  if (is_uint12(imm)) {  // No shift required.
    imm <<= ImmAddSub_offset;
  } else {
    imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
  }
  return imm;
}
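
// Add/sub immediates are 12 bits, optionally left-shifted by 12: ImmAddSub(42)
// uses the unshifted form, while ImmAddSub(0x5000) encodes 0x5 with the shift
// bit set (as in "add x0, x1, #0x5000").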

Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
  DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(imms)));
  USE(reg_size);
  return imms << ImmS_offset;
}

Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
  DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
  USE(reg_size);
  DCHECK(is_uint6(immr));
  return immr << ImmR_offset;
}

Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  DCHECK(is_uint6(imms));
  DCHECK((reg_size == kXRegSizeInBits) || is_uint6(imms + 3));
  USE(reg_size);
  return imms << ImmSetBits_offset;
}

Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
  USE(reg_size);
  return immr << ImmRotate_offset;
}

Instr Assembler::ImmLLiteral(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmLLiteral_offset;
}

Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  DCHECK((reg_size == kXRegSizeInBits) || (bitn == 0));
  USE(reg_size);
  return bitn << BitN_offset;
}

Instr Assembler::ShiftDP(Shift shift) {
  DCHECK(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
  return shift << ShiftDP_offset;
}

Instr Assembler::ImmDPShift(unsigned amount) {
  DCHECK(is_uint6(amount));
  return amount << ImmDPShift_offset;
}

Instr Assembler::ExtendMode(Extend extend) {
  return extend << ExtendMode_offset;
}

Instr Assembler::ImmExtendShift(unsigned left_shift) {
  DCHECK_LE(left_shift, 4);
  return left_shift << ImmExtendShift_offset;
}

Instr Assembler::ImmCondCmp(unsigned imm) {
  DCHECK(is_uint5(imm));
  return imm << ImmCondCmp_offset;
}

Instr Assembler::Nzcv(StatusFlags nzcv) {
  return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
}

Instr Assembler::ImmLSUnsigned(int imm12) {
  DCHECK(is_uint12(imm12));
  return imm12 << ImmLSUnsigned_offset;
}

Instr Assembler::ImmLS(int imm9) {
  DCHECK(is_int9(imm9));
  return truncate_to_int9(imm9) << ImmLS_offset;
}

Instr Assembler::ImmLSPair(int imm7, unsigned size) {
  DCHECK_EQ(imm7,
            static_cast<int>(static_cast<uint32_t>(imm7 >> size) << size));
  int scaled_imm7 = imm7 >> size;
  DCHECK(is_int7(scaled_imm7));
  return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
}
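
// Example: for "ldp x0, x1, [sp, #16]" the byte offset 16 is encoded as
// scaled_imm7 = 16 >> 3 = 2; the signed 7-bit field thus covers byte offsets
// -512..504 for X-register pairs.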

Instr Assembler::ImmShiftLS(unsigned shift_amount) {
  DCHECK(is_uint1(shift_amount));
  return shift_amount << ImmShiftLS_offset;
}

Instr Assembler::ImmException(int imm16) {
  DCHECK(is_uint16(imm16));
  return imm16 << ImmException_offset;
}

Instr Assembler::ImmSystemRegister(int imm15) {
  DCHECK(is_uint15(imm15));
  return imm15 << ImmSystemRegister_offset;
}

Instr Assembler::ImmHint(int imm7) {
  DCHECK(is_uint7(imm7));
  return imm7 << ImmHint_offset;
}

Instr Assembler::ImmBarrierDomain(int imm2) {
  DCHECK(is_uint2(imm2));
  return imm2 << ImmBarrierDomain_offset;
}

Instr Assembler::ImmBarrierType(int imm2) {
  DCHECK(is_uint2(imm2));
  return imm2 << ImmBarrierType_offset;
}

unsigned Assembler::CalcLSDataSize(LoadStoreOp op) {
  DCHECK((LSSize_offset + LSSize_width) == (kInstrSize * 8));
  unsigned size = static_cast<Instr>(op >> LSSize_offset);
  if ((op & LSVector_mask) != 0) {
    // Vector register memory operations encode the access size in the "size"
    // and "opc" fields.
    if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
      size = kQRegSizeLog2;
    }
  }
  return size;
}

Instr Assembler::ImmMoveWide(int imm) {
  DCHECK(is_uint16(imm));
  return imm << ImmMoveWide_offset;
}

Instr Assembler::ShiftMoveWide(int shift) {
  DCHECK(is_uint2(shift));
  return shift << ShiftMoveWide_offset;
}

Instr Assembler::FPType(VRegister fd) { return fd.Is64Bits() ? FP64 : FP32; }

Instr Assembler::FPScale(unsigned scale) {
  DCHECK(is_uint6(scale));
  return scale << FPScale_offset;
}

const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
  return reg.Is64Bits() ? xzr : wzr;
}

inline void Assembler::CheckBufferSpace() {
  DCHECK_LT(pc_, buffer_start_ + buffer_->size());
  if (buffer_space() < kGap) {
    GrowBuffer();
  }
}

inline void Assembler::CheckBuffer() {
  CheckBufferSpace();
  if (pc_offset() >= next_veneer_pool_check_) {
    CheckVeneerPool(false, true);
  }
  constpool_.MaybeCheck();
}
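
// CheckBufferSpace grows the code buffer whenever fewer than kGap bytes of
// headroom remain; CheckBuffer runs each time an instruction is emitted and
// additionally gives the veneer pool and the constant pool a chance to be
// emitted.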

}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_