// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#ifndef V8_ASSEMBLER_H_
#define V8_ASSEMBLER_H_

#include <forward_list>
#include <iosfwd>
#include <map>

#include "src/allocation.h"
#include "src/code-reference.h"
#include "src/contexts.h"
#include "src/deoptimize-reason.h"
#include "src/double.h"
#include "src/external-reference.h"
#include "src/flags.h"
#include "src/globals.h"
#include "src/label.h"
#include "src/objects.h"
#include "src/register-configuration.h"
#include "src/reglist.h"
#include "src/reloc-info.h"

namespace v8 {

// Forward declarations.
class ApiFunction;

namespace internal {

// Forward declarations.
class EmbeddedData;
class InstructionStream;
class Isolate;
class SCTableReference;
class SourcePosition;
class StatsCounter;

// -----------------------------------------------------------------------------
// Optimization for far-jmp-like instructions that can be replaced by shorter
// ones.

class JumpOptimizationInfo {
 public:
  bool is_collecting() const { return stage_ == kCollection; }
  bool is_optimizing() const { return stage_ == kOptimization; }
  void set_optimizing() { stage_ = kOptimization; }

  bool is_optimizable() const { return optimizable_; }
  void set_optimizable() { optimizable_ = true; }

  // Used to verify that the instruction sequence is the same in both stages.
  size_t hash_code() const { return hash_code_; }
  void set_hash_code(size_t hash_code) { hash_code_ = hash_code; }

  std::vector<uint32_t>& farjmp_bitmap() { return farjmp_bitmap_; }

 private:
  enum { kCollection, kOptimization } stage_ = kCollection;
  bool optimizable_ = false;
  std::vector<uint32_t> farjmp_bitmap_;
  size_t hash_code_ = 0u;
};
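
// A two-pass usage sketch (hypothetical driver code; the real call sites live
// in the compiler pipeline and the platform assemblers):
//
//   JumpOptimizationInfo jump_opt;
//   assembler->set_jump_optimization_info(&jump_opt);
//   GenerateCode(assembler);            // collection stage: record far jumps
//   if (jump_opt.is_optimizable()) {
//     jump_opt.set_optimizing();
//     GenerateCode(second_assembler);   // optimization stage: emit short forms
//   }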

class HeapObjectRequest {
 public:
  explicit HeapObjectRequest(double heap_number, int offset = -1);
  explicit HeapObjectRequest(CodeStub* code_stub, int offset = -1);

  enum Kind { kHeapNumber, kCodeStub };
  Kind kind() const { return kind_; }

  double heap_number() const {
    DCHECK_EQ(kind(), kHeapNumber);
    return value_.heap_number;
  }

  CodeStub* code_stub() const {
    DCHECK_EQ(kind(), kCodeStub);
    return value_.code_stub;
  }

  // The code buffer offset at the time of the request.
  int offset() const {
    DCHECK_GE(offset_, 0);
    return offset_;
  }
  void set_offset(int offset) {
    DCHECK_LT(offset_, 0);
    offset_ = offset;
    DCHECK_GE(offset_, 0);
  }

 private:
  Kind kind_;

  union {
    double heap_number;
    CodeStub* code_stub;
  } value_;

  int offset_;
};
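
// Illustrative sketch of how a platform assembler uses this; the emitting
// method shown here is hypothetical and the patching details are
// platform-specific:
//
//   void Assembler::MoveHeapNumber(Register dst, double value) {
//     RequestHeapObject(HeapObjectRequest(value, pc_offset()));
//     // Emit the move with a placeholder; the allocated HeapNumber is
//     // patched in at the recorded offset once allocation is possible.
//   }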

// -----------------------------------------------------------------------------
// Platform independent assembler base class.

enum class CodeObjectRequired { kNo, kYes };

struct V8_EXPORT_PRIVATE AssemblerOptions {
  // Recording reloc info for external references and off-heap targets is
  // needed whenever code is serialized, e.g. into the snapshot or as a WASM
  // module. This flag allows this reloc info to be disabled for code that
  // will not survive process destruction.
  bool record_reloc_info_for_serialization = true;
  // Recording reloc info can be disabled wholesale. This is needed when the
  // assembler is used on existing code directly (e.g. JumpTableAssembler)
  // without any buffer to hold reloc information.
  bool disable_reloc_info_for_patching = false;
  // Enables access to external references by computing a delta from the root
  // array. Only valid if the code will not survive the process.
  bool enable_root_array_delta_access = false;
  // Enables specific assembler sequences only used for the simulator.
  bool enable_simulator_code = false;
  // Enables use of isolate-independent constants, indirected through the
  // root array (macro assembler feature).
  bool isolate_independent_code = false;
  // Enables the use of isolate-independent builtins through an off-heap
  // trampoline (macro assembler feature).
  bool inline_offheap_trampolines = false;
  // On some platforms, all code is within a given range in the process,
  // and the start of this range is configured here.
  Address code_range_start = 0;
  // Enables pc-relative calls/jumps on platforms that support it. When setting
  // this flag, the code range must be small enough to fit all offsets into
  // the instruction immediates.
  bool use_pc_relative_calls_and_jumps = false;

  static AssemblerOptions Default(
      Isolate* isolate, bool explicitly_support_serialization = false);
};
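
// Typical setup (a minimal sketch; exact constructor arguments vary by
// platform assembler):
//
//   AssemblerOptions options = AssemblerOptions::Default(isolate);
//   options.record_reloc_info_for_serialization = false;  // short-lived code
//   Assembler assm(options, nullptr, 0);  // nullptr: assembler owns buffer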

class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
 public:
  AssemblerBase(const AssemblerOptions& options, void* buffer, int buffer_size);
  virtual ~AssemblerBase();

  const AssemblerOptions& options() const { return options_; }

  bool emit_debug_code() const { return emit_debug_code_; }
  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }

  bool predictable_code_size() const { return predictable_code_size_; }
  void set_predictable_code_size(bool value) { predictable_code_size_ = value; }

  uint64_t enabled_cpu_features() const { return enabled_cpu_features_; }
  void set_enabled_cpu_features(uint64_t features) {
    enabled_cpu_features_ = features;
  }
  // Features are usually enabled by CpuFeatureScope, which also asserts that
  // the features are supported before they are enabled.
  bool IsEnabled(CpuFeature f) {
    return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
  }
  void EnableCpuFeature(CpuFeature f) {
    enabled_cpu_features_ |= (static_cast<uint64_t>(1) << f);
  }

  bool is_constant_pool_available() const {
    if (FLAG_enable_embedded_constant_pool) {
      return constant_pool_available_;
    } else {
      // Embedded constant pool not supported on this architecture.
      UNREACHABLE();
    }
  }

  JumpOptimizationInfo* jump_optimization_info() {
    return jump_optimization_info_;
  }
  void set_jump_optimization_info(JumpOptimizationInfo* jump_opt) {
    jump_optimization_info_ = jump_opt;
  }

  // Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
  // cross-snapshotting.
  static void QuietNaN(HeapObject* nan) {}

  int pc_offset() const { return static_cast<int>(pc_ - buffer_); }

  // This function is called when code generation is aborted, so that
  // the assembler can clean up internal data structures.
  virtual void AbortedCodeGeneration() {}

  // Debugging
  void Print(Isolate* isolate);

  static const int kMinimalBufferSize = 4 * KB;

  static void FlushICache(void* start, size_t size);
  static void FlushICache(Address start, size_t size) {
    return FlushICache(reinterpret_cast<void*>(start), size);
  }

  // Used to print the name of some special registers.
  static const char* GetSpecialRegisterName(int code) { return "UNKNOWN"; }

 protected:
  // Add 'target' to the {code_targets_} vector, if necessary, and return the
  // offset at which it is stored.
  int AddCodeTarget(Handle<Code> target);
  Handle<Code> GetCodeTarget(intptr_t code_target_index) const;
  // Update the code target at {code_target_index} to {target}.
  void UpdateCodeTarget(intptr_t code_target_index, Handle<Code> target);
  // Reserves space in the code target vector.
  void ReserveCodeTargetSpace(size_t num_of_code_targets) {
    code_targets_.reserve(num_of_code_targets);
  }

  // The buffer into which code and relocation info are generated. It could
  // either be owned by the assembler or be provided externally.
  byte* buffer_;
  int buffer_size_;
  bool own_buffer_;
  std::forward_list<HeapObjectRequest> heap_object_requests_;
  // The program counter, which points into the buffer above and moves forward.
  // TODO(jkummerow): This should probably have type {Address}.
  byte* pc_;

  void set_constant_pool_available(bool available) {
    if (FLAG_enable_embedded_constant_pool) {
      constant_pool_available_ = available;
    } else {
      // Embedded constant pool not supported on this architecture.
      UNREACHABLE();
    }
  }

  // {RequestHeapObject} records the need for a future heap number allocation
  // or code stub generation. After code assembly, each platform's
  // {Assembler::AllocateAndInstallRequestedHeapObjects} will allocate these
  // objects and place them where they are expected (determined by the pc
  // offset associated with each request).
  void RequestHeapObject(HeapObjectRequest request);

 private:
  // Before we copy code into the code space, we sometimes cannot encode
  // call/jump code targets as we normally would, as the difference between the
  // instruction's location in the temporary buffer and the call target is not
  // guaranteed to fit in the instruction's offset field. We keep track of the
  // code handles we encounter in calls in this vector, and encode the index of
  // the code handle in the vector instead.
  std::vector<Handle<Code>> code_targets_;

  const AssemblerOptions options_;
  uint64_t enabled_cpu_features_;
  bool emit_debug_code_;
  bool predictable_code_size_;

  // Indicates whether the constant pool can be accessed, which is only
  // possible if the pp register points to the current code object's constant
  // pool.
  bool constant_pool_available_;

  JumpOptimizationInfo* jump_optimization_info_;

  // Constant pool.
  friend class FrameAndConstantPoolScope;
  friend class ConstantPoolUnavailableScope;
};
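
// Sketch of the code-target indirection used by platform assemblers (the
// emit helper named here is hypothetical):
//
//   int index = AddCodeTarget(code);   // dedupes into code_targets_
//   EmitIndexAsCodeTarget(index);      // encode the index, not the address
//   // RelocInfo::CODE_TARGET entries are later resolved back through
//   // GetCodeTarget(index).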

// Avoids emitting debug code during the lifetime of this scope object.
class DontEmitDebugCodeScope BASE_EMBEDDED {
 public:
  explicit DontEmitDebugCodeScope(AssemblerBase* assembler)
      : assembler_(assembler), old_value_(assembler->emit_debug_code()) {
    assembler_->set_emit_debug_code(false);
  }
  ~DontEmitDebugCodeScope() {
    assembler_->set_emit_debug_code(old_value_);
  }

 private:
  AssemblerBase* assembler_;
  bool old_value_;
};
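
// Usage sketch:
//
//   {
//     DontEmitDebugCodeScope no_debug_code(assembler);
//     // Code generated here omits debug checks; the previous setting is
//     // restored when the scope exits.
//   }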


// Avoids using instructions that vary in size in unpredictable ways between
// the snapshot and the running VM.
class PredictableCodeSizeScope {
 public:
  PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
  ~PredictableCodeSizeScope();

 private:
  AssemblerBase* const assembler_;
  int const expected_size_;
  int const start_offset_;
  bool const old_value_;
};
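
// Usage sketch (kCallSequenceSize is a hypothetical constant; the emitted
// size must be known up front):
//
//   {
//     PredictableCodeSizeScope scope(assembler, kCallSequenceSize);
//     // Exactly kCallSequenceSize bytes should be emitted here; the scope
//     // is expected to verify this in debug builds.
//   }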


// Enable a specified feature within a scope.
class CpuFeatureScope BASE_EMBEDDED {
 public:
  enum CheckPolicy {
    kCheckSupported,
    kDontCheckSupported,
  };

#ifdef DEBUG
  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
                  CheckPolicy check = kCheckSupported);
  ~CpuFeatureScope();

 private:
  AssemblerBase* assembler_;
  uint64_t old_enabled_;
#else
  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
                  CheckPolicy check = kCheckSupported) {}
  // Define a destructor to avoid unused variable warnings.
  ~CpuFeatureScope() {}
#endif
};


// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a CpuFeatureScope before use.
// Example:
//   if (assembler->IsSupported(SSE3)) {
//     CpuFeatureScope fscope(assembler, SSE3);
//     // Generate code containing SSE3 instructions.
//   } else {
//     // Generate alternative code.
//   }
class CpuFeatures : public AllStatic {
 public:
  static void Probe(bool cross_compile) {
    STATIC_ASSERT(NUMBER_OF_CPU_FEATURES <= kBitsPerInt);
    if (initialized_) return;
    initialized_ = true;
    ProbeImpl(cross_compile);
  }

  static unsigned SupportedFeatures() {
    Probe(false);
    return supported_;
  }

  static bool IsSupported(CpuFeature f) {
    return (supported_ & (1u << f)) != 0;
  }

  static inline bool SupportsOptimizer();

  static inline bool SupportsWasmSimd128();

  static inline unsigned icache_line_size() {
    DCHECK_NE(icache_line_size_, 0);
    return icache_line_size_;
  }

  static inline unsigned dcache_line_size() {
    DCHECK_NE(dcache_line_size_, 0);
    return dcache_line_size_;
  }

  static void PrintTarget();
  static void PrintFeatures();

 private:
  friend class ExternalReference;
  friend class AssemblerBase;
  // Flush instruction cache.
  static void FlushICache(void* start, size_t size);

  // Platform-dependent implementation.
  static void ProbeImpl(bool cross_compile);

  static unsigned supported_;
  static unsigned icache_line_size_;
  static unsigned dcache_line_size_;
  static bool initialized_;
  DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};

// -----------------------------------------------------------------------------
// Utility functions

// Computes pow(x, y) with the special cases in the spec for Math.pow.
double power_helper(Isolate* isolate, double x, double y);
double power_double_int(double x, int y);
double power_double_double(double x, double y);
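
// For instance, per the Math.pow spec these helpers are expected to return
// 1.0 for pow(x, 0) even when x is NaN:
//
//   double one = power_double_double(std::nan(""), 0.0);  // 1.0 per spec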


// -----------------------------------------------------------------------------
// Constant pool support

class ConstantPoolEntry {
 public:
  ConstantPoolEntry() {}
  ConstantPoolEntry(int position, intptr_t value, bool sharing_ok,
                    RelocInfo::Mode rmode = RelocInfo::NONE)
      : position_(position),
        merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED),
        value_(value),
        rmode_(rmode) {}
  ConstantPoolEntry(int position, Double value,
                    RelocInfo::Mode rmode = RelocInfo::NONE)
      : position_(position),
        merged_index_(SHARING_ALLOWED),
        value64_(value.AsUint64()),
        rmode_(rmode) {}

  int position() const { return position_; }
  bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; }
  bool is_merged() const { return merged_index_ >= 0; }
  int merged_index() const {
    DCHECK(is_merged());
    return merged_index_;
  }
  void set_merged_index(int index) {
    DCHECK(sharing_ok());
    merged_index_ = index;
    DCHECK(is_merged());
  }
  int offset() const {
    DCHECK_GE(merged_index_, 0);
    return merged_index_;
  }
  void set_offset(int offset) {
    DCHECK_GE(offset, 0);
    merged_index_ = offset;
  }
  intptr_t value() const { return value_; }
  uint64_t value64() const { return value64_; }
  RelocInfo::Mode rmode() const { return rmode_; }

  enum Type { INTPTR, DOUBLE, NUMBER_OF_TYPES };

  static int size(Type type) {
    return (type == INTPTR) ? kPointerSize : kDoubleSize;
  }

  enum Access { REGULAR, OVERFLOWED };

 private:
  int position_;
  int merged_index_;
  union {
    intptr_t value_;
    uint64_t value64_;
  };
  // TODO(leszeks): The way we use this, it could probably be packed into
  // merged_index_ if size is a concern.
  RelocInfo::Mode rmode_;
  enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 };
};
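
// Lifecycle sketch (hypothetical indices/offsets): an entry is either merged
// with an identical earlier entry or assigned its own offset in the pool.
//
//   ConstantPoolEntry entry(assm->pc_offset(), imm, /* sharing_ok */ true);
//   if (duplicate_of_earlier_entry) {
//     entry.set_merged_index(index_of_first_occurrence);
//   } else {
//     entry.set_offset(offset_within_pool);  // once pool layout is known
//   }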


// -----------------------------------------------------------------------------
// Embedded constant pool support

class ConstantPoolBuilder BASE_EMBEDDED {
 public:
  ConstantPoolBuilder(int ptr_reach_bits, int double_reach_bits);

  // Add pointer-sized constant to the embedded constant pool
  ConstantPoolEntry::Access AddEntry(int position, intptr_t value,
                                     bool sharing_ok) {
    ConstantPoolEntry entry(position, value, sharing_ok);
    return AddEntry(entry, ConstantPoolEntry::INTPTR);
  }

  // Add double constant to the embedded constant pool
  ConstantPoolEntry::Access AddEntry(int position, Double value) {
    ConstantPoolEntry entry(position, value);
    return AddEntry(entry, ConstantPoolEntry::DOUBLE);
  }

  // Add double constant to the embedded constant pool
  ConstantPoolEntry::Access AddEntry(int position, double value) {
    return AddEntry(position, Double(value));
  }

  // Previews the access type required for the next new entry to be added.
  ConstantPoolEntry::Access NextAccess(ConstantPoolEntry::Type type) const;

  bool IsEmpty() {
    return info_[ConstantPoolEntry::INTPTR].entries.empty() &&
           info_[ConstantPoolEntry::INTPTR].shared_entries.empty() &&
           info_[ConstantPoolEntry::DOUBLE].entries.empty() &&
           info_[ConstantPoolEntry::DOUBLE].shared_entries.empty();
  }

  // Emit the constant pool.  Invoke only after all entries have been
  // added and all instructions have been emitted.
  // Returns position of the emitted pool (zero implies no constant pool).
  int Emit(Assembler* assm);

  // Returns the label associated with the start of the constant pool.
  // Linking to this label in the function prologue may provide an
  // efficient means of constant pool pointer register initialization
  // on some architectures.
  inline Label* EmittedPosition() { return &emitted_label_; }

 private:
  ConstantPoolEntry::Access AddEntry(ConstantPoolEntry& entry,
                                     ConstantPoolEntry::Type type);
  void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type);
  void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access,
                 ConstantPoolEntry::Type type);

  struct PerTypeEntryInfo {
    PerTypeEntryInfo() : regular_count(0), overflow_start(-1) {}
    bool overflow() const {
      return (overflow_start >= 0 &&
              overflow_start < static_cast<int>(entries.size()));
    }
    int regular_reach_bits;
    int regular_count;
    int overflow_start;
    std::vector<ConstantPoolEntry> entries;
    std::vector<ConstantPoolEntry> shared_entries;
  };

  Label emitted_label_;  // Records pc_offset of emitted pool
  PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES];
};
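
// Emission sketch (the reach-bit values are hypothetical; real users are the
// embedded-constant-pool backends):
//
//   ConstantPoolBuilder builder(/* ptr_reach_bits */ 16,
//                               /* double_reach_bits */ 16);
//   builder.AddEntry(assm->pc_offset(), imm_value, /* sharing_ok */ true);
//   // ... emit the remaining instructions ...
//   int pool_position = builder.Emit(assm);  // zero implies no constant pool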

// Base type for CPU Registers.
//
// 1) We would prefer to use an enum for registers, but enum values are
// assignment-compatible with int, which has caused code-generation bugs.
//
// 2) By not using an enum, we are possibly preventing the compiler from
// doing certain constant folds, which may significantly reduce the
// code generated for some assembly instructions (because they boil down
// to a few constants). If this is a problem, we could change the code
// such that we use an enum in optimized mode, and the class in debug
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
template <typename SubType, int kAfterLastRegister>
class RegisterBase {
  // Internal enum class; used for calling constexpr methods, where we need to
  // pass an integral type as template parameter.
  enum class RegisterCode : int { kFirst = 0, kAfterLast = kAfterLastRegister };

 public:
  static constexpr int kCode_no_reg = -1;
  static constexpr int kNumRegisters = kAfterLastRegister;

  static constexpr SubType no_reg() { return SubType{kCode_no_reg}; }

  template <int code>
  static constexpr SubType from_code() {
    static_assert(code >= 0 && code < kNumRegisters, "must be valid reg code");
    return SubType{code};
  }

  constexpr operator RegisterCode() const {
    return static_cast<RegisterCode>(reg_code_);
  }

  template <RegisterCode reg_code>
  static constexpr int code() {
    static_assert(
        reg_code >= RegisterCode::kFirst && reg_code < RegisterCode::kAfterLast,
        "must be valid reg");
    return static_cast<int>(reg_code);
  }

  template <RegisterCode reg_code>
  static constexpr RegList bit() {
    return RegList{1} << code<reg_code>();
  }

  static SubType from_code(int code) {
    DCHECK_LE(0, code);
    DCHECK_GT(kNumRegisters, code);
    return SubType{code};
  }

  // Constexpr version (pass registers as template parameters).
  template <RegisterCode... reg_codes>
  static constexpr RegList ListOf() {
    return CombineRegLists(RegisterBase::bit<reg_codes>()...);
  }

  // Non-constexpr version (pass registers as method parameters).
  template <typename... Register>
  static RegList ListOf(Register... regs) {
    return CombineRegLists(regs.bit()...);
  }

  bool is_valid() const { return reg_code_ != kCode_no_reg; }

  int code() const {
    DCHECK(is_valid());
    return reg_code_;
  }

  RegList bit() const { return RegList{1} << code(); }

  inline constexpr bool operator==(SubType other) const {
    return reg_code_ == other.reg_code_;
  }
  inline constexpr bool operator!=(SubType other) const {
    return reg_code_ != other.reg_code_;
  }

 protected:
  explicit constexpr RegisterBase(int code) : reg_code_(code) {}
  int reg_code_;
};
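
// Register-list usage sketch (register names depend on the architecture's
// SubType; eax/ebx here are placeholders). Constexpr registers convert to
// RegisterCode implicitly, so they can be passed as template arguments:
//
//   constexpr RegList kSaved = Register::ListOf<eax, ebx>();  // compile time
//   RegList saved = Register::ListOf(eax, ebx);               // run time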

template <typename SubType, int kAfterLastRegister>
inline std::ostream& operator<<(std::ostream& os,
                                RegisterBase<SubType, kAfterLastRegister> reg) {
  return reg.is_valid() ? os << "r" << reg.code() : os << "<invalid reg>";
}

}  // namespace internal
}  // namespace v8

#endif  // V8_ASSEMBLER_H_