// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#ifndef V8_CODEGEN_ASSEMBLER_H_
#define V8_CODEGEN_ASSEMBLER_H_

#include <forward_list>
#include <memory>
#include <unordered_map>

#include "src/base/macros.h"
#include "src/base/memory.h"
#include "src/codegen/code-comments.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/external-reference.h"
#include "src/codegen/reglist.h"
#include "src/codegen/reloc-info.h"
#include "src/common/globals.h"
#include "src/deoptimizer/deoptimize-reason.h"
#include "src/flags/flags.h"
#include "src/handles/handles.h"
#include "src/objects/objects.h"

namespace v8 {

// Forward declarations.
class ApiFunction;

namespace internal {

using base::Memory;
using base::ReadUnalignedValue;
using base::WriteUnalignedValue;

// Forward declarations.
class EmbeddedData;
class OffHeapInstructionStream;
class Isolate;
class SCTableReference;
class SourcePosition;
class StatsCounter;
class StringConstantBase;

// -----------------------------------------------------------------------------
// Optimization for far-jmp-like instructions that can be replaced by shorter ones.

class JumpOptimizationInfo {
 public:
  bool is_collecting() const { return stage_ == kCollection; }
  bool is_optimizing() const { return stage_ == kOptimization; }
  void set_optimizing() {
    DCHECK(is_optimizable());
    stage_ = kOptimization;
  }

  bool is_optimizable() const { return optimizable_; }
  void set_optimizable() {
    DCHECK(is_collecting());
    optimizable_ = true;
  }

  // Used to verify that the instruction sequence is the same in both stages.
  size_t hash_code() const { return hash_code_; }
  void set_hash_code(size_t hash_code) { hash_code_ = hash_code; }

  std::vector<uint32_t>& farjmp_bitmap() { return farjmp_bitmap_; }

 private:
  enum { kCollection, kOptimization } stage_ = kCollection;
  bool optimizable_ = false;
  std::vector<uint32_t> farjmp_bitmap_;
  size_t hash_code_ = 0u;
};
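
// Illustrative sketch (not part of this header): jump optimization assembles
// the same code twice. The first pass collects which far jumps could be
// shortened; if any can, a second pass emits the short forms. GenerateCode
// below is a hypothetical stand-in for the platform code generator.
//
//   JumpOptimizationInfo jump_opt;
//   GenerateCode(&jump_opt);       // stage 1: jump_opt.is_collecting()
//   if (jump_opt.is_optimizable()) {
//     jump_opt.set_optimizing();
//     GenerateCode(&jump_opt);     // stage 2: jump_opt.is_optimizing()
//   }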

class HeapObjectRequest {
 public:
  explicit HeapObjectRequest(double heap_number, int offset = -1);
  explicit HeapObjectRequest(const StringConstantBase* string, int offset = -1);

  enum Kind { kHeapNumber, kStringConstant };
  Kind kind() const { return kind_; }

  double heap_number() const {
    DCHECK_EQ(kind(), kHeapNumber);
    return value_.heap_number;
  }

  const StringConstantBase* string() const {
    DCHECK_EQ(kind(), kStringConstant);
    return value_.string;
  }

  // The code buffer offset at the time of the request.
  int offset() const {
    DCHECK_GE(offset_, 0);
    return offset_;
  }
  void set_offset(int offset) {
    DCHECK_LT(offset_, 0);
    offset_ = offset;
    DCHECK_GE(offset_, 0);
  }

 private:
  Kind kind_;

  union {
    double heap_number;
    const StringConstantBase* string;
  } value_;

  int offset_;
};
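
// Illustrative sketch: while emitting code, a platform assembler records a
// request for an object that cannot be allocated yet, remembering the pc
// offset of the instruction that will refer to it. The calls below are a
// hypothetical use from within an Assembler subclass.
//
//   HeapObjectRequest request(1.5);    // a heap number, allocated after assembly
//   request.set_offset(pc_offset());   // where the emitted code refers to it
//   RequestHeapObject(request);        // see AssemblerBase::RequestHeapObject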

// -----------------------------------------------------------------------------
// Platform independent assembler base class.

enum class CodeObjectRequired { kNo, kYes };

struct V8_EXPORT_PRIVATE AssemblerOptions {
  // Recording reloc info for external references and off-heap targets is
  // needed whenever code is serialized, e.g. into the snapshot or as a Wasm
  // module. This flag allows this reloc info to be disabled for code that
  // will not survive process destruction.
  bool record_reloc_info_for_serialization = true;
  // Recording reloc info can be disabled wholesale. This is needed when the
  // assembler is used on existing code directly (e.g. JumpTableAssembler)
  // without any buffer to hold reloc information.
  bool disable_reloc_info_for_patching = false;
  // Enables root-relative access to arbitrary untagged addresses (usually
  // external references). Only valid if code will not survive the process.
  bool enable_root_relative_access = false;
  // Enables specific assembler sequences only used for the simulator.
  bool enable_simulator_code = false;
  // Enables use of isolate-independent constants, indirected through the
  // root array.
  // (macro assembler feature).
  bool isolate_independent_code = false;
  // Enables the use of isolate-independent builtins through an off-heap
  // trampoline. (macro assembler feature).
  bool inline_offheap_trampolines = true;
  // Enables generation of pc-relative calls to builtins if the off-heap
  // builtins are guaranteed to be within the reach of pc-relative call or jump
  // instructions. For example, when the builtins code is re-embedded into the
  // code range.
  bool short_builtin_calls = false;
  // On some platforms, all code is created within a certain address range in
  // the process, and the base of this code range is configured here.
  Address code_range_base = 0;
  // Enable pc-relative calls/jumps on platforms that support it. When setting
  // this flag, the code range must be small enough to fit all offsets into
  // the instruction immediates.
  bool use_pc_relative_calls_and_jumps = false;
  // Enables the collection of information useful for the generation of unwind
  // info. This is useful on some platforms (Win64), where the unwind info depends
  // on a function prologue/epilogue.
  bool collect_win64_unwind_info = false;
  // Whether to emit code comments.
  bool emit_code_comments = FLAG_code_comments;

  static AssemblerOptions Default(Isolate* isolate);
  static AssemblerOptions DefaultForOffHeapTrampoline(Isolate* isolate);
};
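
// Illustrative sketch: embedders typically start from the defaults and toggle
// individual flags; the isolate is assumed to exist.
//
//   AssemblerOptions options = AssemblerOptions::Default(isolate);
//   // The generated code will not outlive this process:
//   options.record_reloc_info_for_serialization = false;
//   options.enable_root_relative_access = true;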

class AssemblerBuffer {
 public:
  virtual ~AssemblerBuffer() = default;
  virtual byte* start() const = 0;
  virtual int size() const = 0;
  // Return a grown copy of this buffer. The contained data is uninitialized.
  // The data in {this} will still be read afterwards (until {this} is
  // destructed), but not written.
  virtual std::unique_ptr<AssemblerBuffer> Grow(int new_size)
      V8_WARN_UNUSED_RESULT = 0;
};

// Allocate an AssemblerBuffer which uses an existing buffer. This buffer cannot
// grow, so it must be large enough for all code emitted by the Assembler.
V8_EXPORT_PRIVATE
std::unique_ptr<AssemblerBuffer> ExternalAssemblerBuffer(void* buffer,
                                                         int size);

// Allocate a new growable AssemblerBuffer with a given initial size.
V8_EXPORT_PRIVATE
std::unique_ptr<AssemblerBuffer> NewAssemblerBuffer(int size);
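
// Illustrative sketch: an external buffer must be sized up front for all code
// the Assembler will emit, while a buffer from NewAssemblerBuffer grows on
// demand.
//
//   byte stack_buffer[AssemblerBase::kMinimalBufferSize];
//   auto fixed = ExternalAssemblerBuffer(stack_buffer, sizeof(stack_buffer));
//   auto growable = NewAssemblerBuffer(AssemblerBase::kDefaultBufferSize);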

class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
 public:
  AssemblerBase(const AssemblerOptions& options,
                std::unique_ptr<AssemblerBuffer>);
  virtual ~AssemblerBase();

  const AssemblerOptions& options() const { return options_; }

  bool predictable_code_size() const { return predictable_code_size_; }
  void set_predictable_code_size(bool value) { predictable_code_size_ = value; }

  uint64_t enabled_cpu_features() const { return enabled_cpu_features_; }
  void set_enabled_cpu_features(uint64_t features) {
    enabled_cpu_features_ = features;
  }
  // Features are usually enabled by CpuFeatureScope, which also asserts that
  // the features are supported before they are enabled.
  // IMPORTANT: IsEnabled() should only be used by DCHECKs. For real feature
  // detection, use IsSupported().
  bool IsEnabled(CpuFeature f) {
    return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
  }
  void EnableCpuFeature(CpuFeature f) {
    enabled_cpu_features_ |= (static_cast<uint64_t>(1) << f);
  }

  bool is_constant_pool_available() const {
    if (FLAG_enable_embedded_constant_pool) {
      // We need to disable the constant pool here for embedded builtins
      // because the metadata section is not adjacent to the instructions.
      return constant_pool_available_ && !options().isolate_independent_code;
    } else {
      // Embedded constant pool not supported on this architecture.
      UNREACHABLE();
    }
  }

  JumpOptimizationInfo* jump_optimization_info() {
    return jump_optimization_info_;
  }
  void set_jump_optimization_info(JumpOptimizationInfo* jump_opt) {
    jump_optimization_info_ = jump_opt;
  }

  void FinalizeJumpOptimizationInfo() {}

  // Overwrite a host NaN with a quiet target NaN.  Used by mksnapshot for
  // cross-snapshotting.
  static void QuietNaN(HeapObject nan) {}

  int pc_offset() const { return static_cast<int>(pc_ - buffer_start_); }

  int pc_offset_for_safepoint() {
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
    defined(V8_TARGET_ARCH_LOONG64)
    // MIPS and LOONG64 need their own implementation to avoid the influence
    // of trampolines.
    UNREACHABLE();
#else
    return pc_offset();
#endif
  }

  byte* buffer_start() const { return buffer_->start(); }
  int buffer_size() const { return buffer_->size(); }
  int instruction_size() const { return pc_offset(); }

  std::unique_ptr<AssemblerBuffer> ReleaseBuffer() {
    std::unique_ptr<AssemblerBuffer> buffer = std::move(buffer_);
    DCHECK_NULL(buffer_);
    // Reset fields to prevent accidental further modifications of the buffer.
    buffer_start_ = nullptr;
    pc_ = nullptr;
    return buffer;
  }

  // This function is called when code generation is aborted, so that
  // the assembler can clean up internal data structures.
  virtual void AbortedCodeGeneration() {}

  // Debugging
  void Print(Isolate* isolate);

  // Record an inline code comment that can be used by a disassembler.
  // Use --code-comments to enable.
  V8_INLINE void RecordComment(const char* comment) {
    // Set explicit dependency on --code-comments for dead-code elimination in
    // release builds.
    if (!FLAG_code_comments) return;
    if (options().emit_code_comments) {
      code_comments_writer_.Add(pc_offset(), std::string(comment));
    }
  }

  V8_INLINE void RecordComment(std::string comment) {
    // Set explicit dependency on --code-comments for dead-code elimination in
    // release builds.
    if (!FLAG_code_comments) return;
    if (options().emit_code_comments) {
      code_comments_writer_.Add(pc_offset(), std::move(comment));
    }
  }
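
  // Illustrative usage (comments are emitted only when --code-comments is on):
  //
  //   assembler->RecordComment("-- inlined fast path --");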

#ifdef V8_CODE_COMMENTS
  class CodeComment {
   public:
    explicit CodeComment(Assembler* assembler, const std::string& comment)
        : assembler_(assembler) {
      if (FLAG_code_comments) Open(comment);
    }
    ~CodeComment() {
      if (FLAG_code_comments) Close();
    }
    static const int kIndentWidth = 2;

   private:
    int depth() const;
    void Open(const std::string& comment);
    void Close();
    Assembler* assembler_;
  };
#else  // V8_CODE_COMMENTS
  class CodeComment {
    explicit CodeComment(Assembler* assembler, std::string comment) {}
  };
#endif

  // The minimum buffer size. Should be at least two times the platform-specific
  // {Assembler::kGap}.
  static constexpr int kMinimalBufferSize = 128;

  // The default buffer size used if we do not know the final size of the
  // generated code.
  static constexpr int kDefaultBufferSize = 4 * KB;

 protected:
  // Add 'target' to the {code_targets_} vector, if necessary, and return the
  // offset at which it is stored.
  int AddCodeTarget(Handle<CodeT> target);
  Handle<CodeT> GetCodeTarget(intptr_t code_target_index) const;

  // Add 'object' to the {embedded_objects_} vector and return the index at
  // which it is stored.
  using EmbeddedObjectIndex = size_t;
  EmbeddedObjectIndex AddEmbeddedObject(Handle<HeapObject> object);
  Handle<HeapObject> GetEmbeddedObject(EmbeddedObjectIndex index) const;

  // The buffer into which code and relocation info are generated.
  std::unique_ptr<AssemblerBuffer> buffer_;
  // Cached from {buffer_->start()}, for faster access.
  byte* buffer_start_;
  std::forward_list<HeapObjectRequest> heap_object_requests_;
  // The program counter, which points into the buffer above and moves forward.
  // TODO(jkummerow): This should probably have type {Address}.
  byte* pc_;

  void set_constant_pool_available(bool available) {
    if (FLAG_enable_embedded_constant_pool) {
      constant_pool_available_ = available;
    } else {
      // Embedded constant pool not supported on this architecture.
      UNREACHABLE();
    }
  }

  // {RequestHeapObject} records the need for a future heap number allocation,
  // code stub generation or string allocation. After code assembly, each
  // platform's {Assembler::AllocateAndInstallRequestedHeapObjects} will
  // allocate these objects and place them where they are expected (determined
  // by the pc offset associated with each request).
  void RequestHeapObject(HeapObjectRequest request);

  bool ShouldRecordRelocInfo(RelocInfo::Mode rmode) const {
    DCHECK(!RelocInfo::IsNoInfo(rmode));
    if (options().disable_reloc_info_for_patching) return false;
    if (RelocInfo::IsOnlyForSerializer(rmode) &&
        !options().record_reloc_info_for_serialization && !FLAG_debug_code) {
      return false;
    }
#ifndef ENABLE_DISASSEMBLER
    if (RelocInfo::IsLiteralConstant(rmode)) return false;
#endif
    return true;
  }
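
  // Illustrative sketch: platform assemblers gate reloc emission on this
  // predicate; RecordRelocInfo stands for the platform-specific recorder.
  //
  //   if (ShouldRecordRelocInfo(rmode)) RecordRelocInfo(rmode, data);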

  CodeCommentsWriter code_comments_writer_;

 private:
  // Before we copy code into the code space, we sometimes cannot encode
  // call/jump code targets as we normally would, as the difference between the
  // instruction's location in the temporary buffer and the call target is not
  // guaranteed to fit in the instruction's offset field. We keep track of the
  // code handles we encounter in calls in this vector, and encode the index of
  // the code handle in the vector instead.
  std::vector<Handle<CodeT>> code_targets_;

  // If an assembler needs a small number to refer to a heap object handle
  // (for example, because only 32 bits are available on a 64-bit arch), the
  // assembler adds the object into this vector using AddEmbeddedObject, and
  // may then refer to the heap object using the handle's index in this vector.
  std::vector<Handle<HeapObject>> embedded_objects_;

  // Embedded objects are deduplicated based on handle location. This is a
  // compromise that is almost as effective as deduplication based on actual
  // heap object addresses while still maintaining GC safety.
  std::unordered_map<Handle<HeapObject>, EmbeddedObjectIndex,
                     Handle<HeapObject>::hash, Handle<HeapObject>::equal_to>
      embedded_objects_map_;

  const AssemblerOptions options_;
  uint64_t enabled_cpu_features_;
  bool predictable_code_size_;

  // Indicates whether the constant pool can be accessed, which is only possible
  // if the pp register points to the current code object's constant pool.
  bool constant_pool_available_;

  JumpOptimizationInfo* jump_optimization_info_;

#ifdef V8_CODE_COMMENTS
  int comment_depth_ = 0;
#endif

  // Constant pool.
  friend class FrameAndConstantPoolScope;
  friend class ConstantPoolUnavailableScope;
};

// Enable a specified feature within a scope.
class V8_EXPORT_PRIVATE V8_NODISCARD CpuFeatureScope {
 public:
  enum CheckPolicy {
    kCheckSupported,
    kDontCheckSupported,
  };

#ifdef DEBUG
  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
                  CheckPolicy check = kCheckSupported);
  ~CpuFeatureScope();

 private:
  AssemblerBase* assembler_;
  uint64_t old_enabled_;
#else
  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
                  CheckPolicy check = kCheckSupported) {}
  ~CpuFeatureScope() {
    // Define a destructor to avoid unused variable warnings.
  }
#endif
};
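
// Illustrative sketch: guard feature-dependent instruction sequences with a
// scope so that DCHECK-level feature tracking stays accurate; the AVX code
// below is a hypothetical x64 example.
//
//   if (CpuFeatures::IsSupported(AVX)) {
//     CpuFeatureScope avx_scope(assembler, AVX);
//     // ... emit AVX instructions; assembler->IsEnabled(AVX) holds here ...
//   } else {
//     // ... emit a fallback sequence ...
//   }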

#ifdef V8_CODE_COMMENTS
#define ASM_CODE_COMMENT(asm) ASM_CODE_COMMENT_STRING(asm, __func__)
#define ASM_CODE_COMMENT_STRING(asm, comment) \
  AssemblerBase::CodeComment UNIQUE_IDENTIFIER(asm_code_comment)(asm, comment)
#else
#define ASM_CODE_COMMENT(asm)
#define ASM_CODE_COMMENT_STRING(asm, ...)
#endif
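
// Illustrative usage: the macros open a nested, indented code comment for the
// duration of the enclosing scope. SomeHelper is a hypothetical function.
//
//   void MacroAssembler::SomeHelper() {
//     ASM_CODE_COMMENT(this);  // annotates the emitted code with __func__
//     {
//       ASM_CODE_COMMENT_STRING(this, "fast path");
//       // ... instructions emitted here are attributed to "fast path" ...
//     }
//   }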

}  // namespace internal
}  // namespace v8
#endif  // V8_CODEGEN_ASSEMBLER_H_