// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_WASM_WASM_CODE_MANAGER_H_
#define V8_WASM_WASM_CODE_MANAGER_H_

#include <atomic>
#include <list>
#include <map>
#include <memory>
#include <unordered_set>
#include <utility>
#include <vector>

#include "src/base/address-region.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/builtins/builtins-definitions.h"
#include "src/handles/handles.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/vector.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module-sourcemap.h"
#include "src/wasm/wasm-tier.h"

namespace v8 {
namespace internal {

class Code;
class CodeDesc;
class Isolate;

namespace wasm {

class NativeModule;
class WasmCodeManager;
struct WasmCompilationResult;
class WasmEngine;
class WasmImportWrapperCache;
struct WasmModule;

// Sorted, disjoint and non-overlapping memory regions. A region is of the
// form [start, end). So there's no [start, end), [end, other_end),
// because that should have been reduced to [start, other_end).
class V8_EXPORT_PRIVATE DisjointAllocationPool final {
 public:
  MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(DisjointAllocationPool);
  explicit DisjointAllocationPool(base::AddressRegion region)
      : regions_({region}) {}

  // Merge the parameter region into this object while preserving ordering of
  // the regions. The assumption is that the passed parameter is not
  // intersecting this object - for example, it was obtained from a previous
  // Allocate. Returns the merged region.
  base::AddressRegion Merge(base::AddressRegion);

  // Allocate a contiguous region of size {size}. Return an empty region on
  // failure.
  base::AddressRegion Allocate(size_t size);

  // Allocate a contiguous region of size {size} within {region}. Return an
  // empty region on failure.
  base::AddressRegion AllocateInRegion(size_t size, base::AddressRegion);

  bool IsEmpty() const { return regions_.empty(); }
  const std::list<base::AddressRegion>& regions() const { return regions_; }

 private:
  std::list<base::AddressRegion> regions_;
};
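
// Editor's note: a minimal usage sketch of {DisjointAllocationPool}
// (illustrative only; the addresses are made up):
//
//   DisjointAllocationPool pool({0x10000, 0x4000});  // [0x10000, 0x14000)
//   base::AddressRegion r = pool.Allocate(0x1000);   // e.g. [0x10000, 0x11000)
//   pool.Merge(r);  // give it back; pool coalesces to [0x10000, 0x14000) again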

class V8_EXPORT_PRIVATE WasmCode final {
 public:
  enum Kind {
    kFunction,
    kWasmToCapiWrapper,
    kWasmToJsWrapper,
    kInterpreterEntry,
    kJumpTable
  };

  // Each runtime stub is identified by an id. This id is used to reference the
  // stub via {RelocInfo::WASM_STUB_CALL} and gets resolved during relocation.
  enum RuntimeStubId {
#define DEF_ENUM(Name) k##Name,
#define DEF_ENUM_TRAP(Name) kThrowWasm##Name,
    WASM_RUNTIME_STUB_LIST(DEF_ENUM, DEF_ENUM_TRAP)
#undef DEF_ENUM_TRAP
#undef DEF_ENUM
        kRuntimeStubCount
  };
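
  // Editor's note: the macros above expand to one enumerator per runtime stub
  // and one {kThrowWasm...} enumerator per trap, roughly (the exact list comes
  // from {WASM_RUNTIME_STUB_LIST}):
  //
  //   enum RuntimeStubId { kWasmCompileLazy, /* ... */, kRuntimeStubCount };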

  Vector<byte> instructions() const { return instructions_; }
  Address instruction_start() const {
    return reinterpret_cast<Address>(instructions_.begin());
  }
  Vector<const byte> reloc_info() const { return reloc_info_.as_vector(); }
  Vector<const byte> source_positions() const {
    return source_position_table_.as_vector();
  }

  uint32_t index() const {
    DCHECK(!IsAnonymous());
    return index_;
  }
  // Anonymous functions are functions that don't carry an index.
  bool IsAnonymous() const { return index_ == kAnonymousFuncIndex; }
  Kind kind() const { return kind_; }
  NativeModule* native_module() const { return native_module_; }
  ExecutionTier tier() const { return tier_; }
  Address constant_pool() const;
  Address handler_table() const;
  uint32_t handler_table_size() const;
  Address code_comments() const;
  uint32_t code_comments_size() const;
  size_t constant_pool_offset() const { return constant_pool_offset_; }
  size_t safepoint_table_offset() const { return safepoint_table_offset_; }
  size_t handler_table_offset() const { return handler_table_offset_; }
  size_t code_comments_offset() const { return code_comments_offset_; }
  size_t unpadded_binary_size() const { return unpadded_binary_size_; }
  uint32_t stack_slots() const { return stack_slots_; }
  uint32_t tagged_parameter_slots() const { return tagged_parameter_slots_; }
  bool is_liftoff() const { return tier_ == ExecutionTier::kLiftoff; }
  bool contains(Address pc) const {
    return reinterpret_cast<Address>(instructions_.begin()) <= pc &&
           pc < reinterpret_cast<Address>(instructions_.end());
  }

  Vector<trap_handler::ProtectedInstructionData> protected_instructions()
      const {
    return protected_instructions_.as_vector();
  }

  void Validate() const;
  void Print(const char* name = nullptr) const;
  void MaybePrint(const char* name = nullptr) const;
  void Disassemble(const char* name, std::ostream& os,
                   Address current_pc = kNullAddress) const;

  static bool ShouldBeLogged(Isolate* isolate);
  void LogCode(Isolate* isolate) const;

  ~WasmCode();

  void IncRef() {
    int old_val = ref_count_.fetch_add(1, std::memory_order_acq_rel);
    DCHECK_LE(1, old_val);
    DCHECK_GT(kMaxInt, old_val);
    USE(old_val);
  }

  // Decrement the ref count. Returns whether this code becomes dead and needs
  // to be freed.
  V8_WARN_UNUSED_RESULT bool DecRef() {
    int old_count = ref_count_.load(std::memory_order_acquire);
    while (true) {
      DCHECK_LE(1, old_count);
      if (V8_UNLIKELY(old_count == 1)) return DecRefOnPotentiallyDeadCode();
      if (ref_count_.compare_exchange_weak(old_count, old_count - 1,
                                           std::memory_order_acq_rel)) {
        return false;
      }
    }
  }

  // Decrement the ref count on code that is known to be dead, even though there
  // might still be C++ references. Returns whether this drops the last
  // reference and the code needs to be freed.
  V8_WARN_UNUSED_RESULT bool DecRefOnDeadCode() {
    return ref_count_.fetch_sub(1, std::memory_order_acq_rel) == 1;
  }

  // Decrement the ref count on a set of {WasmCode} objects, potentially
  // belonging to different {NativeModule}s. Dead code will be deleted.
  static void DecrementRefCount(Vector<WasmCode* const>);

  enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };

  STATIC_ASSERT(kAnonymousFuncIndex > kV8MaxWasmFunctions);

 private:
  friend class NativeModule;

  WasmCode(NativeModule* native_module, uint32_t index,
           Vector<byte> instructions, uint32_t stack_slots,
           uint32_t tagged_parameter_slots, size_t safepoint_table_offset,
           size_t handler_table_offset, size_t constant_pool_offset,
           size_t code_comments_offset, size_t unpadded_binary_size,
           OwnedVector<trap_handler::ProtectedInstructionData>
               protected_instructions,
           OwnedVector<const byte> reloc_info,
           OwnedVector<const byte> source_position_table, Kind kind,
           ExecutionTier tier)
      : instructions_(instructions),
        reloc_info_(std::move(reloc_info)),
        source_position_table_(std::move(source_position_table)),
        native_module_(native_module),
        index_(index),
        kind_(kind),
        constant_pool_offset_(constant_pool_offset),
        stack_slots_(stack_slots),
        tagged_parameter_slots_(tagged_parameter_slots),
        safepoint_table_offset_(safepoint_table_offset),
        handler_table_offset_(handler_table_offset),
        code_comments_offset_(code_comments_offset),
        unpadded_binary_size_(unpadded_binary_size),
        protected_instructions_(std::move(protected_instructions)),
        tier_(tier) {
    DCHECK_LE(safepoint_table_offset, unpadded_binary_size);
    DCHECK_LE(handler_table_offset, unpadded_binary_size);
    DCHECK_LE(code_comments_offset, unpadded_binary_size);
    DCHECK_LE(constant_pool_offset, unpadded_binary_size);
  }

  // Code objects that have been registered with the global trap handler within
  // this process will have a {trap_handler_index} associated with them.
  int trap_handler_index() const {
    CHECK(has_trap_handler_index());
    return trap_handler_index_;
  }
  void set_trap_handler_index(int value) {
    CHECK(!has_trap_handler_index());
    trap_handler_index_ = value;
  }
  bool has_trap_handler_index() const { return trap_handler_index_ >= 0; }

  // Register protected instruction information with the trap handler. Sets
  // {trap_handler_index_}.
  void RegisterTrapHandlerData();

  // Slow path for {DecRef}: The code becomes potentially dead.
  // Returns whether this code becomes dead and needs to be freed.
  V8_NOINLINE bool DecRefOnPotentiallyDeadCode();

  Vector<byte> instructions_;
  OwnedVector<const byte> reloc_info_;
  OwnedVector<const byte> source_position_table_;
  NativeModule* native_module_ = nullptr;
  uint32_t index_;
  Kind kind_;
  size_t constant_pool_offset_ = 0;
  uint32_t stack_slots_ = 0;
  // Number of tagged parameters passed to this function via the stack. This
  // value is used by the stack walker (e.g. GC) to find references.
  uint32_t tagged_parameter_slots_ = 0;
  // We care about safepoint data for wasm-to-js functions, since there may be
  // stack/register tagged values for large number conversions.
  size_t safepoint_table_offset_ = 0;
  size_t handler_table_offset_ = 0;
  size_t code_comments_offset_ = 0;
  size_t unpadded_binary_size_ = 0;
  int trap_handler_index_ = -1;
  OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions_;
  ExecutionTier tier_;

  // WasmCode is ref counted. Counters are held by:
  //   1) The jump table / code table.
  //   2) {WasmCodeRefScope}s.
  //   3) The set of potentially dead code in the {WasmEngine}.
  // If a decrement of (1) would drop the ref count to 0, that code becomes a
  // candidate for garbage collection. At that point, we add a ref count for (3)
  // *before* decrementing the counter to ensure the code stays alive as long as
  // it's being used. Once the ref count drops to zero (i.e. after being removed
  // from (3) and all (2)), the code object is deleted and the memory for the
  // machine code is freed.
  std::atomic<int> ref_count_{1};

  DISALLOW_COPY_AND_ASSIGN(WasmCode);
};
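
// Editor's note: an illustrative sketch of the ref-counting contract described
// above ({code} is assumed to hold a valid {WasmCode*} reference):
//
//   code->IncRef();        // share the reference
//   /* ... */
//   if (code->DecRef()) {  // drop it again
//     // Last reference gone: the code is dead and needs to be freed via the
//     // engine (see {NativeModule::FreeCode} and {WasmEngine::FreeDeadCode}).
//   }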

WasmCode::Kind GetCodeKind(const WasmCompilationResult& result);

// Return a textual description of the kind.
const char* GetWasmCodeKindAsString(WasmCode::Kind);

// Manages the code reservations and allocations of a single {NativeModule}.
class WasmCodeAllocator {
 public:
  // {OptionalLock} is passed between {WasmCodeAllocator} and {NativeModule} to
  // indicate that the lock on the {WasmCodeAllocator} is already taken. It is
  // optional so that methods can also be called without holding the lock.
  class OptionalLock {
   public:
    // External users can only instantiate a non-locked {OptionalLock}.
    OptionalLock() = default;
    ~OptionalLock();
    bool is_locked() const { return allocator_ != nullptr; }

   private:
    friend class WasmCodeAllocator;
    // {Lock} is called from the {WasmCodeAllocator} if no locked {OptionalLock}
    // is passed.
    void Lock(WasmCodeAllocator*);

    WasmCodeAllocator* allocator_ = nullptr;
  };
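
  // Editor's note: an illustrative sketch of the pattern described above
  // ({allocator}, {native_module}, {size} and {region} are assumed to exist):
  //
  //   WasmCodeAllocator::OptionalLock lock;  // is_locked() == false
  //   allocator.AllocateForCodeInRegion(native_module, size, region, lock);
  //   // Inside, the allocator calls {Lock} on {lock} unless it is_locked().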

  WasmCodeAllocator(WasmCodeManager*, VirtualMemory code_space,
                    bool can_request_more,
                    std::shared_ptr<Counters> async_counters);
  ~WasmCodeAllocator();

  // Call before use, after the {NativeModule} is set up completely.
  void Init(NativeModule*);

  size_t committed_code_space() const {
    return committed_code_space_.load(std::memory_order_acquire);
  }
  size_t generated_code_size() const {
    return generated_code_size_.load(std::memory_order_acquire);
  }
  size_t freed_code_size() const {
    return freed_code_size_.load(std::memory_order_acquire);
  }

  // Allocate code space. Returns a valid buffer or fails with OOM (crash).
  Vector<byte> AllocateForCode(NativeModule*, size_t size);

  // Allocate code space within a specific region. Returns a valid buffer or
  // fails with OOM (crash).
  Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
                                       base::AddressRegion,
                                       const WasmCodeAllocator::OptionalLock&);

  // Sets permissions of all owned code space to executable, or read-write (if
  // {executable} is false). Returns true on success.
  V8_EXPORT_PRIVATE bool SetExecutable(bool executable);

  // Free memory pages of all given code objects. Used for wasm code GC.
  void FreeCode(Vector<WasmCode* const>);

  // Retrieve the number of separately reserved code spaces.
  size_t GetNumCodeSpaces() const;

 private:
  // The engine-wide wasm code manager.
  WasmCodeManager* const code_manager_;

  mutable base::Mutex mutex_;

  //////////////////////////////////////////////////////////////////////////////
  // Protected by {mutex_}:

  // Code space that was reserved and is available for allocations (subset of
  // {owned_code_space_}).
  DisjointAllocationPool free_code_space_;
  // Code space that was allocated for code (subset of {owned_code_space_}).
  DisjointAllocationPool allocated_code_space_;
  // Code space that was allocated before but is dead now. Full pages within
  // this region are discarded. It's still a subset of {owned_code_space_}.
  DisjointAllocationPool freed_code_space_;
  std::vector<VirtualMemory> owned_code_space_;

  // End of fields protected by {mutex_}.
  //////////////////////////////////////////////////////////////////////////////

  std::atomic<size_t> committed_code_space_{0};
  std::atomic<size_t> generated_code_size_{0};
  std::atomic<size_t> freed_code_size_{0};

  bool is_executable_ = false;

  // TODO(clemensb): Remove this field once multiple code spaces are supported
  // everywhere.
  const bool can_request_more_memory_;

  std::shared_ptr<Counters> async_counters_;
};

class V8_EXPORT_PRIVATE NativeModule final {
 public:
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64
  static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = true;
#else
  static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = false;
#endif

  // {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
  // code below, i.e. it can be called concurrently from background threads.
  // The returned code still needs to be published via {PublishCode}.
  std::unique_ptr<WasmCode> AddCode(
      uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
      uint32_t tagged_parameter_slots,
      OwnedVector<trap_handler::ProtectedInstructionData>
          protected_instructions,
      OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
      ExecutionTier tier);

  // {PublishCode} makes the code available to the system by entering it into
  // the code table and patching the jump table. It returns a raw pointer to the
  // given {WasmCode} object.
  WasmCode* PublishCode(std::unique_ptr<WasmCode>);
  // Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
  WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>);
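
  // Editor's note: an illustrative sketch of the add-then-publish sequence
  // described above (arguments elided):
  //
  //   std::unique_ptr<WasmCode> code = native_module->AddCode(/* ... */);
  //   WasmCode* published = native_module->PublishCode(std::move(code));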

  WasmCode* AddDeserializedCode(
      uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
      uint32_t tagged_parameter_slots, size_t safepoint_table_offset,
      size_t handler_table_offset, size_t constant_pool_offset,
      size_t code_comments_offset, size_t unpadded_binary_size,
      OwnedVector<trap_handler::ProtectedInstructionData>
          protected_instructions,
      OwnedVector<const byte> reloc_info,
      OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
      ExecutionTier tier);

  // Adds anonymous code for testing purposes.
  WasmCode* AddCodeForTesting(Handle<Code> code);

  // Use {UseLazyStub} to set up lazy compilation per function. It will use the
  // existing {WasmCode::kWasmCompileLazy} runtime stub and populate the jump
  // table with trampolines accordingly.
  void UseLazyStub(uint32_t func_index);

  // Creates a snapshot of the current state of the code table. This is useful
  // to get a consistent view of the table (e.g. used by the serializer).
  std::vector<WasmCode*> SnapshotCodeTable() const;

  WasmCode* GetCode(uint32_t index) const;
  bool HasCode(uint32_t index) const;

  void SetWasmSourceMap(std::unique_ptr<WasmModuleSourceMap> source_map);
  WasmModuleSourceMap* GetWasmSourceMap() const;

  Address jump_table_start() const {
    return main_jump_table_ ? main_jump_table_->instruction_start()
                            : kNullAddress;
  }

  uint32_t GetJumpTableOffset(uint32_t func_index) const;

  // Returns the canonical target to call for the given function (the slot in
  // the first jump table).
  Address GetCallTargetForFunction(uint32_t func_index) const;

  struct JumpTablesRef {
    const Address jump_table_start;
    const Address far_jump_table_start;
  };

  // Finds the jump tables that should be used for the code at {code_addr}. This
  // information is then passed to {GetNearCallTargetForFunction} and
  // {GetNearRuntimeStubEntry} to avoid the overhead of looking this information
  // up there.
  JumpTablesRef FindJumpTablesForCode(Address code_addr) const;

  // Similar to {GetCallTargetForFunction}, but uses the jump table previously
  // looked up via {FindJumpTablesForCode}.
  Address GetNearCallTargetForFunction(uint32_t func_index,
                                       const JumpTablesRef&) const;

  // Get a runtime stub entry (which is a far jump table slot) in the jump table
  // previously looked up via {FindJumpTablesForCode}.
  Address GetNearRuntimeStubEntry(WasmCode::RuntimeStubId index,
                                  const JumpTablesRef&) const;

  // Reverse lookup from a given call target (which must be a jump table slot)
  // to a function index.
  uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
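
  // Editor's note: an illustrative sketch of the lookup-once pattern described
  // above ({code_addr} and {func_index} are assumed inputs):
  //
  //   JumpTablesRef tables = native_module->FindJumpTablesForCode(code_addr);
  //   Address target =
  //       native_module->GetNearCallTargetForFunction(func_index, tables);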

  bool SetExecutable(bool executable) {
    return code_allocator_.SetExecutable(executable);
  }

  // For cctests, where we build both WasmModule and the runtime objects
  // on the fly, and bypass the instance builder pipeline.
  void ReserveCodeTableForTesting(uint32_t max_functions);

  void LogWasmCodes(Isolate* isolate);

  CompilationState* compilation_state() { return compilation_state_.get(); }

  // Create a {CompilationEnv} object for compilation. The caller has to ensure
  // that the {WasmModule} pointer stays valid while the {CompilationEnv} is
  // being used.
  CompilationEnv CreateCompilationEnv() const;

  uint32_t num_functions() const {
    return module_->num_declared_functions + module_->num_imported_functions;
  }
  uint32_t num_imported_functions() const {
    return module_->num_imported_functions;
  }
  UseTrapHandler use_trap_handler() const { return use_trap_handler_; }
  void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
  bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
  Vector<const uint8_t> wire_bytes() const { return wire_bytes_->as_vector(); }
  const WasmModule* module() const { return module_.get(); }
  std::shared_ptr<const WasmModule> shared_module() const { return module_; }
  size_t committed_code_space() const {
    return code_allocator_.committed_code_space();
  }
  WasmEngine* engine() const { return engine_; }

  void SetWireBytes(OwnedVector<const uint8_t> wire_bytes);

  WasmCode* Lookup(Address) const;

  WasmImportWrapperCache* import_wrapper_cache() const {
    return import_wrapper_cache_.get();
  }

  ~NativeModule();

  const WasmFeatures& enabled_features() const { return enabled_features_; }

  // Returns the runtime stub id that corresponds to the given address (which
  // must be a far jump table slot). Returns {kRuntimeStubCount} on failure.
  WasmCode::RuntimeStubId GetRuntimeStubId(Address runtime_stub_target) const;

  const char* GetRuntimeStubName(Address runtime_stub_target) const;

  // Sample the current code size of this module to the given counters.
  enum CodeSamplingTime : int8_t { kAfterBaseline, kAfterTopTier, kSampling };
  void SampleCodeSize(Counters*, CodeSamplingTime) const;

  WasmCode* AddCompiledCode(WasmCompilationResult);
  std::vector<WasmCode*> AddCompiledCode(Vector<WasmCompilationResult>);

  // Allows checking whether a function has been redirected to the interpreter
  // by publishing an entry stub with the {Kind::kInterpreterEntry} code kind.
  bool IsRedirectedToInterpreter(uint32_t func_index);

  // Free a set of functions of this module. Uncommits whole pages if possible.
  // The given vector must be ordered by the instruction start address, and all
  // {WasmCode} objects must not be used any more.
  // Should only be called via {WasmEngine::FreeDeadCode}, so the engine can do
  // its accounting.
  void FreeCode(Vector<WasmCode* const>);

  // Retrieve the number of separately reserved code spaces for this module.
  size_t GetNumberOfCodeSpacesForTesting() const;

 private:
  friend class WasmCode;
  friend class WasmCodeAllocator;
  friend class WasmCodeManager;
  friend class NativeModuleModificationScope;

  struct CodeSpaceData {
    base::AddressRegion region;
    WasmCode* jump_table;
    WasmCode* far_jump_table;
  };

  // Private constructor, called via {WasmCodeManager::NewNativeModule()}.
  NativeModule(WasmEngine* engine, const WasmFeatures& enabled_features,
               bool can_request_more, VirtualMemory code_space,
               std::shared_ptr<const WasmModule> module,
               std::shared_ptr<Counters> async_counters,
               std::shared_ptr<NativeModule>* shared_this);

  std::unique_ptr<WasmCode> AddCodeWithCodeSpace(
      uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
      uint32_t tagged_parameter_slots,
      OwnedVector<trap_handler::ProtectedInstructionData>
          protected_instructions,
      OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
      ExecutionTier tier, Vector<uint8_t> code_space,
      const JumpTablesRef& jump_tables_ref);

  WasmCode* CreateEmptyJumpTableInRegion(
      uint32_t jump_table_size, base::AddressRegion,
      const WasmCodeAllocator::OptionalLock&);

  // Hold the {allocation_mutex_} when calling one of these methods.
  // {slot_index} is the index in the declared functions, i.e. function index
  // minus the number of imported functions.
  void PatchJumpTablesLocked(uint32_t slot_index, Address target);
  void PatchJumpTableLocked(const CodeSpaceData&, uint32_t slot_index,
                            Address target);

  // Called by the {WasmCodeAllocator} to register a new code space.
  void AddCodeSpace(base::AddressRegion,
                    const WasmCodeAllocator::OptionalLock&);

  // Hold the {allocation_mutex_} when calling this method.
  bool has_interpreter_redirection(uint32_t func_index) {
    DCHECK_LT(func_index, num_functions());
    DCHECK_LE(module_->num_imported_functions, func_index);
    if (!interpreter_redirections_) return false;
    uint32_t bitset_idx = func_index - module_->num_imported_functions;
    uint8_t byte = interpreter_redirections_[bitset_idx / kBitsPerByte];
    return byte & (1 << (bitset_idx % kBitsPerByte));
  }
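
  // Editor's note: for example, with 3 imported functions, {func_index} 11
  // yields {bitset_idx} 8, i.e. byte 1, bit 0 of {interpreter_redirections_}.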

  // Hold the {allocation_mutex_} when calling this method.
  void SetInterpreterRedirection(uint32_t func_index) {
    DCHECK_LT(func_index, num_functions());
    DCHECK_LE(module_->num_imported_functions, func_index);
    if (!interpreter_redirections_) {
      interpreter_redirections_.reset(
          new uint8_t[RoundUp<kBitsPerByte>(module_->num_declared_functions) /
                      kBitsPerByte]{});
    }
    uint32_t bitset_idx = func_index - module_->num_imported_functions;
    uint8_t& byte = interpreter_redirections_[bitset_idx / kBitsPerByte];
    byte |= 1 << (bitset_idx % kBitsPerByte);
  }

  // {WasmCodeAllocator} manages all code reservations and allocations for this
  // {NativeModule}.
  WasmCodeAllocator code_allocator_;

  // Features enabled for this module. We keep a copy of the features that
  // were enabled at the time of the creation of this native module,
  // to be consistent across asynchronous compilations later.
  const WasmFeatures enabled_features_;

  // The decoded module, stored in a shared_ptr such that background compile
  // tasks can keep this alive.
  std::shared_ptr<const WasmModule> module_;

  std::unique_ptr<WasmModuleSourceMap> source_map_;

  // Wire bytes, held in a shared_ptr so they can be kept alive by the
  // {WireBytesStorage}, held by background compile tasks.
  std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;

  // Jump table used by external calls (from JS). Wasm calls use one of the jump
  // tables stored in {code_space_data_}.
  WasmCode* main_jump_table_ = nullptr;

  // Lazy compile stub table, containing entries to jump to the
  // {WasmCompileLazy} builtin, passing the function index.
  WasmCode* lazy_compile_table_ = nullptr;

  // The compilation state keeps track of compilation tasks for this module.
  // Note that its destructor blocks until all tasks are finished/aborted and
  // hence needs to be destructed first when this native module dies.
  std::unique_ptr<CompilationState> compilation_state_;

  // A cache of the import wrappers, keyed on the kind and signature.
  std::unique_ptr<WasmImportWrapperCache> import_wrapper_cache_;

  // This mutex protects concurrent calls to {AddCode} and friends.
  mutable base::Mutex allocation_mutex_;

  //////////////////////////////////////////////////////////////////////////////
  // Protected by {allocation_mutex_}:

  // Holds all allocated code objects. For lookup based on pc, the key is the
  // instruction start address of the value.
  std::map<Address, std::unique_ptr<WasmCode>> owned_code_;

  // Table of the latest code object per function, updated on initial
  // compilation and tier up. The number of entries is
  // {WasmModule::num_declared_functions}, i.e. there are no entries for
  // imported functions.
  std::unique_ptr<WasmCode*[]> code_table_;

  // Null if no redirections exist, otherwise a bitset over all functions in
  // this module marking those functions that have been redirected.
  std::unique_ptr<uint8_t[]> interpreter_redirections_;

  // Data (especially jump table) per code space.
  std::vector<CodeSpaceData> code_space_data_;

  // End of fields protected by {allocation_mutex_}.
  //////////////////////////////////////////////////////////////////////////////

  WasmEngine* const engine_;
  int modification_scope_depth_ = 0;
  UseTrapHandler use_trap_handler_ = kNoTrapHandler;
  bool lazy_compile_frozen_ = false;

  DISALLOW_COPY_AND_ASSIGN(NativeModule);
};

class V8_EXPORT_PRIVATE WasmCodeManager final {
 public:
  explicit WasmCodeManager(size_t max_committed);

#ifdef DEBUG
  ~WasmCodeManager() {
    // No more committed code space.
    DCHECK_EQ(0, total_committed_code_space_.load());
  }
#endif

#if defined(V8_OS_WIN64)
  bool CanRegisterUnwindInfoForNonABICompliantCodeRange() const;
#endif  // V8_OS_WIN64

  NativeModule* LookupNativeModule(Address pc) const;
  WasmCode* LookupCode(Address pc) const;
  size_t committed_code_space() const {
    return total_committed_code_space_.load();
  }

  // Estimate the needed code space from a completely decoded module.
  static size_t EstimateNativeModuleCodeSize(const WasmModule* module);
  // Estimate the needed code space from the number of functions and total code
  // section length.
  static size_t EstimateNativeModuleCodeSize(int num_functions,
                                             int num_imported_functions,
                                             int code_section_length);
  // Estimate the size of metadata needed for the NativeModule, excluding
  // generated code. This data is still stored on the C++ heap.
  static size_t EstimateNativeModuleMetaDataSize(const WasmModule* module);

 private:
  friend class WasmCodeAllocator;
  friend class WasmEngine;

  std::shared_ptr<NativeModule> NewNativeModule(
      WasmEngine* engine, Isolate* isolate,
      const WasmFeatures& enabled_features, size_t code_size_estimate,
      bool can_request_more, std::shared_ptr<const WasmModule> module);

  V8_WARN_UNUSED_RESULT VirtualMemory TryAllocate(size_t size,
                                                  void* hint = nullptr);
  bool Commit(base::AddressRegion);
  void Decommit(base::AddressRegion);

  void FreeNativeModule(Vector<VirtualMemory> owned_code,
                        size_t committed_size);

  void AssignRange(base::AddressRegion, NativeModule*);

  const size_t max_committed_code_space_;

  std::atomic<size_t> total_committed_code_space_{0};
  // If the committed code space exceeds {critical_committed_code_space_}, then
  // we trigger a GC before creating the next module. This value is set to the
  // currently committed space plus 50% of the available code space on creation
  // and updated after each GC.
  std::atomic<size_t> critical_committed_code_space_;

  mutable base::Mutex native_modules_mutex_;

  //////////////////////////////////////////////////////////////////////////////
  // Protected by {native_modules_mutex_}:

  std::map<Address, std::pair<Address, NativeModule*>> lookup_map_;

  // End of fields protected by {native_modules_mutex_}.
  //////////////////////////////////////////////////////////////////////////////

  DISALLOW_COPY_AND_ASSIGN(WasmCodeManager);
};

// Within the scope, the native_module is writable and not executable.
// At the scope's destruction, the native_module is executable and not writable.
// These states are enforced regardless of the native_module's state when
// entering the scope.
// We currently mark the entire module's memory W^X:
//  - for AOT, that's as efficient as it can be.
//  - for lazy compilation, we don't have a heuristic for functions that may
//    need patching, and even if we did, the resulting set of pages may be
//    fragmented. Currently, we try to keep the number of syscalls low.
//  - a similar argument applies at debug time.
class NativeModuleModificationScope final {
 public:
  explicit NativeModuleModificationScope(NativeModule* native_module);
  ~NativeModuleModificationScope();

 private:
  NativeModule* native_module_;
};
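
// Editor's note: an illustrative sketch of the RAII usage:
//
//   {
//     NativeModuleModificationScope scope(native_module);
//     // The module's code space is writable (not executable) in here.
//   }  // Leaving the scope makes it executable (not writable) again.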

// {WasmCodeRefScope}s form a perfect stack. New {WasmCode} pointers generated
// by e.g. creating new code or looking up code by its address are added to the
// top-most {WasmCodeRefScope}.
class V8_EXPORT_PRIVATE WasmCodeRefScope {
 public:
  WasmCodeRefScope();
  ~WasmCodeRefScope();

  // Register a {WasmCode} reference in the current {WasmCodeRefScope}. Fails if
  // there is no current scope.
  static void AddRef(WasmCode*);

 private:
  WasmCodeRefScope* const previous_scope_;
  std::unordered_set<WasmCode*> code_ptrs_;

  DISALLOW_COPY_AND_ASSIGN(WasmCodeRefScope);
};
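
// Editor's note: an illustrative sketch of the scope protocol ({pc} is an
// assumed code address):
//
//   {
//     WasmCodeRefScope scope;
//     WasmCode* code = native_module->Lookup(pc);  // ref goes into the scope
//     // {code} is kept alive until the end of the scope.
//   }  // The scope destructor drops the collected refs.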

// Like a global handle, a {GlobalWasmCodeRef} stores a single ref-counted
// pointer to a {WasmCode} object.
class GlobalWasmCodeRef {
 public:
  explicit GlobalWasmCodeRef(WasmCode* code,
                             std::shared_ptr<NativeModule> native_module)
      : code_(code), native_module_(std::move(native_module)) {
    code_->IncRef();
  }

  ~GlobalWasmCodeRef() { WasmCode::DecrementRefCount({&code_, 1}); }

  // Get a pointer to the contained {WasmCode} object. This is only guaranteed
  // to exist as long as this {GlobalWasmCodeRef} exists.
  WasmCode* code() const { return code_; }

 private:
  WasmCode* const code_;
  // Also keep the {NativeModule} alive.
  const std::shared_ptr<NativeModule> native_module_;
  DISALLOW_COPY_AND_ASSIGN(GlobalWasmCodeRef);
};

}  // namespace wasm
}  // namespace internal
}  // namespace v8

#endif  // V8_WASM_WASM_CODE_MANAGER_H_