// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if !V8_ENABLE_WEBASSEMBLY
#error This header should only be included if WebAssembly is enabled.
#endif  // !V8_ENABLE_WEBASSEMBLY

#ifndef V8_WASM_WASM_CODE_MANAGER_H_
#define V8_WASM_WASM_CODE_MANAGER_H_

#include <atomic>
#include <map>
#include <memory>
#include <set>
#include <utility>
#include <vector>

#include "src/base/address-region.h"
#include "src/base/bit-field.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/vector.h"
#include "src/builtins/builtins.h"
#include "src/handles/handles.h"
#include "src/tasks/operations-barrier.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/memory-protection-key.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module-sourcemap.h"
#include "src/wasm/wasm-tier.h"

namespace v8 {
namespace internal {

class Code;
class CodeDesc;
class Isolate;

namespace wasm {

class DebugInfo;
class NativeModule;
struct WasmCompilationResult;
class WasmEngine;
class WasmImportWrapperCache;
struct WasmModule;

// Convenience macro listing all wasm runtime stubs. Note that the first few
// elements of the list coincide with {compiler::TrapId}, so the order matters.
#define WASM_RUNTIME_STUB_LIST(V, VTRAP)  \
  FOREACH_WASM_TRAPREASON(VTRAP)          \
  V(WasmCompileLazy)                      \
  V(WasmTriggerTierUp)                    \
  V(WasmDebugBreak)                       \
  V(WasmInt32ToHeapNumber)                \
  V(WasmTaggedNonSmiToInt32)              \
  V(WasmFloat32ToNumber)                  \
  V(WasmFloat64ToNumber)                  \
  V(WasmTaggedToFloat64)                  \
  V(WasmAllocateJSArray)                  \
  V(WasmAtomicNotify)                     \
  V(WasmI32AtomicWait32)                  \
  V(WasmI32AtomicWait64)                  \
  V(WasmI64AtomicWait32)                  \
  V(WasmI64AtomicWait64)                  \
  V(WasmGetOwnProperty)                   \
  V(WasmRefFunc)                          \
  V(WasmMemoryGrow)                       \
  V(WasmTableInit)                        \
  V(WasmTableCopy)                        \
  V(WasmTableFill)                        \
  V(WasmTableGrow)                        \
  V(WasmTableGet)                         \
  V(WasmTableSet)                         \
  V(WasmTableGetFuncRef)                  \
  V(WasmTableSetFuncRef)                  \
  V(WasmStackGuard)                       \
  V(WasmStackOverflow)                    \
  V(WasmAllocateFixedArray)               \
  V(WasmThrow)                            \
  V(WasmRethrow)                          \
  V(WasmRethrowExplicitContext)           \
  V(WasmTraceEnter)                       \
  V(WasmTraceExit)                        \
  V(WasmTraceMemory)                      \
  V(BigIntToI32Pair)                      \
  V(BigIntToI64)                          \
  V(CallRefIC)                            \
  V(DoubleToI)                            \
  V(I32PairToBigInt)                      \
  V(I64ToBigInt)                          \
  V(RecordWriteEmitRememberedSetSaveFP)   \
  V(RecordWriteOmitRememberedSetSaveFP)   \
  V(RecordWriteEmitRememberedSetIgnoreFP) \
  V(RecordWriteOmitRememberedSetIgnoreFP) \
  V(ToNumber)                             \
  IF_TSAN(V, TSANRelaxedStore8IgnoreFP)   \
  IF_TSAN(V, TSANRelaxedStore8SaveFP)     \
  IF_TSAN(V, TSANRelaxedStore16IgnoreFP)  \
  IF_TSAN(V, TSANRelaxedStore16SaveFP)    \
  IF_TSAN(V, TSANRelaxedStore32IgnoreFP)  \
  IF_TSAN(V, TSANRelaxedStore32SaveFP)    \
  IF_TSAN(V, TSANRelaxedStore64IgnoreFP)  \
  IF_TSAN(V, TSANRelaxedStore64SaveFP)    \
  IF_TSAN(V, TSANSeqCstStore8IgnoreFP)    \
  IF_TSAN(V, TSANSeqCstStore8SaveFP)      \
  IF_TSAN(V, TSANSeqCstStore16IgnoreFP)   \
  IF_TSAN(V, TSANSeqCstStore16SaveFP)     \
  IF_TSAN(V, TSANSeqCstStore32IgnoreFP)   \
  IF_TSAN(V, TSANSeqCstStore32SaveFP)     \
  IF_TSAN(V, TSANSeqCstStore64IgnoreFP)   \
  IF_TSAN(V, TSANSeqCstStore64SaveFP)     \
  IF_TSAN(V, TSANRelaxedLoad32IgnoreFP)   \
  IF_TSAN(V, TSANRelaxedLoad32SaveFP)     \
  IF_TSAN(V, TSANRelaxedLoad64IgnoreFP)   \
  IF_TSAN(V, TSANRelaxedLoad64SaveFP)     \
  V(WasmAllocateArray_Uninitialized)      \
  V(WasmArrayCopy)                        \
  V(WasmArrayCopyWithChecks)              \
  V(WasmArrayInitFromData)                \
  V(WasmAllocateStructWithRtt)            \
  V(WasmSubtypeCheck)                     \
  V(WasmOnStackReplace)                   \
  V(WasmSuspend)                          \
  V(WasmStringNewWtf8)

// Sorted, disjoint and non-overlapping memory regions. A region is of the
// form [start, end). So there's no [start, end), [end, other_end),
// because that should have been reduced to [start, other_end).
class V8_EXPORT_PRIVATE DisjointAllocationPool final {
 public:
  MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(DisjointAllocationPool);
  explicit DisjointAllocationPool(base::AddressRegion region)
      : regions_({region}) {}

  // Merge the parameter region into this object. The assumption is that the
  // passed parameter does not intersect this object - for example, it was
  // obtained from a previous Allocate. Returns the merged region.
  base::AddressRegion Merge(base::AddressRegion);

  // Allocate a contiguous region of size {size}. Return an empty region on
  // failure.
  base::AddressRegion Allocate(size_t size);

  // Allocate a contiguous region of size {size} within {region}. Return an
  // empty region on failure.
  base::AddressRegion AllocateInRegion(size_t size, base::AddressRegion);

  bool IsEmpty() const { return regions_.empty(); }

  const auto& regions() const { return regions_; }

 private:
  std::set<base::AddressRegion, base::AddressRegion::StartAddressLess> regions_;
};
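
// Illustrative usage of {DisjointAllocationPool} (a sketch only; addresses and
// sizes are made up):
//
//   DisjointAllocationPool pool(base::AddressRegion{0x10000, 0x10000});
//   base::AddressRegion a = pool.Allocate(0x1000);   // -> [0x10000, 0x11000)
//   base::AddressRegion b = pool.Allocate(0x2000);   // -> [0x11000, 0x13000)
//   pool.Merge(a);                                   // return {a} to the pool
//   base::AddressRegion c = pool.Allocate(0x20000);  // too large -> empty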

class V8_EXPORT_PRIVATE WasmCode final {
 public:
  enum Kind { kWasmFunction, kWasmToCapiWrapper, kWasmToJsWrapper, kJumpTable };

  // Each runtime stub is identified by an id. This id is used to reference the
  // stub via {RelocInfo::WASM_STUB_CALL} and gets resolved during relocation.
  enum RuntimeStubId {
#define DEF_ENUM(Name) k##Name,
#define DEF_ENUM_TRAP(Name) kThrowWasm##Name,
    WASM_RUNTIME_STUB_LIST(DEF_ENUM, DEF_ENUM_TRAP)
#undef DEF_ENUM_TRAP
#undef DEF_ENUM
        kRuntimeStubCount
  };

  static constexpr RuntimeStubId GetRecordWriteStub(
      RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
    switch (remembered_set_action) {
      case RememberedSetAction::kEmit:
        switch (fp_mode) {
          case SaveFPRegsMode::kIgnore:
            return RuntimeStubId::kRecordWriteEmitRememberedSetIgnoreFP;
          case SaveFPRegsMode::kSave:
            return RuntimeStubId::kRecordWriteEmitRememberedSetSaveFP;
        }
      case RememberedSetAction::kOmit:
        switch (fp_mode) {
          case SaveFPRegsMode::kIgnore:
            return RuntimeStubId::kRecordWriteOmitRememberedSetIgnoreFP;
          case SaveFPRegsMode::kSave:
            return RuntimeStubId::kRecordWriteOmitRememberedSetSaveFP;
        }
    }
  }
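  // For example, {GetRecordWriteStub(RememberedSetAction::kEmit,
  // SaveFPRegsMode::kSave)} maps to {kRecordWriteEmitRememberedSetSaveFP}.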

#ifdef V8_IS_TSAN
  static RuntimeStubId GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
                                        std::memory_order order) {
    if (order == std::memory_order_relaxed) {
      if (size == kInt8Size) {
        return fp_mode == SaveFPRegsMode::kIgnore
                   ? RuntimeStubId::kTSANRelaxedStore8IgnoreFP
                   : RuntimeStubId::kTSANRelaxedStore8SaveFP;
      } else if (size == kInt16Size) {
        return fp_mode == SaveFPRegsMode::kIgnore
                   ? RuntimeStubId::kTSANRelaxedStore16IgnoreFP
                   : RuntimeStubId::kTSANRelaxedStore16SaveFP;
      } else if (size == kInt32Size) {
        return fp_mode == SaveFPRegsMode::kIgnore
                   ? RuntimeStubId::kTSANRelaxedStore32IgnoreFP
                   : RuntimeStubId::kTSANRelaxedStore32SaveFP;
      } else {
        CHECK_EQ(size, kInt64Size);
        return fp_mode == SaveFPRegsMode::kIgnore
                   ? RuntimeStubId::kTSANRelaxedStore64IgnoreFP
                   : RuntimeStubId::kTSANRelaxedStore64SaveFP;
      }
    } else {
      DCHECK_EQ(order, std::memory_order_seq_cst);
      if (size == kInt8Size) {
        return fp_mode == SaveFPRegsMode::kIgnore
                   ? RuntimeStubId::kTSANSeqCstStore8IgnoreFP
                   : RuntimeStubId::kTSANSeqCstStore8SaveFP;
      } else if (size == kInt16Size) {
        return fp_mode == SaveFPRegsMode::kIgnore
                   ? RuntimeStubId::kTSANSeqCstStore16IgnoreFP
                   : RuntimeStubId::kTSANSeqCstStore16SaveFP;
      } else if (size == kInt32Size) {
        return fp_mode == SaveFPRegsMode::kIgnore
                   ? RuntimeStubId::kTSANSeqCstStore32IgnoreFP
                   : RuntimeStubId::kTSANSeqCstStore32SaveFP;
      } else {
        CHECK_EQ(size, kInt64Size);
        return fp_mode == SaveFPRegsMode::kIgnore
                   ? RuntimeStubId::kTSANSeqCstStore64IgnoreFP
                   : RuntimeStubId::kTSANSeqCstStore64SaveFP;
      }
    }
  }

  static RuntimeStubId GetTSANRelaxedLoadStub(SaveFPRegsMode fp_mode,
                                              int size) {
    if (size == kInt32Size) {
      return fp_mode == SaveFPRegsMode::kIgnore
                 ? RuntimeStubId::kTSANRelaxedLoad32IgnoreFP
                 : RuntimeStubId::kTSANRelaxedLoad32SaveFP;
    } else {
      CHECK_EQ(size, kInt64Size);
      return fp_mode == SaveFPRegsMode::kIgnore
                 ? RuntimeStubId::kTSANRelaxedLoad64IgnoreFP
                 : RuntimeStubId::kTSANRelaxedLoad64SaveFP;
    }
  }
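
  // For example, {GetTSANStoreStub(SaveFPRegsMode::kSave, kInt32Size,
  // std::memory_order_seq_cst)} yields {kTSANSeqCstStore32SaveFP}, and
  // {GetTSANRelaxedLoadStub(SaveFPRegsMode::kIgnore, kInt64Size)} yields
  // {kTSANRelaxedLoad64IgnoreFP}.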
#endif  // V8_IS_TSAN

  base::Vector<byte> instructions() const {
    return base::VectorOf(instructions_,
                          static_cast<size_t>(instructions_size_));
  }
  Address instruction_start() const {
    return reinterpret_cast<Address>(instructions_);
  }
  base::Vector<const byte> reloc_info() const {
    return {protected_instructions_data().end(),
            static_cast<size_t>(reloc_info_size_)};
  }
  base::Vector<const byte> source_positions() const {
    return {reloc_info().end(), static_cast<size_t>(source_positions_size_)};
  }

  int index() const { return index_; }
  // Anonymous functions are functions that don't carry an index.
  bool IsAnonymous() const { return index_ == kAnonymousFuncIndex; }
  Kind kind() const { return KindField::decode(flags_); }
  NativeModule* native_module() const { return native_module_; }
  ExecutionTier tier() const { return ExecutionTierField::decode(flags_); }
  Address constant_pool() const;
  Address handler_table() const;
  int handler_table_size() const;
  Address code_comments() const;
  int code_comments_size() const;
  int constant_pool_offset() const { return constant_pool_offset_; }
  int safepoint_table_offset() const { return safepoint_table_offset_; }
  int handler_table_offset() const { return handler_table_offset_; }
  int code_comments_offset() const { return code_comments_offset_; }
  int unpadded_binary_size() const { return unpadded_binary_size_; }
  int stack_slots() const { return stack_slots_; }
  uint16_t first_tagged_parameter_slot() const {
    return tagged_parameter_slots_ >> 16;
  }
  uint16_t num_tagged_parameter_slots() const {
    return tagged_parameter_slots_ & 0xFFFF;
  }
  uint32_t raw_tagged_parameter_slots_for_serialization() const {
    return tagged_parameter_slots_;
  }
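  // For example, {tagged_parameter_slots_ == 0x0004'0002} encodes a first
  // tagged slot of 4 and a count of 2 tagged parameter slots.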

  bool is_liftoff() const { return tier() == ExecutionTier::kLiftoff; }

  bool is_turbofan() const { return tier() == ExecutionTier::kTurbofan; }

  bool contains(Address pc) const {
    return reinterpret_cast<Address>(instructions_) <= pc &&
           pc < reinterpret_cast<Address>(instructions_ + instructions_size_);
  }

  // Only Liftoff code that was generated for debugging can be inspected
  // (otherwise debug side table positions would not match up).
  bool is_inspectable() const { return is_liftoff() && for_debugging(); }

  base::Vector<const uint8_t> protected_instructions_data() const {
    return {meta_data_.get(),
            static_cast<size_t>(protected_instructions_size_)};
  }

  base::Vector<const trap_handler::ProtectedInstructionData>
  protected_instructions() const {
    return base::Vector<const trap_handler::ProtectedInstructionData>::cast(
        protected_instructions_data());
  }

  void Validate() const;
  void Print(const char* name = nullptr) const;
  void MaybePrint() const;
  void Disassemble(const char* name, std::ostream& os,
                   Address current_pc = kNullAddress) const;

  static bool ShouldBeLogged(Isolate* isolate);
  void LogCode(Isolate* isolate, const char* source_url, int script_id) const;

  WasmCode(const WasmCode&) = delete;
  WasmCode& operator=(const WasmCode&) = delete;
  ~WasmCode();

  void IncRef() {
    int old_val = ref_count_.fetch_add(1, std::memory_order_acq_rel);
    DCHECK_LE(1, old_val);
    DCHECK_GT(kMaxInt, old_val);
    USE(old_val);
  }

  // Decrement the ref count. Returns whether this code becomes dead and needs
  // to be freed.
  V8_WARN_UNUSED_RESULT bool DecRef() {
    int old_count = ref_count_.load(std::memory_order_acquire);
    while (true) {
      DCHECK_LE(1, old_count);
      if (V8_UNLIKELY(old_count == 1)) return DecRefOnPotentiallyDeadCode();
      if (ref_count_.compare_exchange_weak(old_count, old_count - 1,
                                           std::memory_order_acq_rel)) {
        return false;
      }
    }
  }

  // Decrement the ref count on code that is known to be in use (i.e. the ref
  // count cannot drop to zero here).
  void DecRefOnLiveCode() {
    int old_count = ref_count_.fetch_sub(1, std::memory_order_acq_rel);
    DCHECK_LE(2, old_count);
    USE(old_count);
  }

  // Decrement the ref count on code that is known to be dead, even though there
  // might still be C++ references. Returns whether this drops the last
  // reference and the code needs to be freed.
  V8_WARN_UNUSED_RESULT bool DecRefOnDeadCode() {
    return ref_count_.fetch_sub(1, std::memory_order_acq_rel) == 1;
  }

  // Decrement the ref count on a set of {WasmCode} objects, potentially
  // belonging to different {NativeModule}s. Dead code will be deleted.
  static void DecrementRefCount(base::Vector<WasmCode* const>);
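
  // Illustrative manual ref-counting (a sketch only; prefer {WasmCodeRefScope}
  // or {GlobalWasmCodeRef} where possible):
  //
  //   code->IncRef();
  //   // ... use {code}, e.g. from another thread ...
  //   if (code->DecRef()) {
  //     // {code} became dead and needs to be freed (see
  //     // {WasmEngine::FreeDeadCode}).
  //   }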

  // Returns the last source position before {offset}.
  int GetSourcePositionBefore(int offset);

  // Returns whether this code was generated for debugging. If this returns
  // {kForDebugging}, but {tier()} is not {kLiftoff}, then Liftoff compilation
  // bailed out.
  ForDebugging for_debugging() const {
    return ForDebuggingField::decode(flags_);
  }

  enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };

 private:
  friend class NativeModule;

  WasmCode(NativeModule* native_module, int index,
           base::Vector<byte> instructions, int stack_slots,
           uint32_t tagged_parameter_slots, int safepoint_table_offset,
           int handler_table_offset, int constant_pool_offset,
           int code_comments_offset, int unpadded_binary_size,
           base::Vector<const byte> protected_instructions_data,
           base::Vector<const byte> reloc_info,
           base::Vector<const byte> source_position_table, Kind kind,
           ExecutionTier tier, ForDebugging for_debugging)
      : native_module_(native_module),
        instructions_(instructions.begin()),
        flags_(KindField::encode(kind) | ExecutionTierField::encode(tier) |
               ForDebuggingField::encode(for_debugging)),
        meta_data_(ConcatenateBytes(
            {protected_instructions_data, reloc_info, source_position_table})),
        instructions_size_(instructions.length()),
        reloc_info_size_(reloc_info.length()),
        source_positions_size_(source_position_table.length()),
        protected_instructions_size_(protected_instructions_data.length()),
        index_(index),
        constant_pool_offset_(constant_pool_offset),
        stack_slots_(stack_slots),
        tagged_parameter_slots_(tagged_parameter_slots),
        safepoint_table_offset_(safepoint_table_offset),
        handler_table_offset_(handler_table_offset),
        code_comments_offset_(code_comments_offset),
        unpadded_binary_size_(unpadded_binary_size) {
    DCHECK_LE(safepoint_table_offset, unpadded_binary_size);
    DCHECK_LE(handler_table_offset, unpadded_binary_size);
    DCHECK_LE(code_comments_offset, unpadded_binary_size);
    DCHECK_LE(constant_pool_offset, unpadded_binary_size);
  }

  std::unique_ptr<const byte[]> ConcatenateBytes(
      std::initializer_list<base::Vector<const byte>>);

  // Tries to get a reasonable name. Lazily looks up the name section, and falls
  // back to the function index. Return value is guaranteed to not be empty.
  std::string DebugName() const;

  // Code objects that have been registered with the global trap handler within
  // this process will have a {trap_handler_index} associated with them.
  int trap_handler_index() const {
    CHECK(has_trap_handler_index());
    return trap_handler_index_;
  }
  void set_trap_handler_index(int value) {
    CHECK(!has_trap_handler_index());
    trap_handler_index_ = value;
  }
  bool has_trap_handler_index() const { return trap_handler_index_ >= 0; }

  // Register protected instruction information with the trap handler. Sets
  // trap_handler_index.
  void RegisterTrapHandlerData();

  // Slow path for {DecRef}: The code becomes potentially dead.
  // Returns whether this code becomes dead and needs to be freed.
  V8_NOINLINE bool DecRefOnPotentiallyDeadCode();

  NativeModule* const native_module_ = nullptr;
  byte* const instructions_;
  const uint8_t flags_;  // Bit field, see below.
  // {meta_data_} contains several byte vectors concatenated into one:
  //  - protected instructions data of size {protected_instructions_size_}
  //  - relocation info of size {reloc_info_size_}
  //  - source positions of size {source_positions_size_}
  // Note that the protected instructions come first to ensure alignment.
  std::unique_ptr<const byte[]> meta_data_;
  const int instructions_size_;
  const int reloc_info_size_;
  const int source_positions_size_;
  const int protected_instructions_size_;
  const int index_;
  const int constant_pool_offset_;
  const int stack_slots_;
  // Number and position of tagged parameters passed to this function via the
  // stack, packed into a single uint32. These values are used by the stack
  // walker (e.g. GC) to find references.
  const uint32_t tagged_parameter_slots_;
  // We care about safepoint data for wasm-to-js functions, since there may be
  // stack/register tagged values for large number conversions.
  const int safepoint_table_offset_;
  const int handler_table_offset_;
  const int code_comments_offset_;
  const int unpadded_binary_size_;
  int trap_handler_index_ = -1;

  // Bits encoded in {flags_}:
  using KindField = base::BitField8<Kind, 0, 3>;
  using ExecutionTierField = KindField::Next<ExecutionTier, 2>;
  using ForDebuggingField = ExecutionTierField::Next<ForDebugging, 2>;

  // WasmCode is ref counted. Counters are held by:
  //   1) The jump table / code table.
  //   2) {WasmCodeRefScope}s.
  //   3) The set of potentially dead code in the {WasmEngine}.
  // If a decrement of (1) would drop the ref count to 0, that code becomes a
  // candidate for garbage collection. At that point, we add a ref count for (3)
  // *before* decrementing the counter to ensure the code stays alive as long as
  // it's being used. Once the ref count drops to zero (i.e. after being removed
  // from (3) and all (2)), the code object is deleted and the memory for the
  // machine code is freed.
  std::atomic<int> ref_count_{1};
};

// Check that {WasmCode} objects are sufficiently small. We create many of them,
// often for rather small functions.
// Increase the limit if needed, but first check if the size increase is
// justified.
#ifndef V8_GC_MOLE
static_assert(sizeof(WasmCode) <= 88);
#endif

WasmCode::Kind GetCodeKind(const WasmCompilationResult& result);

// Return a textual description of the kind.
const char* GetWasmCodeKindAsString(WasmCode::Kind);

// Manages the code reservations and allocations of a single {NativeModule}.
class WasmCodeAllocator {
 public:
#if V8_TARGET_ARCH_ARM64
  // ARM64 only supports direct calls within a 128 MB range.
  static constexpr size_t kMaxCodeSpaceSize = 128 * MB;
#elif V8_TARGET_ARCH_PPC64
  // Branches only take 26 bits.
  static constexpr size_t kMaxCodeSpaceSize = 32 * MB;
#else
  // Use 1024 MB limit for code spaces on other platforms. This is smaller than
  // the total allowed code space (kMaxWasmCodeMemory) to avoid unnecessarily
  // big reservations, and to ensure that distances within a code space fit
  // within a 32-bit signed integer.
  static constexpr size_t kMaxCodeSpaceSize = 1024 * MB;
#endif

  explicit WasmCodeAllocator(std::shared_ptr<Counters> async_counters);
  ~WasmCodeAllocator();

  // Call before use, after the {NativeModule} is set up completely.
  void Init(VirtualMemory code_space);

  size_t committed_code_space() const {
    return committed_code_space_.load(std::memory_order_acquire);
  }
  size_t generated_code_size() const {
    return generated_code_size_.load(std::memory_order_acquire);
  }
  size_t freed_code_size() const {
    return freed_code_size_.load(std::memory_order_acquire);
  }

  // Allocate code space. Returns a valid buffer or fails with OOM (crash).
  // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
  base::Vector<byte> AllocateForCode(NativeModule*, size_t size);

  // Allocate code space within a specific region. Returns a valid buffer or
  // fails with OOM (crash).
  // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
  base::Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
                                             base::AddressRegion);

  // Increases or decreases the {writers_count_} field. While there is at least
  // one writer, it is allowed to call {MakeWritable} to make regions writable.
  // When the last writer is removed, all code is switched back to
  // write-protected.
  // Hold the {NativeModule}'s {allocation_mutex_} when calling one of these
  // methods. The methods should only be called via {CodeSpaceWriteScope}.
  V8_EXPORT_PRIVATE void AddWriter();
  V8_EXPORT_PRIVATE void RemoveWriter();

  // Make a code region writable. Only allowed if there is at least one writer
  // (see above).
  // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
  V8_EXPORT_PRIVATE void MakeWritable(base::AddressRegion);

  // Free memory pages of all given code objects. Used for wasm code GC.
  // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
  void FreeCode(base::Vector<WasmCode* const>);

  // Retrieve the number of separately reserved code spaces.
  // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
  size_t GetNumCodeSpaces() const;

  Counters* counters() const { return async_counters_.get(); }

 private:
  // Sentinel value to be used for {AllocateForCodeInRegion} for specifying no
  // restriction on the region to allocate in.
  static constexpr base::AddressRegion kUnrestrictedRegion{
      kNullAddress, std::numeric_limits<size_t>::max()};

  void InsertIntoWritableRegions(base::AddressRegion region,
                                 bool switch_to_writable);

  //////////////////////////////////////////////////////////////////////////////
  // These fields are protected by the mutex in {NativeModule}.

  // Code space that was reserved and is available for allocations (subset of
  // {owned_code_space_}).
  DisjointAllocationPool free_code_space_;
  // Code space that was allocated for code (subset of {owned_code_space_}).
  DisjointAllocationPool allocated_code_space_;
  // Code space that was allocated before but is dead now. Full pages within
  // this region are discarded. It's still a subset of {owned_code_space_}.
  DisjointAllocationPool freed_code_space_;
  std::vector<VirtualMemory> owned_code_space_;

  // The following two fields are only used if {protect_code_memory_} is true.
  int writers_count_{0};
  std::set<base::AddressRegion, base::AddressRegion::StartAddressLess>
      writable_memory_;

  // End of fields protected by the mutex in {NativeModule}.
  //////////////////////////////////////////////////////////////////////////////

  // {protect_code_memory_} is true if traditional memory permission switching
  // is used to protect code space. It is false if {MAP_JIT} on Mac or PKU is
  // being used, or protection is completely disabled.
  const bool protect_code_memory_;
  std::atomic<size_t> committed_code_space_{0};
  std::atomic<size_t> generated_code_size_{0};
  std::atomic<size_t> freed_code_size_{0};

  std::shared_ptr<Counters> async_counters_;
};

class V8_EXPORT_PRIVATE NativeModule final {
 public:
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64 || \
    V8_TARGET_ARCH_PPC64
  static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = true;
#else
  static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = false;
#endif

  NativeModule(const NativeModule&) = delete;
  NativeModule& operator=(const NativeModule&) = delete;
  ~NativeModule();

  // {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
  // code below, i.e. it can be called concurrently from background threads.
  // The returned code still needs to be published via {PublishCode}.
  std::unique_ptr<WasmCode> AddCode(
      int index, const CodeDesc& desc, int stack_slots,
      uint32_t tagged_parameter_slots,
      base::Vector<const byte> protected_instructions,
      base::Vector<const byte> source_position_table, WasmCode::Kind kind,
      ExecutionTier tier, ForDebugging for_debugging);

  // {PublishCode} makes the code available to the system by entering it into
  // the code table and patching the jump table. It returns a raw pointer to the
  // given {WasmCode} object. Ownership is transferred to the {NativeModule}.
  WasmCode* PublishCode(std::unique_ptr<WasmCode>);
  std::vector<WasmCode*> PublishCode(base::Vector<std::unique_ptr<WasmCode>>);
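
  // Illustrative compile-and-publish sequence (a sketch; most arguments
  // elided):
  //
  //   std::unique_ptr<WasmCode> code =
  //       native_module->AddCode(func_index, desc, ...);
  //   WasmCode* published_code = native_module->PublishCode(std::move(code));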

  // ReinstallDebugCode does a subset of PublishCode: It installs the code in
  // the code table and patches the jump table. The given code must be debug
  // code (with breakpoints) and must be owned by this {NativeModule} already.
  // This method is used to re-instantiate code that was removed from the code
  // table and jump table via another {PublishCode}.
  void ReinstallDebugCode(WasmCode*);

  struct JumpTablesRef {
    Address jump_table_start = kNullAddress;
    Address far_jump_table_start = kNullAddress;

    bool is_valid() const { return far_jump_table_start != kNullAddress; }
  };

  std::pair<base::Vector<uint8_t>, JumpTablesRef> AllocateForDeserializedCode(
      size_t total_code_size);

  std::unique_ptr<WasmCode> AddDeserializedCode(
      int index, base::Vector<byte> instructions, int stack_slots,
      uint32_t tagged_parameter_slots, int safepoint_table_offset,
      int handler_table_offset, int constant_pool_offset,
      int code_comments_offset, int unpadded_binary_size,
      base::Vector<const byte> protected_instructions_data,
      base::Vector<const byte> reloc_info,
      base::Vector<const byte> source_position_table, WasmCode::Kind kind,
      ExecutionTier tier);

  // Adds anonymous code for testing purposes.
  WasmCode* AddCodeForTesting(Handle<Code> code);

  // Use {UseLazyStub} to set up lazy compilation per function. It will use the
  // existing {WasmCode::kWasmCompileLazy} runtime stub and populate the jump
  // table with trampolines accordingly.
  void UseLazyStub(uint32_t func_index);

  // Creates a snapshot of the current state of the code table. This is useful
  // to get a consistent view of the table (e.g. used by the serializer).
  std::vector<WasmCode*> SnapshotCodeTable() const;
  // Creates a snapshot of all {owned_code_}; this will transfer new code (if
  // any) to {owned_code_}.
  std::vector<WasmCode*> SnapshotAllOwnedCode() const;

  WasmCode* GetCode(uint32_t index) const;
  bool HasCode(uint32_t index) const;
  bool HasCodeWithTier(uint32_t index, ExecutionTier tier) const;

  void SetWasmSourceMap(std::unique_ptr<WasmModuleSourceMap> source_map);
  WasmModuleSourceMap* GetWasmSourceMap() const;

  Address jump_table_start() const {
    return main_jump_table_ ? main_jump_table_->instruction_start()
                            : kNullAddress;
  }

  uint32_t GetJumpTableOffset(uint32_t func_index) const;

  // Returns the canonical target to call for the given function (the slot in
  // the first jump table).
  Address GetCallTargetForFunction(uint32_t func_index) const;

  // Finds the jump tables that should be used for a given code region. This
  // information is then passed to {GetNearCallTargetForFunction} and
  // {GetNearRuntimeStubEntry} to avoid the overhead of looking this information
  // up there. Returns an empty struct if no suitable jump tables exist.
  JumpTablesRef FindJumpTablesForRegionLocked(base::AddressRegion) const;

  // Similar to {GetCallTargetForFunction}, but uses the jump table previously
  // looked up via {FindJumpTablesForRegionLocked}.
  Address GetNearCallTargetForFunction(uint32_t func_index,
                                       const JumpTablesRef&) const;

  // Get a runtime stub entry (which is a far jump table slot) in the jump table
  // previously looked up via {FindJumpTablesForRegionLocked}.
  Address GetNearRuntimeStubEntry(WasmCode::RuntimeStubId index,
                                  const JumpTablesRef&) const;
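
  // Illustrative lookup sequence (a sketch; the {allocation_mutex_} must be
  // held for the {...Locked} call):
  //
  //   JumpTablesRef jump_tables = FindJumpTablesForRegionLocked(code_region);
  //   if (jump_tables.is_valid()) {
  //     Address call_target =
  //         GetNearCallTargetForFunction(func_index, jump_tables);
  //     Address stub_entry =
  //         GetNearRuntimeStubEntry(WasmCode::kWasmStackGuard, jump_tables);
  //   }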

  // Reverse lookup from a given call target (which must be a jump table slot)
  // to a function index.
  uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;

  void AddWriter() {
    base::RecursiveMutexGuard guard{&allocation_mutex_};
    code_allocator_.AddWriter();
  }

  void RemoveWriter() {
    base::RecursiveMutexGuard guard{&allocation_mutex_};
    code_allocator_.RemoveWriter();
  }

  void MakeWritable(base::AddressRegion region) {
    base::RecursiveMutexGuard guard{&allocation_mutex_};
    code_allocator_.MakeWritable(region);
  }
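
  // {AddWriter} and {RemoveWriter} are normally not called directly but via a
  // {CodeSpaceWriteScope} (a friend class, see below), which pairs them in its
  // constructor and destructor; {MakeWritable} is then allowed while such a
  // scope is open. Illustrative usage (a sketch):
  //
  //   CodeSpaceWriteScope write_scope(native_module);
  //   // ... code and jump tables may be patched here ...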

  // For cctests, where we build both WasmModule and the runtime objects
  // on the fly, and bypass the instance builder pipeline.
  void ReserveCodeTableForTesting(uint32_t max_functions);

  void LogWasmCodes(Isolate*, Script);

  CompilationState* compilation_state() const {
    return compilation_state_.get();
  }

  // Create a {CompilationEnv} object for compilation. The caller has to ensure
  // that the {WasmModule} pointer stays valid while the {CompilationEnv} is
  // being used.
  CompilationEnv CreateCompilationEnv() const;

  uint32_t num_functions() const {
    return module_->num_declared_functions + module_->num_imported_functions;
  }
  uint32_t num_imported_functions() const {
    return module_->num_imported_functions;
  }
  BoundsCheckStrategy bounds_checks() const { return bounds_checks_; }
  void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
  bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
  base::Vector<const uint8_t> wire_bytes() const {
    return std::atomic_load(&wire_bytes_)->as_vector();
  }
  const WasmModule* module() const { return module_.get(); }
  std::shared_ptr<const WasmModule> shared_module() const { return module_; }
  size_t committed_code_space() const {
    return code_allocator_.committed_code_space();
  }
  size_t generated_code_size() const {
    return code_allocator_.generated_code_size();
  }
  size_t liftoff_bailout_count() const { return liftoff_bailout_count_.load(); }
  size_t liftoff_code_size() const { return liftoff_code_size_.load(); }
  size_t turbofan_code_size() const { return turbofan_code_size_.load(); }
  size_t baseline_compilation_cpu_duration() const {
    return baseline_compilation_cpu_duration_.load();
  }
  size_t tier_up_cpu_duration() const { return tier_up_cpu_duration_.load(); }

  bool HasWireBytes() const {
    auto wire_bytes = std::atomic_load(&wire_bytes_);
    return wire_bytes && !wire_bytes->empty();
  }
  void SetWireBytes(base::OwnedVector<const uint8_t> wire_bytes);

  void UpdateCPUDuration(size_t cpu_duration, ExecutionTier tier);
  void AddLiftoffBailout() {
    liftoff_bailout_count_.fetch_add(1, std::memory_order_relaxed);
  }

  WasmCode* Lookup(Address) const;

  WasmImportWrapperCache* import_wrapper_cache() const {
    return import_wrapper_cache_.get();
  }

  const WasmFeatures& enabled_features() const { return enabled_features_; }

  // Returns the runtime stub id that corresponds to the given address (which
  // must be a far jump table slot). Returns {kRuntimeStubCount} on failure.
  WasmCode::RuntimeStubId GetRuntimeStubId(Address runtime_stub_target) const;

  // Sample the current code size of this module into the given counters.
  enum CodeSamplingTime : int8_t { kAfterBaseline, kAfterTopTier, kSampling };
  void SampleCodeSize(Counters*, CodeSamplingTime) const;

  V8_WARN_UNUSED_RESULT std::unique_ptr<WasmCode> AddCompiledCode(
      WasmCompilationResult);
  V8_WARN_UNUSED_RESULT std::vector<std::unique_ptr<WasmCode>> AddCompiledCode(
      base::Vector<WasmCompilationResult>);

  // Set a new tiering state, but don't trigger any recompilation yet; use
  // {RecompileForTiering} for that. The two steps are split because in some
  // scenarios we need to drop locks before triggering recompilation.
  void SetTieringState(TieringState);

  // Check whether this module is tiered down for debugging.
  bool IsTieredDown();

  // Fully recompile this module in the tier set previously via
  // {SetTieringState}. The calling thread contributes to compilation and only
  // returns once recompilation is done.
  void RecompileForTiering();

  // Find all functions that need to be recompiled for a new tier. Note that
  // compilation jobs might run concurrently, so this method only considers the
  // compilation state of this native module at the time of the call.
  // Returns a vector of function indexes to recompile.
  std::vector<int> FindFunctionsToRecompile(TieringState);

  // Free a set of functions of this module. Uncommits whole pages if possible.
  // The given vector must be ordered by the instruction start address, and all
  // {WasmCode} objects must not be used any more.
  // Should only be called via {WasmEngine::FreeDeadCode}, so the engine can do
  // its accounting.
  void FreeCode(base::Vector<WasmCode* const>);

  // Retrieve the number of separately reserved code spaces for this module.
  size_t GetNumberOfCodeSpacesForTesting() const;

  // Check whether there is DebugInfo for this NativeModule.
  bool HasDebugInfo() const;

  // Get or create the debug info for this NativeModule.
  DebugInfo* GetDebugInfo();

  uint32_t* tiering_budget_array() { return tiering_budgets_.get(); }

  Counters* counters() const { return code_allocator_.counters(); }

 private:
  friend class WasmCode;
  friend class WasmCodeAllocator;
  friend class WasmCodeManager;
  friend class CodeSpaceWriteScope;

  struct CodeSpaceData {
    base::AddressRegion region;
    WasmCode* jump_table;
    WasmCode* far_jump_table;
  };

  // Private constructor, called via {WasmCodeManager::NewNativeModule()}.
  NativeModule(const WasmFeatures& enabled_features,
               DynamicTiering dynamic_tiering, VirtualMemory code_space,
               std::shared_ptr<const WasmModule> module,
               std::shared_ptr<Counters> async_counters,
               std::shared_ptr<NativeModule>* shared_this);

  std::unique_ptr<WasmCode> AddCodeWithCodeSpace(
      int index, const CodeDesc& desc, int stack_slots,
      uint32_t tagged_parameter_slots,
      base::Vector<const byte> protected_instructions_data,
      base::Vector<const byte> source_position_table, WasmCode::Kind kind,
      ExecutionTier tier, ForDebugging for_debugging,
      base::Vector<uint8_t> code_space, const JumpTablesRef& jump_tables_ref);

  WasmCode* CreateEmptyJumpTableInRegionLocked(int jump_table_size,
                                               base::AddressRegion);

  void UpdateCodeSize(size_t, ExecutionTier, ForDebugging);

  // Hold the {allocation_mutex_} when calling one of these methods.
  // {slot_index} is the index in the declared functions, i.e. function index
  // minus the number of imported functions.
  void PatchJumpTablesLocked(uint32_t slot_index, Address target);
  void PatchJumpTableLocked(const CodeSpaceData&, uint32_t slot_index,
                            Address target);

  // Called by the {WasmCodeAllocator} to register a new code space.
  void AddCodeSpaceLocked(base::AddressRegion);

  // Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
  WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>);

  // Transfer owned code from {new_owned_code_} to {owned_code_}.
  void TransferNewOwnedCodeLocked() const;

  // Add code to the code cache, if it meets the criteria for being cached and
  // we do not have code in the cache yet.
  void InsertToCodeCache(WasmCode* code);

  // -- Fields of {NativeModule} start here.

  // Keep the engine alive as long as this NativeModule is alive. In its
  // destructor, the NativeModule still communicates with the WasmCodeManager,
  // owned by the engine. This field comes before other fields which also still
  // access the engine (like the code allocator), so that its destructor runs
  // last.
  OperationsBarrier::Token engine_scope_;

  // {WasmCodeAllocator} manages all code reservations and allocations for this
  // {NativeModule}.
  WasmCodeAllocator code_allocator_;

  // Features enabled for this module. We keep a copy of the features that
  // were enabled at the time of the creation of this native module,
  // to be consistent across asynchronous compilations later.
  const WasmFeatures enabled_features_;

  // The decoded module, stored in a shared_ptr such that background compile
  // tasks can keep this alive.
  std::shared_ptr<const WasmModule> module_;

  std::unique_ptr<WasmModuleSourceMap> source_map_;

  // Wire bytes, held in a shared_ptr so they can be kept alive by the
  // {WireBytesStorage}, held by background compile tasks.
  std::shared_ptr<base::OwnedVector<const uint8_t>> wire_bytes_;

  // The first allocated jump table. Always used by external calls (from JS).
  // Wasm calls might use one of the other jump tables stored in
  // {code_space_data_}.
  WasmCode* main_jump_table_ = nullptr;

  // The first allocated far jump table.
  WasmCode* main_far_jump_table_ = nullptr;

  // Lazy compile stub table, containing entries to jump to the
  // {WasmCompileLazy} builtin, passing the function index.
  WasmCode* lazy_compile_table_ = nullptr;

  // The compilation state keeps track of compilation tasks for this module.
  // Note that its destructor blocks until all tasks are finished/aborted and
  // hence needs to be destructed first when this native module dies.
  std::unique_ptr<CompilationState> compilation_state_;

  // A cache of the import wrappers, keyed on the kind and signature.
  std::unique_ptr<WasmImportWrapperCache> import_wrapper_cache_;

  // Array of per-function call budgets, used to drive dynamic tier-up of hot
  // functions.
  std::unique_ptr<uint32_t[]> tiering_budgets_;

  // This mutex protects concurrent calls to {AddCode} and friends.
  // TODO(dlehmann): Revert this to a regular {Mutex} again.
  // This needs to be a {RecursiveMutex} only because of {CodeSpaceWriteScope}
  // usages, which are (1) either at places that already hold the
  // {allocation_mutex_} or (2) because of multiple open {CodeSpaceWriteScope}s
  // in the call hierarchy. Both are fixable.
  mutable base::RecursiveMutex allocation_mutex_;

  //////////////////////////////////////////////////////////////////////////////
  // Protected by {allocation_mutex_}:

  // Holds allocated code objects for fast lookup and deletion. For lookup based
  // on pc, the key is the instruction start address of the value. Filled lazily
  // from {new_owned_code_} (below).
  mutable std::map<Address, std::unique_ptr<WasmCode>> owned_code_;

  // Holds owned code which is not inserted into {owned_code_} yet. It will be
  // inserted on demand. This has much better performance than inserting
  // individual code objects.
  mutable std::vector<std::unique_ptr<WasmCode>> new_owned_code_;

  // Table of the latest code object per function, updated on initial
  // compilation and tier up. The number of entries is
  // {WasmModule::num_declared_functions}, i.e. there are no entries for
  // imported functions.
  std::unique_ptr<WasmCode*[]> code_table_;

  // Data (especially jump table) per code space.
  std::vector<CodeSpaceData> code_space_data_;

  // Debug information for this module. You only need to hold the allocation
  // mutex while getting the {DebugInfo} pointer, or initializing this field.
  // Further accesses to the {DebugInfo} do not need to be protected by the
  // mutex.
  std::unique_ptr<DebugInfo> debug_info_;

  TieringState tiering_state_ = kTieredUp;

  // Cache both baseline and top-tier code if we are debugging, to speed up
  // repeated enabling/disabling of the debugger or profiler.
  // Maps <tier, function_index> to WasmCode.
  std::unique_ptr<std::map<std::pair<ExecutionTier, int>, WasmCode*>>
      cached_code_;

  // End of fields protected by {allocation_mutex_}.
  //////////////////////////////////////////////////////////////////////////////

  const BoundsCheckStrategy bounds_checks_;
  bool lazy_compile_frozen_ = false;
  std::atomic<size_t> liftoff_bailout_count_{0};
  std::atomic<size_t> liftoff_code_size_{0};
  std::atomic<size_t> turbofan_code_size_{0};
  std::atomic<size_t> baseline_compilation_cpu_duration_{0};
  std::atomic<size_t> tier_up_cpu_duration_{0};
};

class V8_EXPORT_PRIVATE WasmCodeManager final {
 public:
  WasmCodeManager();
  WasmCodeManager(const WasmCodeManager&) = delete;
  WasmCodeManager& operator=(const WasmCodeManager&) = delete;

  ~WasmCodeManager();

#if defined(V8_OS_WIN64)
  static bool CanRegisterUnwindInfoForNonABICompliantCodeRange();
#endif  // V8_OS_WIN64

  NativeModule* LookupNativeModule(Address pc) const;
  WasmCode* LookupCode(Address pc) const;
  size_t committed_code_space() const {
    return total_committed_code_space_.load();
  }

  // Estimate the needed code space for a Liftoff function based on the size of
  // the function body (wasm byte code).
  static size_t EstimateLiftoffCodeSize(int body_size);
  // Estimate the needed code space from a completely decoded module.
  static size_t EstimateNativeModuleCodeSize(const WasmModule* module,
                                             bool include_liftoff,
                                             DynamicTiering dynamic_tiering);
  // Estimate the needed code space from the number of functions and total code
  // section length.
  static size_t EstimateNativeModuleCodeSize(int num_functions,
                                             int num_imported_functions,
                                             int code_section_length,
                                             bool include_liftoff,
                                             DynamicTiering dynamic_tiering);
  // Estimate the size of metadata needed for the NativeModule, excluding
  // generated code. This data is still stored on the C++ heap.
  static size_t EstimateNativeModuleMetaDataSize(const WasmModule* module);

  // Set this thread's permissions for all owned code space to read-write or
  // read-only (if {writable} is false). Can only be called if
  // {HasMemoryProtectionKeySupport()} is {true}.
  // Since the permission is thread-local, there is no requirement to hold any
  // lock when calling this method.
  void SetThreadWritable(bool writable);

  // Returns true if there is hardware support for PKU. Use
  // {MemoryProtectionKeysEnabled} to also check if PKU usage is enabled via
  // flags.
  bool HasMemoryProtectionKeySupport() const;

  // Returns true if PKU should be used.
  bool MemoryProtectionKeysEnabled() const;

  // Returns {true} if the memory protection key is write-enabled for the
  // current thread.
  // Can only be called if {HasMemoryProtectionKeySupport()} is {true}.
  bool MemoryProtectionKeyWritable() const;

  // Initialize the current thread's permissions for the memory protection key,
  // if we have support.
  void InitializeMemoryProtectionKeyPermissionsIfSupported() const;

  // Allocate new memory for assembler buffers, potentially protected by PKU.
  base::AddressRegion AllocateAssemblerBufferSpace(int size);

  // Free previously allocated space for assembler buffers.
  void FreeAssemblerBufferSpace(base::AddressRegion region);

 private:
  friend class WasmCodeAllocator;
  friend class WasmEngine;

  std::shared_ptr<NativeModule> NewNativeModule(
      Isolate* isolate, const WasmFeatures& enabled_features,
      size_t code_size_estimate, std::shared_ptr<const WasmModule> module);

  V8_WARN_UNUSED_RESULT VirtualMemory TryAllocate(size_t size,
                                                  void* hint = nullptr);
  void Commit(base::AddressRegion);
  void Decommit(base::AddressRegion);

  void FreeNativeModule(base::Vector<VirtualMemory> owned_code,
                        size_t committed_size);

  void AssignRange(base::AddressRegion, NativeModule*);

  const size_t max_committed_code_space_;

  std::atomic<size_t> total_committed_code_space_{0};
  // If the committed code space exceeds {critical_committed_code_space_}, then
  // we trigger a GC before creating the next module. This value is set to the
  // currently committed space plus 50% of the available code space on creation
  // and updated after each GC.
  std::atomic<size_t> critical_committed_code_space_;

  int memory_protection_key_;

  mutable base::Mutex native_modules_mutex_;

  //////////////////////////////////////////////////////////////////////////////
  // Protected by {native_modules_mutex_}:

  std::map<Address, std::pair<Address, NativeModule*>> lookup_map_;

  // End of fields protected by {native_modules_mutex_}.
  //////////////////////////////////////////////////////////////////////////////
};

// {WasmCodeRefScope}s form a perfect stack. New {WasmCode} pointers generated
// by e.g. creating new code or looking up code by its address are added to the
// top-most {WasmCodeRefScope}.
class V8_EXPORT_PRIVATE V8_NODISCARD WasmCodeRefScope {
 public:
  WasmCodeRefScope();
  WasmCodeRefScope(const WasmCodeRefScope&) = delete;
  WasmCodeRefScope& operator=(const WasmCodeRefScope&) = delete;
  ~WasmCodeRefScope();

  // Register a {WasmCode} reference in the current {WasmCodeRefScope}. Fails if
  // there is no current scope.
  static void AddRef(WasmCode*);

 private:
  WasmCodeRefScope* const previous_scope_;
  std::vector<WasmCode*> code_ptrs_;
};
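
// Illustrative usage (a sketch): {WasmCode} pointers obtained while a
// {WasmCodeRefScope} is alive stay valid at least until the scope dies.
//
//   {
//     WasmCodeRefScope code_ref_scope;
//     WasmCode* code = native_module->Lookup(pc);
//     // {code} is kept alive by {code_ref_scope}.
//   }  // References registered in the scope are dropped here.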

// Similarly to a global handle, a {GlobalWasmCodeRef} stores a single
// ref-counted pointer to a {WasmCode} object.
class GlobalWasmCodeRef {
 public:
  explicit GlobalWasmCodeRef(WasmCode* code,
                             std::shared_ptr<NativeModule> native_module)
      : code_(code), native_module_(std::move(native_module)) {
    code_->IncRef();
  }

  GlobalWasmCodeRef(const GlobalWasmCodeRef&) = delete;
  GlobalWasmCodeRef& operator=(const GlobalWasmCodeRef&) = delete;

  ~GlobalWasmCodeRef() { WasmCode::DecrementRefCount({&code_, 1}); }

  // Get a pointer to the contained {WasmCode} object. This is only guaranteed
  // to exist as long as this {GlobalWasmCodeRef} exists.
  WasmCode* code() const { return code_; }

 private:
  WasmCode* const code_;
  // Also keep the {NativeModule} alive.
  const std::shared_ptr<NativeModule> native_module_;
};
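
// Illustrative usage (a sketch): keep a single code object (and its
// {NativeModule}) alive for an arbitrary C++ lifetime.
//
//   auto global_ref =
//       std::make_unique<GlobalWasmCodeRef>(code, native_module);
//   Address entry = global_ref->code()->instruction_start();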

Builtin RuntimeStubIdToBuiltinName(WasmCode::RuntimeStubId);
const char* GetRuntimeStubName(WasmCode::RuntimeStubId);

}  // namespace wasm
}  // namespace internal
}  // namespace v8

#endif  // V8_WASM_WASM_CODE_MANAGER_H_