// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if !V8_ENABLE_WEBASSEMBLY
#error This header should only be included if WebAssembly is enabled.
#endif  // !V8_ENABLE_WEBASSEMBLY

#ifndef V8_WASM_WASM_CODE_MANAGER_H_
#define V8_WASM_WASM_CODE_MANAGER_H_

#include <atomic>
#include <map>
#include <memory>
#include <set>
#include <utility>
#include <vector>

#include "src/base/address-region.h"
#include "src/base/bit-field.h"
#include "src/base/macros.h"
#include "src/base/vector.h"
#include "src/builtins/builtins.h"
#include "src/common/code-memory-access.h"
#include "src/handles/handles.h"
#include "src/tasks/operations-barrier.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module-sourcemap.h"
#include "src/wasm/wasm-tier.h"

namespace v8 {
namespace internal {

class Code;
class CodeDesc;
class Isolate;

namespace wasm {

class DebugInfo;
class NamesProvider;
class NativeModule;
struct WasmCompilationResult;
class WasmEngine;
class WasmImportWrapperCache;
struct WasmModule;

// Convenience macro listing all wasm runtime stubs. Note that the first few
// elements of the list coincide with {compiler::TrapId}, so the order matters.
#define WASM_RUNTIME_STUB_LIST(V, VTRAP) \
  FOREACH_WASM_TRAPREASON(VTRAP)         \
  V(WasmCompileLazy)                     \
  V(WasmTriggerTierUp)                   \
  V(WasmDebugBreak)                      \
  V(WasmInt32ToHeapNumber)               \
  V(WasmTaggedNonSmiToInt32)             \
  V(WasmFloat32ToNumber)                 \
  V(WasmFloat64ToNumber)                 \
  V(WasmTaggedToFloat64)                 \
  V(WasmAllocateJSArray)                 \
  V(WasmAtomicNotify)                    \
  V(WasmI32AtomicWait32)                 \
  V(WasmI32AtomicWait64)                 \
  V(WasmI64AtomicWait32)                 \
  V(WasmI64AtomicWait64)                 \
  V(WasmGetOwnProperty)                  \
  V(WasmRefFunc)                         \
  V(WasmMemoryGrow)                      \
  V(WasmTableInit)                       \
  V(WasmTableCopy)                       \
  V(WasmTableFill)                       \
  V(WasmTableGrow)                       \
  V(WasmTableGet)                        \
  V(WasmTableSet)                        \
  V(WasmTableGetFuncRef)                 \
  V(WasmTableSetFuncRef)                 \
  V(WasmStackGuard)                      \
  V(WasmStackOverflow)                   \
  V(WasmAllocateFixedArray)              \
  V(WasmThrow)                           \
  V(WasmRethrow)                         \
  V(WasmRethrowExplicitContext)          \
  V(WasmTraceEnter)                      \
  V(WasmTraceExit)                       \
  V(WasmTraceMemory)                     \
  V(BigIntToI32Pair)                     \
  V(BigIntToI64)                         \
  V(CallRefIC)                           \
  V(DoubleToI)                           \
  V(I32PairToBigInt)                     \
  V(I64ToBigInt)                         \
  V(RecordWriteSaveFP)                   \
  V(RecordWriteIgnoreFP)                 \
  V(ToNumber)                            \
  IF_TSAN(V, TSANRelaxedStore8IgnoreFP)  \
  IF_TSAN(V, TSANRelaxedStore8SaveFP)    \
  IF_TSAN(V, TSANRelaxedStore16IgnoreFP) \
  IF_TSAN(V, TSANRelaxedStore16SaveFP)   \
  IF_TSAN(V, TSANRelaxedStore32IgnoreFP) \
  IF_TSAN(V, TSANRelaxedStore32SaveFP)   \
  IF_TSAN(V, TSANRelaxedStore64IgnoreFP) \
  IF_TSAN(V, TSANRelaxedStore64SaveFP)   \
  IF_TSAN(V, TSANSeqCstStore8IgnoreFP)   \
  IF_TSAN(V, TSANSeqCstStore8SaveFP)     \
  IF_TSAN(V, TSANSeqCstStore16IgnoreFP)  \
  IF_TSAN(V, TSANSeqCstStore16SaveFP)    \
  IF_TSAN(V, TSANSeqCstStore32IgnoreFP)  \
  IF_TSAN(V, TSANSeqCstStore32SaveFP)    \
  IF_TSAN(V, TSANSeqCstStore64IgnoreFP)  \
  IF_TSAN(V, TSANSeqCstStore64SaveFP)    \
  IF_TSAN(V, TSANRelaxedLoad32IgnoreFP)  \
  IF_TSAN(V, TSANRelaxedLoad32SaveFP)    \
  IF_TSAN(V, TSANRelaxedLoad64IgnoreFP)  \
  IF_TSAN(V, TSANRelaxedLoad64SaveFP)    \
  V(WasmAllocateArray_Uninitialized)     \
  V(WasmArrayCopy)                       \
  V(WasmArrayCopyWithChecks)             \
  V(WasmArrayNewSegment)                 \
  V(WasmAllocateStructWithRtt)           \
  V(WasmSubtypeCheck)                    \
  V(WasmOnStackReplace)                  \
  V(WasmSuspend)                         \
  V(WasmStringNewWtf8)                   \
  V(WasmStringNewWtf16)                  \
  V(WasmStringConst)                     \
  V(WasmStringMeasureUtf8)               \
  V(WasmStringMeasureWtf8)               \
  V(WasmStringEncodeWtf8)                \
  V(WasmStringEncodeWtf16)               \
  V(WasmStringConcat)                    \
  V(WasmStringEqual)                     \
  V(WasmStringIsUSVSequence)             \
  V(WasmStringViewWtf16GetCodeUnit)      \
  V(WasmStringViewWtf16Encode)           \
  V(WasmStringViewWtf16Slice)            \
  V(WasmStringNewWtf8Array)              \
  V(WasmStringNewWtf16Array)             \
  V(WasmStringEncodeWtf8Array)           \
  V(WasmStringEncodeWtf16Array)          \
  V(WasmStringAsWtf8)                    \
  V(WasmStringViewWtf8Advance)           \
  V(WasmStringViewWtf8Encode)            \
  V(WasmStringViewWtf8Slice)             \
  V(WasmStringAsIter)                    \
  V(WasmStringViewIterNext)              \
  V(WasmStringViewIterAdvance)           \
  V(WasmStringViewIterRewind)            \
  V(WasmStringViewIterSlice)

// Sorted, disjoint memory regions. Each region has the form [start, end);
// adjacent regions [start, end) and [end, other_end) are merged into
// [start, other_end).
class V8_EXPORT_PRIVATE DisjointAllocationPool final {
 public:
  MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(DisjointAllocationPool);
  explicit DisjointAllocationPool(base::AddressRegion region)
      : regions_({region}) {}

  // Merge the given region into this pool. The region must not intersect any
  // region already in the pool - for example, it was obtained from a previous
  // {Allocate}. Returns the merged region.
  base::AddressRegion Merge(base::AddressRegion);

  // Allocate a contiguous region of size {size}. Return an empty region on
  // failure.
  base::AddressRegion Allocate(size_t size);

  // Allocate a contiguous region of size {size} within {region}. Return an
  // empty region on failure.
  base::AddressRegion AllocateInRegion(size_t size, base::AddressRegion);

  bool IsEmpty() const { return regions_.empty(); }

  const auto& regions() const { return regions_; }

 private:
  std::set<base::AddressRegion, base::AddressRegion::StartAddressLess> regions_;
};
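
// Illustrative usage sketch, not part of this header: carve a sub-region out
// of the pool and later return it. {base} stands for the start address of an
// already-reserved region and is an assumption of this example.
//
//   DisjointAllocationPool pool({base, 4096});
//   base::AddressRegion chunk = pool.Allocate(256);  // empty region on failure
//   // ... use the memory, then give it back to the pool:
//   pool.Merge(chunk);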

class V8_EXPORT_PRIVATE WasmCode final {
 public:
  enum Kind { kWasmFunction, kWasmToCapiWrapper, kWasmToJsWrapper, kJumpTable };

  // Each runtime stub is identified by an id. This id is used to reference the
  // stub via {RelocInfo::WASM_STUB_CALL} and gets resolved during relocation.
  enum RuntimeStubId {
#define DEF_ENUM(Name) k##Name,
#define DEF_ENUM_TRAP(Name) kThrowWasm##Name,
    WASM_RUNTIME_STUB_LIST(DEF_ENUM, DEF_ENUM_TRAP)
#undef DEF_ENUM_TRAP
#undef DEF_ENUM
        kRuntimeStubCount
  };

  static constexpr RuntimeStubId GetRecordWriteStub(SaveFPRegsMode fp_mode) {
    switch (fp_mode) {
      case SaveFPRegsMode::kIgnore:
        return RuntimeStubId::kRecordWriteIgnoreFP;
      case SaveFPRegsMode::kSave:
        return RuntimeStubId::kRecordWriteSaveFP;
    }
  }

#ifdef V8_IS_TSAN
  static RuntimeStubId GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
                                        std::memory_order order) {
    if (order == std::memory_order_relaxed) {
      if (size == kInt8Size) {
        return fp_mode == SaveFPRegsMode::kIgnore
                   ? RuntimeStubId::kTSANRelaxedStore8IgnoreFP
                   : RuntimeStubId::kTSANRelaxedStore8SaveFP;
      } else if (size == kInt16Size) {
        return fp_mode == SaveFPRegsMode::kIgnore
                   ? RuntimeStubId::kTSANRelaxedStore16IgnoreFP
                   : RuntimeStubId::kTSANRelaxedStore16SaveFP;
      } else if (size == kInt32Size) {
        return fp_mode == SaveFPRegsMode::kIgnore
                   ? RuntimeStubId::kTSANRelaxedStore32IgnoreFP
                   : RuntimeStubId::kTSANRelaxedStore32SaveFP;
      } else {
        CHECK_EQ(size, kInt64Size);
        return fp_mode == SaveFPRegsMode::kIgnore
                   ? RuntimeStubId::kTSANRelaxedStore64IgnoreFP
                   : RuntimeStubId::kTSANRelaxedStore64SaveFP;
      }
    } else {
      DCHECK_EQ(order, std::memory_order_seq_cst);
      if (size == kInt8Size) {
        return fp_mode == SaveFPRegsMode::kIgnore
                   ? RuntimeStubId::kTSANSeqCstStore8IgnoreFP
                   : RuntimeStubId::kTSANSeqCstStore8SaveFP;
      } else if (size == kInt16Size) {
        return fp_mode == SaveFPRegsMode::kIgnore
                   ? RuntimeStubId::kTSANSeqCstStore16IgnoreFP
                   : RuntimeStubId::kTSANSeqCstStore16SaveFP;
      } else if (size == kInt32Size) {
        return fp_mode == SaveFPRegsMode::kIgnore
                   ? RuntimeStubId::kTSANSeqCstStore32IgnoreFP
                   : RuntimeStubId::kTSANSeqCstStore32SaveFP;
      } else {
        CHECK_EQ(size, kInt64Size);
        return fp_mode == SaveFPRegsMode::kIgnore
                   ? RuntimeStubId::kTSANSeqCstStore64IgnoreFP
                   : RuntimeStubId::kTSANSeqCstStore64SaveFP;
      }
    }
  }

  static RuntimeStubId GetTSANRelaxedLoadStub(SaveFPRegsMode fp_mode,
                                              int size) {
    if (size == kInt32Size) {
      return fp_mode == SaveFPRegsMode::kIgnore
                 ? RuntimeStubId::kTSANRelaxedLoad32IgnoreFP
                 : RuntimeStubId::kTSANRelaxedLoad32SaveFP;
    } else {
      CHECK_EQ(size, kInt64Size);
      return fp_mode == SaveFPRegsMode::kIgnore
                 ? RuntimeStubId::kTSANRelaxedLoad64IgnoreFP
                 : RuntimeStubId::kTSANRelaxedLoad64SaveFP;
    }
  }
#endif  // V8_IS_TSAN

  base::Vector<byte> instructions() const {
    return base::VectorOf(instructions_,
                          static_cast<size_t>(instructions_size_));
  }
  Address instruction_start() const {
    return reinterpret_cast<Address>(instructions_);
  }
  base::Vector<const byte> reloc_info() const {
    return {protected_instructions_data().end(),
            static_cast<size_t>(reloc_info_size_)};
  }
  base::Vector<const byte> source_positions() const {
    return {reloc_info().end(), static_cast<size_t>(source_positions_size_)};
  }

  int index() const { return index_; }
  // Anonymous functions are functions that don't carry an index.
  bool IsAnonymous() const { return index_ == kAnonymousFuncIndex; }
  Kind kind() const { return KindField::decode(flags_); }
  NativeModule* native_module() const { return native_module_; }
  ExecutionTier tier() const { return ExecutionTierField::decode(flags_); }
  Address constant_pool() const;
  Address handler_table() const;
  int handler_table_size() const;
  Address code_comments() const;
  int code_comments_size() const;
  int constant_pool_offset() const { return constant_pool_offset_; }
  int safepoint_table_offset() const { return safepoint_table_offset_; }
  int handler_table_offset() const { return handler_table_offset_; }
  int code_comments_offset() const { return code_comments_offset_; }
  int unpadded_binary_size() const { return unpadded_binary_size_; }
  int stack_slots() const { return stack_slots_; }
  uint16_t first_tagged_parameter_slot() const {
    return tagged_parameter_slots_ >> 16;
  }
  uint16_t num_tagged_parameter_slots() const {
    return tagged_parameter_slots_ & 0xFFFF;
  }
  uint32_t raw_tagged_parameter_slots_for_serialization() const {
    return tagged_parameter_slots_;
  }
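
  // Illustrative sketch of the packing consumed by the three accessors above
  // (assumed local variables, not part of this class): the first tagged slot
  // goes into the upper 16 bits, the slot count into the lower 16 bits.
  //
  //   uint32_t packed = (uint32_t{first_slot} << 16) | num_slots;
  //   // first_tagged_parameter_slot() == packed >> 16
  //   // num_tagged_parameter_slots()  == packed & 0xFFFF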

  bool is_liftoff() const { return tier() == ExecutionTier::kLiftoff; }

  bool is_turbofan() const { return tier() == ExecutionTier::kTurbofan; }

  bool contains(Address pc) const {
    return reinterpret_cast<Address>(instructions_) <= pc &&
           pc < reinterpret_cast<Address>(instructions_ + instructions_size_);
  }

  // Only Liftoff code that was generated for debugging can be inspected
  // (otherwise debug side table positions would not match up).
  bool is_inspectable() const { return is_liftoff() && for_debugging(); }

  base::Vector<const uint8_t> protected_instructions_data() const {
    return {meta_data_.get(),
            static_cast<size_t>(protected_instructions_size_)};
  }

  base::Vector<const trap_handler::ProtectedInstructionData>
  protected_instructions() const {
    return base::Vector<const trap_handler::ProtectedInstructionData>::cast(
        protected_instructions_data());
  }

  void Validate() const;
  void Print(const char* name = nullptr) const;
  void MaybePrint() const;
  void Disassemble(const char* name, std::ostream& os,
                   Address current_pc = kNullAddress) const;

  static bool ShouldBeLogged(Isolate* isolate);
  void LogCode(Isolate* isolate, const char* source_url, int script_id) const;

  WasmCode(const WasmCode&) = delete;
  WasmCode& operator=(const WasmCode&) = delete;
  ~WasmCode();

  void IncRef() {
    int old_val = ref_count_.fetch_add(1, std::memory_order_acq_rel);
    DCHECK_LE(1, old_val);
    DCHECK_GT(kMaxInt, old_val);
    USE(old_val);
  }

  // Decrement the ref count. Returns whether this code becomes dead and needs
  // to be freed.
  V8_WARN_UNUSED_RESULT bool DecRef() {
    int old_count = ref_count_.load(std::memory_order_acquire);
    while (true) {
      DCHECK_LE(1, old_count);
      if (V8_UNLIKELY(old_count == 1)) return DecRefOnPotentiallyDeadCode();
      if (ref_count_.compare_exchange_weak(old_count, old_count - 1,
                                           std::memory_order_acq_rel)) {
        return false;
      }
    }
  }

  // Decrement the ref count on code that is known to be in use (i.e. the ref
  // count cannot drop to zero here).
  void DecRefOnLiveCode() {
    int old_count = ref_count_.fetch_sub(1, std::memory_order_acq_rel);
    DCHECK_LE(2, old_count);
    USE(old_count);
  }

  // Decrement the ref count on code that is known to be dead, even though there
  // might still be C++ references. Returns whether this drops the last
  // reference and the code needs to be freed.
  V8_WARN_UNUSED_RESULT bool DecRefOnDeadCode() {
    return ref_count_.fetch_sub(1, std::memory_order_acq_rel) == 1;
  }

  // Decrement the ref count on a set of {WasmCode} objects, potentially
  // belonging to different {NativeModule}s. Dead code will be deleted.
  static void DecrementRefCount(base::Vector<WasmCode* const>);
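
  // Illustrative sketch, assumptions only: keeping one {WasmCode} alive beyond
  // the current {WasmCodeRefScope} ({UseLater} is a placeholder, not V8 API).
  //
  //   code->IncRef();                             // pin the code
  //   UseLater(code);
  //   WasmCode::DecrementRefCount({&code, 1});    // may delete dead code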

  // Returns the last source position before {offset}.
  int GetSourcePositionBefore(int offset);

  // Returns whether this code was generated for debugging. If this returns
  // {kForDebugging}, but {tier()} is not {kLiftoff}, then Liftoff compilation
  // bailed out.
  ForDebugging for_debugging() const {
    return ForDebuggingField::decode(flags_);
  }

  enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };

 private:
  friend class NativeModule;

  WasmCode(NativeModule* native_module, int index,
           base::Vector<byte> instructions, int stack_slots,
           uint32_t tagged_parameter_slots, int safepoint_table_offset,
           int handler_table_offset, int constant_pool_offset,
           int code_comments_offset, int unpadded_binary_size,
           base::Vector<const byte> protected_instructions_data,
           base::Vector<const byte> reloc_info,
           base::Vector<const byte> source_position_table, Kind kind,
           ExecutionTier tier, ForDebugging for_debugging)
      : native_module_(native_module),
        instructions_(instructions.begin()),
        flags_(KindField::encode(kind) | ExecutionTierField::encode(tier) |
               ForDebuggingField::encode(for_debugging)),
        meta_data_(ConcatenateBytes(
            {protected_instructions_data, reloc_info, source_position_table})),
        instructions_size_(instructions.length()),
        reloc_info_size_(reloc_info.length()),
        source_positions_size_(source_position_table.length()),
        protected_instructions_size_(protected_instructions_data.length()),
        index_(index),
        constant_pool_offset_(constant_pool_offset),
        stack_slots_(stack_slots),
        tagged_parameter_slots_(tagged_parameter_slots),
        safepoint_table_offset_(safepoint_table_offset),
        handler_table_offset_(handler_table_offset),
        code_comments_offset_(code_comments_offset),
        unpadded_binary_size_(unpadded_binary_size) {
    DCHECK_LE(safepoint_table_offset, unpadded_binary_size);
    DCHECK_LE(handler_table_offset, unpadded_binary_size);
    DCHECK_LE(code_comments_offset, unpadded_binary_size);
    DCHECK_LE(constant_pool_offset, unpadded_binary_size);
  }

  std::unique_ptr<const byte[]> ConcatenateBytes(
      std::initializer_list<base::Vector<const byte>>);

  // Tries to get a reasonable name. Lazily looks up the name section, and falls
  // back to the function index. Return value is guaranteed to not be empty.
  std::string DebugName() const;

  // Code objects that have been registered with the global trap handler within
  // this process will have a {trap_handler_index} associated with them.
  int trap_handler_index() const {
    CHECK(has_trap_handler_index());
    return trap_handler_index_;
  }
  void set_trap_handler_index(int value) {
    CHECK(!has_trap_handler_index());
    trap_handler_index_ = value;
  }
  bool has_trap_handler_index() const { return trap_handler_index_ >= 0; }

  // Register protected instruction information with the trap handler. Sets
  // {trap_handler_index_}.
  void RegisterTrapHandlerData();

  // Slow path for {DecRef}: The code becomes potentially dead.
  // Returns whether this code becomes dead and needs to be freed.
  V8_NOINLINE bool DecRefOnPotentiallyDeadCode();

  NativeModule* const native_module_ = nullptr;
  byte* const instructions_;
  const uint8_t flags_;  // Bit field, see below.
  // {meta_data_} contains several byte vectors concatenated into one:
  //  - protected instructions data of size {protected_instructions_size_}
  //  - relocation info of size {reloc_info_size_}
  //  - source positions of size {source_positions_size_}
  // Note that the protected instructions come first to ensure alignment.
  std::unique_ptr<const byte[]> meta_data_;
  const int instructions_size_;
  const int reloc_info_size_;
  const int source_positions_size_;
  const int protected_instructions_size_;
  const int index_;
  const int constant_pool_offset_;
  const int stack_slots_;
  // Number and position of tagged parameters passed to this function via the
  // stack, packed into a single uint32. These values are used by the stack
  // walker (e.g. GC) to find references.
  const uint32_t tagged_parameter_slots_;
  // We care about safepoint data for wasm-to-js functions, since there may be
  // stack/register tagged values for large number conversions.
  const int safepoint_table_offset_;
  const int handler_table_offset_;
  const int code_comments_offset_;
  const int unpadded_binary_size_;
  int trap_handler_index_ = -1;

  // Bits encoded in {flags_}:
  using KindField = base::BitField8<Kind, 0, 2>;
  using ExecutionTierField = KindField::Next<ExecutionTier, 2>;
  using ForDebuggingField = ExecutionTierField::Next<ForDebugging, 2>;

  // WasmCode is ref counted. Counters are held by:
  //   1) The jump table / code table.
  //   2) {WasmCodeRefScope}s.
  //   3) The set of potentially dead code in the {WasmEngine}.
  // If a decrement of (1) would drop the ref count to 0, that code becomes a
  // candidate for garbage collection. At that point, we add a ref count for (3)
  // *before* decrementing the counter to ensure the code stays alive as long as
  // it's being used. Once the ref count drops to zero (i.e. after being removed
  // from (3) and all (2)), the code object is deleted and the memory for the
  // machine code is freed.
  std::atomic<int> ref_count_{1};
};

// Check that {WasmCode} objects are sufficiently small. We create many of them,
// often for rather small functions.
// Increase the limit if needed, but first check if the size increase is
// justified.
#ifndef V8_GC_MOLE
static_assert(sizeof(WasmCode) <= 88);
#endif

WasmCode::Kind GetCodeKind(const WasmCompilationResult& result);

// Return a textual description of the kind.
const char* GetWasmCodeKindAsString(WasmCode::Kind);

// Manages the code reservations and allocations of a single {NativeModule}.
class WasmCodeAllocator {
 public:
#if V8_TARGET_ARCH_ARM64
  // ARM64 only supports direct calls within a 128 MB range.
  static constexpr size_t kMaxCodeSpaceSize = 128 * MB;
#elif V8_TARGET_ARCH_PPC64
  // Branches only take 26 bits.
  static constexpr size_t kMaxCodeSpaceSize = 32 * MB;
#else
  // Use 1024 MB limit for code spaces on other platforms. This is smaller than
  // the total allowed code space (kMaxWasmCodeMemory) to avoid unnecessarily
  // big reservations, and to ensure that distances within a code space fit
  // within a 32-bit signed integer.
  static constexpr size_t kMaxCodeSpaceSize = 1024 * MB;
#endif

  explicit WasmCodeAllocator(std::shared_ptr<Counters> async_counters);
  ~WasmCodeAllocator();

  // Call before use, after the {NativeModule} is set up completely.
  void Init(VirtualMemory code_space);

  size_t committed_code_space() const {
    return committed_code_space_.load(std::memory_order_acquire);
  }
  size_t generated_code_size() const {
    return generated_code_size_.load(std::memory_order_acquire);
  }
  size_t freed_code_size() const {
    return freed_code_size_.load(std::memory_order_acquire);
  }

  // Allocate code space. Returns a valid buffer or fails with OOM (crash).
  // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
  base::Vector<byte> AllocateForCode(NativeModule*, size_t size);

  // Allocate code space within a specific region. Returns a valid buffer or
  // fails with OOM (crash).
  // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
  base::Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
                                             base::AddressRegion);

  // Increases or decreases the {writers_count_} field. While there is at least
  // one writer, it is allowed to call {MakeWritable} to make regions writable.
  // When the last writer is removed, all code is switched back to
  // write-protected.
  // Hold the {NativeModule}'s {allocation_mutex_} when calling one of these
  // methods. The methods should only be called via {CodeSpaceWriteScope}.
  V8_EXPORT_PRIVATE void AddWriter();
  V8_EXPORT_PRIVATE void RemoveWriter();

  // Make a code region writable. Only allowed if there is at least one writer
  // (see above).
  // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
  V8_EXPORT_PRIVATE void MakeWritable(base::AddressRegion);

  // Free memory pages of all given code objects. Used for wasm code GC.
  // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
  void FreeCode(base::Vector<WasmCode* const>);

  // Retrieve the number of separately reserved code spaces.
  // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
  size_t GetNumCodeSpaces() const;

  Counters* counters() const { return async_counters_.get(); }

 private:
  void InsertIntoWritableRegions(base::AddressRegion region,
                                 bool switch_to_writable);

  //////////////////////////////////////////////////////////////////////////////
  // These fields are protected by the mutex in {NativeModule}.

  // Code space that was reserved and is available for allocations (subset of
  // {owned_code_space_}).
  DisjointAllocationPool free_code_space_;
  // Code space that was allocated for code (subset of {owned_code_space_}).
  DisjointAllocationPool allocated_code_space_;
  // Code space that was allocated before but is dead now. Full pages within
  // this region are discarded. It's still a subset of {owned_code_space_}.
  DisjointAllocationPool freed_code_space_;
  std::vector<VirtualMemory> owned_code_space_;

  // The following two fields are only used if {protect_code_memory_} is true.
  int writers_count_{0};
  std::set<base::AddressRegion, base::AddressRegion::StartAddressLess>
      writable_memory_;

  // End of fields protected by the mutex in {NativeModule}.
  //////////////////////////////////////////////////////////////////////////////

  // {protect_code_memory_} is true if traditional memory permission switching
  // is used to protect code space. It is false if {MAP_JIT} on Mac or PKU is
  // being used, or protection is completely disabled.
  const bool protect_code_memory_;
  std::atomic<size_t> committed_code_space_{0};
  std::atomic<size_t> generated_code_size_{0};
  std::atomic<size_t> freed_code_size_{0};

  std::shared_ptr<Counters> async_counters_;
};

class V8_EXPORT_PRIVATE NativeModule final {
 public:
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64 || \
    V8_TARGET_ARCH_PPC64
  static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = true;
#else
  static constexpr bool kNeedsFarJumpsBetweenCodeSpaces = false;
#endif

  NativeModule(const NativeModule&) = delete;
  NativeModule& operator=(const NativeModule&) = delete;
  ~NativeModule();

  // {AddCode} is thread safe w.r.t. other calls to {AddCode} or methods adding
  // code below, i.e. it can be called concurrently from background threads.
  // The returned code still needs to be published via {PublishCode}.
  std::unique_ptr<WasmCode> AddCode(
      int index, const CodeDesc& desc, int stack_slots,
      uint32_t tagged_parameter_slots,
      base::Vector<const byte> protected_instructions,
      base::Vector<const byte> source_position_table, WasmCode::Kind kind,
      ExecutionTier tier, ForDebugging for_debugging);

  // {PublishCode} makes the code available to the system by entering it into
  // the code table and patching the jump table. It returns a raw pointer to the
  // given {WasmCode} object. Ownership is transferred to the {NativeModule}.
  WasmCode* PublishCode(std::unique_ptr<WasmCode>);
  std::vector<WasmCode*> PublishCode(base::Vector<std::unique_ptr<WasmCode>>);
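
  // Illustrative sketch, simplified from the real compile pipeline; the
  // surrounding variables ({func_index}, {desc}, ...) are made up for this
  // example and not defined in this header.
  //
  //   std::unique_ptr<WasmCode> code = native_module->AddCode(
  //       func_index, desc, stack_slots, tagged_parameter_slots,
  //       protected_instructions, source_positions, WasmCode::kWasmFunction,
  //       ExecutionTier::kTurbofan, kNoDebugging);
  //   WasmCode* published = native_module->PublishCode(std::move(code));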

  // ReinstallDebugCode does a subset of PublishCode: It installs the code in
  // the code table and patches the jump table. The given code must be debug
  // code (with breakpoints) and must be owned by this {NativeModule} already.
  // This method is used to re-instantiate code that was removed from the code
  // table and jump table via another {PublishCode}.
  void ReinstallDebugCode(WasmCode*);

  struct JumpTablesRef {
    Address jump_table_start = kNullAddress;
    Address far_jump_table_start = kNullAddress;

    bool is_valid() const { return far_jump_table_start != kNullAddress; }
  };

  std::pair<base::Vector<uint8_t>, JumpTablesRef> AllocateForDeserializedCode(
      size_t total_code_size);

  std::unique_ptr<WasmCode> AddDeserializedCode(
      int index, base::Vector<byte> instructions, int stack_slots,
      uint32_t tagged_parameter_slots, int safepoint_table_offset,
      int handler_table_offset, int constant_pool_offset,
      int code_comments_offset, int unpadded_binary_size,
      base::Vector<const byte> protected_instructions_data,
      base::Vector<const byte> reloc_info,
      base::Vector<const byte> source_position_table, WasmCode::Kind kind,
      ExecutionTier tier);

  // Adds anonymous code for testing purposes.
  WasmCode* AddCodeForTesting(Handle<Code> code);

  // Use {UseLazyStub} to set up lazy compilation per function. It will use the
  // existing {WasmCode::kWasmCompileLazy} runtime stub and populate the jump
  // table with trampolines accordingly.
  void UseLazyStub(uint32_t func_index);

  // Creates a snapshot of the current state of the code table. This is useful
  // to get a consistent view of the table (e.g. used by the serializer).
  std::vector<WasmCode*> SnapshotCodeTable() const;
  // Creates a snapshot of all {owned_code_}; this will transfer new code (if
  // any) to {owned_code_} first.
  std::vector<WasmCode*> SnapshotAllOwnedCode() const;

  WasmCode* GetCode(uint32_t index) const;
  bool HasCode(uint32_t index) const;
  bool HasCodeWithTier(uint32_t index, ExecutionTier tier) const;

  void SetWasmSourceMap(std::unique_ptr<WasmModuleSourceMap> source_map);
  WasmModuleSourceMap* GetWasmSourceMap() const;

  Address jump_table_start() const {
    return main_jump_table_ ? main_jump_table_->instruction_start()
                            : kNullAddress;
  }

  // Finds the jump tables that should be used for a given code region. This
  // information is then passed to {GetNearCallTargetForFunction} and
  // {GetNearRuntimeStubEntry} to avoid the overhead of looking this information
  // up there. Returns an empty struct if no suitable jump tables exist.
  JumpTablesRef FindJumpTablesForRegionLocked(base::AddressRegion) const;

  // Get the call target in the jump table previously looked up via
  // {FindJumpTablesForRegionLocked}.
  Address GetNearCallTargetForFunction(uint32_t func_index,
                                       const JumpTablesRef&) const;

  // Get a runtime stub entry (which is a far jump table slot) in the jump table
  // previously looked up via {FindJumpTablesForRegionLocked}.
  Address GetNearRuntimeStubEntry(WasmCode::RuntimeStubId index,
                                  const JumpTablesRef&) const;
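
  // Illustrative sketch (assumed call pattern; {code_region} and {func_index}
  // are placeholders): look the jump tables up once per code region, then
  // resolve near call targets against that result.
  //
  //   JumpTablesRef tables = FindJumpTablesForRegionLocked(code_region);
  //   if (tables.is_valid()) {
  //     Address target = GetNearCallTargetForFunction(func_index, tables);
  //     ...
  //   }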

  // Reverse lookup from a given call target (which must be a jump table slot)
  // to a function index.
  uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;

  void AddWriter() {
    base::RecursiveMutexGuard guard{&allocation_mutex_};
    code_allocator_.AddWriter();
  }

  void RemoveWriter() {
    base::RecursiveMutexGuard guard{&allocation_mutex_};
    code_allocator_.RemoveWriter();
  }

  void MakeWritable(base::AddressRegion region) {
    base::RecursiveMutexGuard guard{&allocation_mutex_};
    code_allocator_.MakeWritable(region);
  }
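
  // Illustrative sketch, assumed usage: code writes normally go through a
  // {CodeSpaceWriteScope} (see code-space-write-scope.h), which calls
  // {AddWriter}/{RemoveWriter} on this module instead of invoking them
  // directly. The constructor argument shown here is an assumption.
  //
  //   CodeSpaceWriteScope write_scope(native_module);
  //   // ... copy generated instructions, patch jump table slots ...
  //   // writers are removed again when {write_scope} goes out of scope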

  // For cctests, where we build both WasmModule and the runtime objects
  // on the fly, and bypass the instance builder pipeline.
  void ReserveCodeTableForTesting(uint32_t max_functions);

  void LogWasmCodes(Isolate*, Script);

  CompilationState* compilation_state() const {
    return compilation_state_.get();
  }

  // Create a {CompilationEnv} object for compilation. The caller has to ensure
  // that the {WasmModule} pointer stays valid while the {CompilationEnv} is
  // being used.
  CompilationEnv CreateCompilationEnv() const;

  uint32_t num_functions() const {
    return module_->num_declared_functions + module_->num_imported_functions;
  }
  uint32_t num_imported_functions() const {
    return module_->num_imported_functions;
  }
  BoundsCheckStrategy bounds_checks() const { return bounds_checks_; }
  void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
  bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
  base::Vector<const uint8_t> wire_bytes() const {
    return std::atomic_load(&wire_bytes_)->as_vector();
  }
  const WasmModule* module() const { return module_.get(); }
  std::shared_ptr<const WasmModule> shared_module() const { return module_; }
  size_t committed_code_space() const {
    return code_allocator_.committed_code_space();
  }
  size_t generated_code_size() const {
    return code_allocator_.generated_code_size();
  }
  size_t liftoff_bailout_count() const {
    return liftoff_bailout_count_.load(std::memory_order_relaxed);
  }
  size_t liftoff_code_size() const {
    return liftoff_code_size_.load(std::memory_order_relaxed);
  }
  size_t turbofan_code_size() const {
    return turbofan_code_size_.load(std::memory_order_relaxed);
  }
  size_t baseline_compilation_cpu_duration() const {
    return baseline_compilation_cpu_duration_.load();
  }
  size_t tier_up_cpu_duration() const {
    return tier_up_cpu_duration_.load(std::memory_order_relaxed);
  }

  void AddLazyCompilationTimeSample(int64_t sample);

  int num_lazy_compilations() const {
    return num_lazy_compilations_.load(std::memory_order_relaxed);
  }

  int64_t sum_lazy_compilation_time_in_ms() const {
    return sum_lazy_compilation_time_in_micro_sec_.load(
               std::memory_order_relaxed) /
           1000;
  }

  int64_t max_lazy_compilation_time_in_ms() const {
    return max_lazy_compilation_time_in_micro_sec_.load(
               std::memory_order_relaxed) /
           1000;
  }

  // To avoid double-reporting, only the first instantiation should report lazy
  // compilation performance metrics.
  bool ShouldLazyCompilationMetricsBeReported() {
    return should_metrics_be_reported_.exchange(false,
                                                std::memory_order_relaxed);
  }

  bool HasWireBytes() const {
    auto wire_bytes = std::atomic_load(&wire_bytes_);
    return wire_bytes && !wire_bytes->empty();
  }
  void SetWireBytes(base::OwnedVector<const uint8_t> wire_bytes);

  void UpdateCPUDuration(size_t cpu_duration, ExecutionTier tier);
  void AddLiftoffBailout() {
    liftoff_bailout_count_.fetch_add(1, std::memory_order_relaxed);
  }

  WasmCode* Lookup(Address) const;

  WasmImportWrapperCache* import_wrapper_cache() const {
    return import_wrapper_cache_.get();
  }

  const WasmFeatures& enabled_features() const { return enabled_features_; }

  // Returns the runtime stub id that corresponds to the given address (which
  // must be a far jump table slot). Returns {kRuntimeStubCount} on failure.
  WasmCode::RuntimeStubId GetRuntimeStubId(Address runtime_stub_target) const;

  // Sample the current code size of this module to the given counters.
  enum CodeSamplingTime : int8_t { kAfterBaseline, kSampling };
  void SampleCodeSize(Counters*, CodeSamplingTime) const;

  V8_WARN_UNUSED_RESULT std::unique_ptr<WasmCode> AddCompiledCode(
      WasmCompilationResult);
  V8_WARN_UNUSED_RESULT std::vector<std::unique_ptr<WasmCode>> AddCompiledCode(
      base::Vector<WasmCompilationResult>);

  // Set a new tiering state, but don't trigger any recompilation yet; use
  // {RecompileForTiering} for that. The two steps are split because in some
  // scenarios we need to drop locks before triggering recompilation.
  void SetTieringState(TieringState);

  // Check whether this module is tiered down for debugging.
  bool IsTieredDown();

  // Fully recompile this module in the tier set previously via
  // {SetTieringState}. The calling thread contributes to compilation and only
  // returns once recompilation is done.
  void RecompileForTiering();

  // Find all functions that need to be recompiled for a new tier. Note that
  // compilation jobs might run concurrently, so this method only considers the
  // compilation state of this native module at the time of the call.
  // Returns a vector of function indexes to recompile.
  std::vector<int> FindFunctionsToRecompile(TieringState);

  // Free a set of functions of this module. Uncommits whole pages if possible.
  // The given vector must be ordered by the instruction start address, and all
  // {WasmCode} objects must not be used any more.
  // Should only be called via {WasmEngine::FreeDeadCode}, so the engine can do
  // its accounting.
  void FreeCode(base::Vector<WasmCode* const>);

  // Retrieve the number of separately reserved code spaces for this module.
  size_t GetNumberOfCodeSpacesForTesting() const;

  // Check whether there is DebugInfo for this NativeModule.
  bool HasDebugInfo() const;

  // Get or create the debug info for this NativeModule.
  DebugInfo* GetDebugInfo();

  // Get or create the NamesProvider. Requires {HasWireBytes()}.
  NamesProvider* GetNamesProvider();

  uint32_t* tiering_budget_array() { return tiering_budgets_.get(); }

  Counters* counters() const { return code_allocator_.counters(); }

 private:
  friend class WasmCode;
  friend class WasmCodeAllocator;
  friend class WasmCodeManager;
  friend class CodeSpaceWriteScope;

  struct CodeSpaceData {
    base::AddressRegion region;
    WasmCode* jump_table;
    WasmCode* far_jump_table;
  };

  // Private constructor, called via {WasmCodeManager::NewNativeModule()}.
  NativeModule(const WasmFeatures& enabled_features,
               DynamicTiering dynamic_tiering, VirtualMemory code_space,
               std::shared_ptr<const WasmModule> module,
               std::shared_ptr<Counters> async_counters,
               std::shared_ptr<NativeModule>* shared_this);

  std::unique_ptr<WasmCode> AddCodeWithCodeSpace(
      int index, const CodeDesc& desc, int stack_slots,
      uint32_t tagged_parameter_slots,
      base::Vector<const byte> protected_instructions_data,
      base::Vector<const byte> source_position_table, WasmCode::Kind kind,
      ExecutionTier tier, ForDebugging for_debugging,
      base::Vector<uint8_t> code_space, const JumpTablesRef& jump_tables_ref);

  WasmCode* CreateEmptyJumpTableLocked(int jump_table_size);

  WasmCode* CreateEmptyJumpTableInRegionLocked(int jump_table_size,
                                               base::AddressRegion);

  void UpdateCodeSize(size_t, ExecutionTier, ForDebugging);

  // Hold the {allocation_mutex_} when calling one of these methods.
  // {slot_index} is the index in the declared functions, i.e. function index
  // minus the number of imported functions.
  void PatchJumpTablesLocked(uint32_t slot_index, Address target);
  void PatchJumpTableLocked(const CodeSpaceData&, uint32_t slot_index,
                            Address target);

  // Called by the {WasmCodeAllocator} to register a new code space.
  void AddCodeSpaceLocked(base::AddressRegion);

  // Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
  WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>);

  // Transfer owned code from {new_owned_code_} to {owned_code_}.
  void TransferNewOwnedCodeLocked() const;

  // Add code to the code cache, if it meets criteria for being cached and we do
  // not have code in the cache yet.
  void InsertToCodeCache(WasmCode* code);

  // -- Fields of {NativeModule} start here.

  // Keep the engine alive as long as this NativeModule is alive. In its
  // destructor, the NativeModule still communicates with the WasmCodeManager,
  // owned by the engine. This field comes before other fields which also still
  // access the engine (like the code allocator), so that its destructor runs
  // last.
  OperationsBarrier::Token engine_scope_;

  // {WasmCodeAllocator} manages all code reservations and allocations for this
  // {NativeModule}.
  WasmCodeAllocator code_allocator_;

  // Features enabled for this module. We keep a copy of the features that
  // were enabled at the time of the creation of this native module,
  // to be consistent across asynchronous compilations later.
  const WasmFeatures enabled_features_;

  // The decoded module, stored in a shared_ptr such that background compile
  // tasks can keep this alive.
  std::shared_ptr<const WasmModule> module_;

  std::unique_ptr<WasmModuleSourceMap> source_map_;

  // Wire bytes, held in a shared_ptr so they can be kept alive by the
  // {WireBytesStorage}, held by background compile tasks.
  std::shared_ptr<base::OwnedVector<const uint8_t>> wire_bytes_;

  // The first allocated jump table. Always used by external calls (from JS).
  // Wasm calls might use one of the other jump tables stored in
  // {code_space_data_}.
  WasmCode* main_jump_table_ = nullptr;

  // The first allocated far jump table.
  WasmCode* main_far_jump_table_ = nullptr;

  // Lazy compile stub table, containing entries to jump to the
  // {WasmCompileLazy} builtin, passing the function index.
  WasmCode* lazy_compile_table_ = nullptr;

  // The compilation state keeps track of compilation tasks for this module.
  // Note that its destructor blocks until all tasks are finished/aborted and
  // hence needs to be destructed first when this native module dies.
  std::unique_ptr<CompilationState> compilation_state_;

  // A cache of the import wrappers, keyed on the kind and signature.
  std::unique_ptr<WasmImportWrapperCache> import_wrapper_cache_;

  // Per-function budget counters used to drive dynamic tier-up.
  std::unique_ptr<uint32_t[]> tiering_budgets_;

  // This mutex protects concurrent calls to {AddCode} and friends.
  // TODO(dlehmann): Revert this to a regular {Mutex} again.
  // This needs to be a {RecursiveMutex} only because of {CodeSpaceWriteScope}
  // usages, which are (1) either at places that already hold the
  // {allocation_mutex_} or (2) because of multiple open {CodeSpaceWriteScope}s
  // in the call hierarchy. Both are fixable.
  mutable base::RecursiveMutex allocation_mutex_;

  //////////////////////////////////////////////////////////////////////////////
  // Protected by {allocation_mutex_}:

  // Holds allocated code objects for fast lookup and deletion. For lookup based
  // on pc, the key is the instruction start address of the value. Filled lazily
  // from {new_owned_code_} (below).
  mutable std::map<Address, std::unique_ptr<WasmCode>> owned_code_;

  // Holds owned code which is not inserted into {owned_code_} yet. It will be
  // inserted on demand. This has much better performance than inserting
  // individual code objects.
  mutable std::vector<std::unique_ptr<WasmCode>> new_owned_code_;

  // Table of the latest code object per function, updated on initial
  // compilation and tier up. The number of entries is
  // {WasmModule::num_declared_functions}, i.e. there are no entries for
  // imported functions.
  std::unique_ptr<WasmCode*[]> code_table_;

  // Data (especially jump table) per code space.
  std::vector<CodeSpaceData> code_space_data_;

  // Debug information for this module. You only need to hold the allocation
  // mutex while getting the {DebugInfo} pointer, or initializing this field.
  // Further accesses to the {DebugInfo} do not need to be protected by the
  // mutex.
  std::unique_ptr<DebugInfo> debug_info_;

  std::unique_ptr<NamesProvider> names_provider_;

  TieringState tiering_state_ = kTieredUp;

  // Cache both baseline and top-tier code if we are debugging, to speed up
  // repeated enabling/disabling of the debugger or profiler.
  // Maps <tier, function_index> to WasmCode.
  std::unique_ptr<std::map<std::pair<ExecutionTier, int>, WasmCode*>>
      cached_code_;

  // End of fields protected by {allocation_mutex_}.
  //////////////////////////////////////////////////////////////////////////////

  const BoundsCheckStrategy bounds_checks_;
  bool lazy_compile_frozen_ = false;
  std::atomic<size_t> liftoff_bailout_count_{0};
  std::atomic<size_t> liftoff_code_size_{0};
  std::atomic<size_t> turbofan_code_size_{0};
  std::atomic<size_t> baseline_compilation_cpu_duration_{0};
  std::atomic<size_t> tier_up_cpu_duration_{0};

  // Metrics for lazy compilation.
  std::atomic<int> num_lazy_compilations_{0};
  std::atomic<int64_t> sum_lazy_compilation_time_in_micro_sec_{0};
  std::atomic<int64_t> max_lazy_compilation_time_in_micro_sec_{0};
  std::atomic<bool> should_metrics_be_reported_{true};
};

class V8_EXPORT_PRIVATE WasmCodeManager final {
 public:
  WasmCodeManager();
  WasmCodeManager(const WasmCodeManager&) = delete;
  WasmCodeManager& operator=(const WasmCodeManager&) = delete;

  ~WasmCodeManager();

#if defined(V8_OS_WIN64)
  static bool CanRegisterUnwindInfoForNonABICompliantCodeRange();
#endif  // V8_OS_WIN64

  NativeModule* LookupNativeModule(Address pc) const;
  WasmCode* LookupCode(Address pc) const;
  size_t committed_code_space() const {
    return total_committed_code_space_.load();
  }

  // Estimate the needed code space for a Liftoff function based on the size of
  // the function body (wasm byte code).
  static size_t EstimateLiftoffCodeSize(int body_size);
  // Estimate the needed code space from a completely decoded module.
  static size_t EstimateNativeModuleCodeSize(const WasmModule* module,
                                             bool include_liftoff,
                                             DynamicTiering dynamic_tiering);
  // Estimate the needed code space from the number of functions and total code
  // section length.
  static size_t EstimateNativeModuleCodeSize(int num_functions,
                                             int num_imported_functions,
                                             int code_section_length,
                                             bool include_liftoff,
                                             DynamicTiering dynamic_tiering);
  // Estimate the size of meta data needed for the NativeModule, excluding
  // generated code. This data is still stored on the C++ heap.
  static size_t EstimateNativeModuleMetaDataSize(const WasmModule* module);

  // Returns true if there is hardware support for PKU. Use
  // {MemoryProtectionKeysEnabled} to also check if PKU usage is enabled via
  // flags.
  static bool HasMemoryProtectionKeySupport();

  // Returns true if PKU should be used.
  static bool MemoryProtectionKeysEnabled();

  // Returns {true} if the memory protection key is write-enabled for the
  // current thread.
  // Can only be called if {HasMemoryProtectionKeySupport()} is {true}.
  static bool MemoryProtectionKeyWritable();

  // Initialize the current thread's permissions for the memory protection key,
  // if we have support.
  static void InitializeMemoryProtectionKeyPermissionsIfSupported();

  // Allocate new memory for assembler buffers, potentially protected by PKU.
  base::AddressRegion AllocateAssemblerBufferSpace(int size);

  // Free previously allocated space for assembler buffers.
  void FreeAssemblerBufferSpace(base::AddressRegion region);

 private:
  friend class WasmCodeAllocator;
  friend class WasmEngine;

  std::shared_ptr<NativeModule> NewNativeModule(
      Isolate* isolate, const WasmFeatures& enabled_features,
      size_t code_size_estimate, std::shared_ptr<const WasmModule> module);

  V8_WARN_UNUSED_RESULT VirtualMemory TryAllocate(size_t size,
                                                  void* hint = nullptr);
  void Commit(base::AddressRegion);
  void Decommit(base::AddressRegion);

  void FreeNativeModule(base::Vector<VirtualMemory> owned_code,
                        size_t committed_size);

  void AssignRange(base::AddressRegion, NativeModule*);

  const size_t max_committed_code_space_;

  std::atomic<size_t> total_committed_code_space_{0};
  // If the committed code space exceeds {critical_committed_code_space_}, then
  // we trigger a GC before creating the next module. This value is set to the
  // currently committed space plus 50% of the available code space on creation
  // and updated after each GC.
  std::atomic<size_t> critical_committed_code_space_;

  mutable base::Mutex native_modules_mutex_;

  //////////////////////////////////////////////////////////////////////////////
  // Protected by {native_modules_mutex_}:

  std::map<Address, std::pair<Address, NativeModule*>> lookup_map_;

  // End of fields protected by {native_modules_mutex_}.
  //////////////////////////////////////////////////////////////////////////////
};

// {WasmCodeRefScope}s form a perfect stack. New {WasmCode} pointers generated
// by e.g. creating new code or looking up code by its address are added to the
// top-most {WasmCodeRefScope}.
class V8_EXPORT_PRIVATE V8_NODISCARD WasmCodeRefScope {
 public:
  WasmCodeRefScope();
  WasmCodeRefScope(const WasmCodeRefScope&) = delete;
  WasmCodeRefScope& operator=(const WasmCodeRefScope&) = delete;
  ~WasmCodeRefScope();

  // Register a {WasmCode} reference in the current {WasmCodeRefScope}. Fails if
  // there is no current scope.
  static void AddRef(WasmCode*);

 private:
  WasmCodeRefScope* const previous_scope_;
  std::vector<WasmCode*> code_ptrs_;
};
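
// Illustrative sketch, assumed usage: scope code lookups so that the returned
// {WasmCode} pointers stay ref-counted for the duration of the scope.
//
//   WasmCodeRefScope code_ref_scope;
//   WasmCode* code = native_module->Lookup(pc);  // ref registered in the scope
//   // ... {code} may be used until the end of the scope ...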

// Similarly to a global handle, a {GlobalWasmCodeRef} stores a single
// ref-counted pointer to a {WasmCode} object.
class GlobalWasmCodeRef {
 public:
  explicit GlobalWasmCodeRef(WasmCode* code,
                             std::shared_ptr<NativeModule> native_module)
      : code_(code), native_module_(std::move(native_module)) {
    code_->IncRef();
  }

  GlobalWasmCodeRef(const GlobalWasmCodeRef&) = delete;
  GlobalWasmCodeRef& operator=(const GlobalWasmCodeRef&) = delete;

  ~GlobalWasmCodeRef() { WasmCode::DecrementRefCount({&code_, 1}); }

  // Get a pointer to the contained {WasmCode} object. This is only guaranteed
  // to exist as long as this {GlobalWasmCodeRef} exists.
  WasmCode* code() const { return code_; }

 private:
  WasmCode* const code_;
  // Also keep the {NativeModule} alive.
  const std::shared_ptr<NativeModule> native_module_;
};
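
// Illustrative sketch, assumed usage: keep one {WasmCode} plus its owning
// {NativeModule} alive from a long-lived C++ object ({code} and
// {native_module} are placeholders).
//
//   auto ref = std::make_unique<GlobalWasmCodeRef>(code, native_module);
//   Address entry = ref->code()->instruction_start();  // valid while {ref} lives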

Builtin RuntimeStubIdToBuiltinName(WasmCode::RuntimeStubId);
const char* GetRuntimeStubName(WasmCode::RuntimeStubId);

}  // namespace wasm
}  // namespace internal
}  // namespace v8

#endif  // V8_WASM_WASM_CODE_MANAGER_H_