// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/wasm-code-manager.h"

#include <algorithm>
#include <iomanip>
#include <numeric>

#include "src/base/atomicops.h"
#include "src/base/build_config.h"
#include "src/base/iterator.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/base/small-vector.h"
#include "src/base/string-format.h"
#include "src/base/vector.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/common/code-memory-access.h"
#include "src/common/globals.h"
#include "src/diagnostics/disassembler.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/objects/objects-inl.h"
#include "src/snapshot/embedded/embedded-data-inl.h"
#include "src/utils/ostreams.h"
#include "src/wasm/code-space-access.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/names-provider.h"
#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-module-sourcemap.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-objects.h"

#if defined(V8_OS_WIN64)
#include "src/base/platform/wrappers.h"
#include "src/diagnostics/unwinding-info-win64.h"
#endif  // V8_OS_WIN64

#define TRACE_HEAP(...)                                   \
  do {                                                    \
    if (FLAG_trace_wasm_native_heap) PrintF(__VA_ARGS__); \
  } while (false)

namespace v8 {
namespace internal {
namespace wasm {

using trap_handler::ProtectedInstructionData;

base::AddressRegion DisjointAllocationPool::Merge(
    base::AddressRegion new_region) {
  // Find the possible insertion position by identifying the first region whose
  // start address is not less than that of {new_region}. Since there cannot be
  // any overlap between regions, this also means that the start of {above} is
  // greater than or equal to the *end* of {new_region}.
  auto above = regions_.lower_bound(new_region);
  DCHECK(above == regions_.end() || above->begin() >= new_region.end());

  // Check whether to merge with {above}.
  if (above != regions_.end() && new_region.end() == above->begin()) {
    base::AddressRegion merged_region{new_region.begin(),
                                      new_region.size() + above->size()};
    DCHECK_EQ(merged_region.end(), above->end());
    // Check whether to also merge with the region below.
    if (above != regions_.begin()) {
      auto below = above;
      --below;
      if (below->end() == new_region.begin()) {
        merged_region = {below->begin(), below->size() + merged_region.size()};
        regions_.erase(below);
      }
    }
    auto insert_pos = regions_.erase(above);
    regions_.insert(insert_pos, merged_region);
    return merged_region;
  }

  // No element below, and not adjacent to {above}: insert and done.
  if (above == regions_.begin()) {
    regions_.insert(above, new_region);
    return new_region;
  }

  auto below = above;
  --below;
  // Consistency check:
  DCHECK(above == regions_.end() || below->end() < above->begin());

  // Adjacent to {below}: merge and done.
  if (below->end() == new_region.begin()) {
    base::AddressRegion merged_region{below->begin(),
                                      below->size() + new_region.size()};
    DCHECK_EQ(merged_region.end(), new_region.end());
    regions_.erase(below);
    regions_.insert(above, merged_region);
    return merged_region;
  }

  // Not adjacent to any existing region: insert between {below} and {above}.
  DCHECK_LT(below->end(), new_region.begin());
  regions_.insert(above, new_region);
  return new_region;
}

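// Allocates {size} bytes from anywhere in the pool by delegating to
// {AllocateInRegion} with an unrestricted region.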
base::AddressRegion DisjointAllocationPool::Allocate(size_t size) {
  return AllocateInRegion(size,
                          {kNullAddress, std::numeric_limits<size_t>::max()});
}

base::AddressRegion DisjointAllocationPool::AllocateInRegion(
    size_t size, base::AddressRegion region) {
  // Get an iterator to the first contained region whose start address is not
  // smaller than the start address of {region}. Start the search from the
  // region one before that (the last one whose start address is smaller).
  auto it = regions_.lower_bound(region);
  if (it != regions_.begin()) --it;

  for (auto end = regions_.end(); it != end; ++it) {
    base::AddressRegion overlap = it->GetOverlap(region);
    if (size > overlap.size()) continue;
    base::AddressRegion ret{overlap.begin(), size};
    base::AddressRegion old = *it;
    auto insert_pos = regions_.erase(it);
    if (size == old.size()) {
      // We use the full region --> nothing to add back.
    } else if (ret.begin() == old.begin()) {
      // We return a region at the start --> shrink old region from front.
      regions_.insert(insert_pos, {old.begin() + size, old.size() - size});
    } else if (ret.end() == old.end()) {
      // We return a region at the end --> shrink remaining region.
      regions_.insert(insert_pos, {old.begin(), old.size() - size});
    } else {
      // We return something in the middle --> split the remaining region
      // (insert the region with smaller address first).
      regions_.insert(insert_pos, {old.begin(), ret.begin() - old.begin()});
      regions_.insert(insert_pos, {ret.end(), old.end() - ret.end()});
    }
    return ret;
  }
  return {};
}

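// Returns the address of the embedded constant pool, or {kNullAddress} if the
// build does not use embedded constant pools or this code object's constant
// pool is empty.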
Address WasmCode::constant_pool() const {
  if (FLAG_enable_embedded_constant_pool) {
    if (constant_pool_offset_ < code_comments_offset_) {
      return instruction_start() + constant_pool_offset_;
    }
  }
  return kNullAddress;
}

Address WasmCode::handler_table() const {
  return instruction_start() + handler_table_offset_;
}

int WasmCode::handler_table_size() const {
  DCHECK_GE(constant_pool_offset_, handler_table_offset_);
  return static_cast<int>(constant_pool_offset_ - handler_table_offset_);
}

Address WasmCode::code_comments() const {
  return instruction_start() + code_comments_offset_;
}

int WasmCode::code_comments_size() const {
  DCHECK_GE(unpadded_binary_size_, code_comments_offset_);
  return static_cast<int>(unpadded_binary_size_ - code_comments_offset_);
}

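// Copies the given byte vectors into one freshly allocated buffer; empty
// vectors are skipped to avoid passing nullptr to {memcpy}.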
std::unique_ptr<const byte[]> WasmCode::ConcatenateBytes(
    std::initializer_list<base::Vector<const byte>> vectors) {
  size_t total_size = 0;
  for (auto& vec : vectors) total_size += vec.size();
  // Use default-initialization (== no initialization).
  std::unique_ptr<byte[]> result{new byte[total_size]};
  byte* ptr = result.get();
  for (auto& vec : vectors) {
    if (vec.empty()) continue;  // Avoid nullptr in {memcpy}.
    memcpy(ptr, vec.begin(), vec.size());
    ptr += vec.size();
  }
  return result;
}

void WasmCode::RegisterTrapHandlerData() {
  DCHECK(!has_trap_handler_index());
  if (kind() != WasmCode::kWasmFunction) return;
  if (protected_instructions_size_ == 0) return;

  Address base = instruction_start();

  size_t size = instructions().size();
  auto protected_instruction_data = this->protected_instructions();
  const int index =
      RegisterHandlerData(base, size, protected_instruction_data.size(),
                          protected_instruction_data.begin());

  // TODO(eholk): if index is negative, fail.
  CHECK_LE(0, index);
  set_trap_handler_index(index);
  DCHECK(has_trap_handler_index());
}

bool WasmCode::ShouldBeLogged(Isolate* isolate) {
  // The return value is cached in {WasmEngine::IsolateData::log_codes}. Ensure
  // to call {WasmEngine::EnableCodeLogging} if this return value would change
  // for any isolate. Otherwise we might lose code events.
  return isolate->v8_file_logger()->is_listening_to_code_events() ||
         isolate->logger()->is_listening_to_code_events() ||
         isolate->is_profiling();
}

std::string WasmCode::DebugName() const {
  if (IsAnonymous()) {
    return "anonymous function";
  }

  ModuleWireBytes wire_bytes(native_module()->wire_bytes());
  const WasmModule* module = native_module()->module();
  WireBytesRef name_ref =
      module->lazily_generated_names.LookupFunctionName(wire_bytes, index());
  WasmName name = wire_bytes.GetNameOrNull(name_ref);
  std::string name_buffer;
  if (kind() == kWasmToJsWrapper) {
    name_buffer = "wasm-to-js:";
    size_t prefix_len = name_buffer.size();
    constexpr size_t kMaxSigLength = 128;
    name_buffer.resize(prefix_len + kMaxSigLength);
    const FunctionSig* sig = module->functions[index()].sig;
    size_t sig_length = PrintSignature(
        base::VectorOf(&name_buffer[prefix_len], kMaxSigLength), sig);
    name_buffer.resize(prefix_len + sig_length);
    // If the import has a name, also append that (separated by "-").
    if (!name.empty()) {
      name_buffer += '-';
      name_buffer.append(name.begin(), name.size());
    }
  } else if (name.empty()) {
    name_buffer.resize(32);
    name_buffer.resize(
        SNPrintF(base::VectorOf(&name_buffer.front(), name_buffer.size()),
                 "wasm-function[%d]", index()));
  } else {
    name_buffer.append(name.begin(), name.end());
  }
  return name_buffer;
}

void WasmCode::LogCode(Isolate* isolate, const char* source_url,
                       int script_id) const {
  DCHECK(ShouldBeLogged(isolate));
  if (IsAnonymous()) return;

  ModuleWireBytes wire_bytes(native_module_->wire_bytes());
  const WasmModule* module = native_module_->module();
  std::string fn_name = DebugName();
  WasmName name = base::VectorOf(fn_name);

  const WasmDebugSymbols& debug_symbols = module->debug_symbols;
  auto load_wasm_source_map = isolate->wasm_load_source_map_callback();
  auto source_map = native_module_->GetWasmSourceMap();
  if (!source_map && debug_symbols.type == WasmDebugSymbols::Type::SourceMap &&
      !debug_symbols.external_url.is_empty() && load_wasm_source_map) {
    WasmName external_url =
        wire_bytes.GetNameOrNull(debug_symbols.external_url);
    std::string external_url_string(external_url.data(), external_url.size());
    HandleScope scope(isolate);
    v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
    Local<v8::String> source_map_str =
        load_wasm_source_map(v8_isolate, external_url_string.c_str());
    native_module_->SetWasmSourceMap(
        std::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str));
  }

  // Record source positions before adding code, otherwise when code is added,
  // there are no source positions to associate with the added code.
  if (!source_positions().empty()) {
    LOG_CODE_EVENT(isolate, WasmCodeLinePosInfoRecordEvent(instruction_start(),
                                                           source_positions()));
  }

  int code_offset = module->functions[index_].code.offset();
  PROFILE(isolate, CodeCreateEvent(LogEventListener::CodeTag::kFunction, this,
                                   name, source_url, code_offset, script_id));
}

void WasmCode::Validate() const {
  // The packing strategy for {tagged_parameter_slots} only works if both the
  // max number of parameters and their max combined stack slot usage fits into
  // their respective half of the result value.
  static_assert(wasm::kV8MaxWasmFunctionParams <
                std::numeric_limits<uint16_t>::max());
  static constexpr int kMaxSlotsPerParam = 4;  // S128 on 32-bit platforms.
  static_assert(wasm::kV8MaxWasmFunctionParams * kMaxSlotsPerParam <
                std::numeric_limits<uint16_t>::max());

#ifdef DEBUG
  // Scope for foreign WasmCode pointers.
  WasmCodeRefScope code_ref_scope;
  // We expect certain relocation info modes to never appear in {WasmCode}
  // objects or to be restricted to a small set of valid values. Hence the
  // iteration below does not use a mask, but visits all relocation data.
  for (RelocIterator it(instructions(), reloc_info(), constant_pool());
       !it.done(); it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    switch (mode) {
      case RelocInfo::WASM_CALL: {
        Address target = it.rinfo()->wasm_call_address();
        WasmCode* code = native_module_->Lookup(target);
        CHECK_NOT_NULL(code);
        CHECK_EQ(WasmCode::kJumpTable, code->kind());
        CHECK(code->contains(target));
        break;
      }
      case RelocInfo::WASM_STUB_CALL: {
        Address target = it.rinfo()->wasm_stub_call_address();
        WasmCode* code = native_module_->Lookup(target);
        CHECK_NOT_NULL(code);
        CHECK_EQ(WasmCode::kJumpTable, code->kind());
        CHECK(code->contains(target));
        break;
      }
      case RelocInfo::INTERNAL_REFERENCE:
      case RelocInfo::INTERNAL_REFERENCE_ENCODED: {
        Address target = it.rinfo()->target_internal_reference();
        CHECK(contains(target));
        break;
      }
      case RelocInfo::EXTERNAL_REFERENCE:
      case RelocInfo::CONST_POOL:
      case RelocInfo::VENEER_POOL:
        // These are OK to appear.
        break;
      default:
        FATAL("Unexpected mode: %d", mode);
    }
  }
#endif
}

void WasmCode::MaybePrint() const {
  // Determines whether flags want this code to be printed.
  bool function_index_matches =
      (!IsAnonymous() &&
       FLAG_print_wasm_code_function_index == static_cast<int>(index()));
  if (FLAG_print_code || (kind() == kWasmFunction
                              ? (FLAG_print_wasm_code || function_index_matches)
                              : FLAG_print_wasm_stub_code.value())) {
    std::string name = DebugName();
    Print(name.c_str());
  }
}

void WasmCode::Print(const char* name) const {
  StdoutStream os;
  os << "--- WebAssembly code ---\n";
  Disassemble(name, os);
  if (native_module_->HasDebugInfo()) {
    if (auto* debug_side_table =
            native_module_->GetDebugInfo()->GetDebugSideTableIfExists(this)) {
      debug_side_table->Print(os);
    }
  }
  os << "--- End code ---\n";
}

void WasmCode::Disassemble(const char* name, std::ostream& os,
                           Address current_pc) const {
  if (name) os << "name: " << name << "\n";
  if (!IsAnonymous()) os << "index: " << index() << "\n";
  os << "kind: " << GetWasmCodeKindAsString(kind()) << "\n";
  if (kind() == kWasmFunction) {
    DCHECK(is_liftoff() || tier() == ExecutionTier::kTurbofan);
    const char* compiler =
        is_liftoff() ? (for_debugging() ? "Liftoff (debug)" : "Liftoff")
                     : "TurboFan";
    os << "compiler: " << compiler << "\n";
  }
  size_t padding = instructions().size() - unpadded_binary_size_;
  os << "Body (size = " << instructions().size() << " = "
     << unpadded_binary_size_ << " + " << padding << " padding)\n";

  int instruction_size = unpadded_binary_size_;
  if (constant_pool_offset_ < instruction_size) {
    instruction_size = constant_pool_offset_;
  }
  if (safepoint_table_offset_ && safepoint_table_offset_ < instruction_size) {
    instruction_size = safepoint_table_offset_;
  }
  if (handler_table_offset_ < instruction_size) {
    instruction_size = handler_table_offset_;
  }
  DCHECK_LT(0, instruction_size);

#ifdef ENABLE_DISASSEMBLER
  os << "Instructions (size = " << instruction_size << ")\n";
  Disassembler::Decode(nullptr, os, instructions().begin(),
                       instructions().begin() + instruction_size,
                       CodeReference(this), current_pc);
  os << "\n";

  if (handler_table_size() > 0) {
    HandlerTable table(this);
    os << "Exception Handler Table (size = " << table.NumberOfReturnEntries()
       << "):\n";
    table.HandlerTableReturnPrint(os);
    os << "\n";
  }

  if (protected_instructions_size_ > 0) {
    os << "Protected instructions:\n pc offset  land pad\n";
    for (auto& data : protected_instructions()) {
      os << std::setw(10) << std::hex << data.instr_offset << std::setw(10)
         << std::hex << data.landing_offset << "\n";
    }
    os << "\n";
  }

  if (!source_positions().empty()) {
    os << "Source positions:\n pc offset  position\n";
    for (SourcePositionTableIterator it(source_positions()); !it.done();
         it.Advance()) {
      os << std::setw(10) << std::hex << it.code_offset() << std::dec
         << std::setw(10) << it.source_position().ScriptOffset()
         << (it.is_statement() ? "  statement" : "") << "\n";
    }
    os << "\n";
  }

  if (safepoint_table_offset_ > 0) {
    SafepointTable table(this);
    table.Print(os);
    os << "\n";
  }

  os << "RelocInfo (size = " << reloc_info().size() << ")\n";
  for (RelocIterator it(instructions(), reloc_info(), constant_pool());
       !it.done(); it.next()) {
    it.rinfo()->Print(nullptr, os);
  }
  os << "\n";
#else   // !ENABLE_DISASSEMBLER
  os << "Instructions (size = " << instruction_size << ", "
     << static_cast<void*>(instructions().begin()) << "-"
     << static_cast<void*>(instructions().begin() + instruction_size) << ")\n";
#endif  // !ENABLE_DISASSEMBLER
}

const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
  switch (kind) {
    case WasmCode::kWasmFunction:
      return "wasm function";
    case WasmCode::kWasmToCapiWrapper:
      return "wasm-to-capi";
    case WasmCode::kWasmToJsWrapper:
      return "wasm-to-js";
    case WasmCode::kJumpTable:
      return "jump table";
  }
  return "unknown kind";
}

WasmCode::~WasmCode() {
  if (has_trap_handler_index()) {
    trap_handler::ReleaseHandlerData(trap_handler_index());
  }
}

V8_WARN_UNUSED_RESULT bool WasmCode::DecRefOnPotentiallyDeadCode() {
  if (GetWasmEngine()->AddPotentiallyDeadCode(this)) {
    // The code just became potentially dead. The ref count we wanted to
    // decrement is now transferred to the set of potentially dead code, and
    // will be decremented when the next GC is run.
    return false;
  }
  // If we reach here, the code was already potentially dead. Decrement the ref
  // count, and return true if it drops to zero.
  return DecRefOnDeadCode();
}

// static
void WasmCode::DecrementRefCount(base::Vector<WasmCode* const> code_vec) {
  // Decrement the ref counter of all given code objects. Keep the ones whose
  // ref count drops to zero.
  WasmEngine::DeadCodeMap dead_code;
  for (WasmCode* code : code_vec) {
    if (!code->DecRef()) continue;  // Remaining references.
    dead_code[code->native_module()].push_back(code);
  }

  if (dead_code.empty()) return;

  GetWasmEngine()->FreeDeadCode(dead_code);
}

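// Linearly scans the source position table and returns the source position of
// the last entry whose code offset is before {offset}, or {kNoSourcePosition}
// if there is none.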
int WasmCode::GetSourcePositionBefore(int offset) {
  int position = kNoSourcePosition;
  for (SourcePositionTableIterator iterator(source_positions());
       !iterator.done() && iterator.code_offset() < offset;
       iterator.Advance()) {
    position = iterator.source_position().ScriptOffset();
  }
  return position;
}

// static
constexpr size_t WasmCodeAllocator::kMaxCodeSpaceSize;

WasmCodeAllocator::WasmCodeAllocator(std::shared_ptr<Counters> async_counters)
    : protect_code_memory_(!V8_HAS_PTHREAD_JIT_WRITE_PROTECT &&
                           FLAG_wasm_write_protect_code_memory &&
                           !WasmCodeManager::MemoryProtectionKeysEnabled()),
      async_counters_(std::move(async_counters)) {
  owned_code_space_.reserve(4);
}

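// Hands all owned code reservations back to the WasmCodeManager, which also
// updates the global committed code space accounting.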
WasmCodeAllocator::~WasmCodeAllocator() {
  GetWasmCodeManager()->FreeNativeModule(base::VectorOf(owned_code_space_),
                                         committed_code_space());
}

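// Takes ownership of the initial code reservation and registers the whole
// region as free code space.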
void WasmCodeAllocator::Init(VirtualMemory code_space) {
  DCHECK(owned_code_space_.empty());
  DCHECK(free_code_space_.IsEmpty());
  free_code_space_.Merge(code_space.region());
  owned_code_space_.emplace_back(std::move(code_space));
  async_counters_->wasm_module_num_code_spaces()->AddSample(1);
}

namespace {
// On Windows, we cannot commit a region that straddles different reservations
// of virtual memory. Because we bump-allocate, and because, if we need more
// memory, we append that memory at the end of the owned_code_space_ list, we
// traverse that list in reverse order to find the reservation(s) that guide how
// to chunk the region to commit.
#if V8_OS_WIN
constexpr bool kNeedsToSplitRangeByReservations = true;
#else
constexpr bool kNeedsToSplitRangeByReservations = false;
#endif

base::SmallVector<base::AddressRegion, 1> SplitRangeByReservationsIfNeeded(
    base::AddressRegion range,
    const std::vector<VirtualMemory>& owned_code_space) {
  if (!kNeedsToSplitRangeByReservations) return {range};

  base::SmallVector<base::AddressRegion, 1> split_ranges;
  size_t missing_begin = range.begin();
  size_t missing_end = range.end();
  for (auto& vmem : base::Reversed(owned_code_space)) {
    Address overlap_begin = std::max(missing_begin, vmem.address());
    Address overlap_end = std::min(missing_end, vmem.end());
    if (overlap_begin >= overlap_end) continue;
    split_ranges.emplace_back(overlap_begin, overlap_end - overlap_begin);
    // Opportunistically reduce the missing range. This might terminate the loop
    // early.
    if (missing_begin == overlap_begin) missing_begin = overlap_end;
    if (missing_end == overlap_end) missing_end = overlap_begin;
    if (missing_begin >= missing_end) break;
  }
#ifdef ENABLE_SLOW_DCHECKS
  // The returned vector should cover the full range.
  size_t total_split_size = 0;
  for (auto split : split_ranges) total_split_size += split.size();
  DCHECK_EQ(range.size(), total_split_size);
#endif
  return split_ranges;
}

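// The far jump table needs one slot per declared function only on platforms
// where different code spaces can be out of reach of near jumps
// ({kNeedsFarJumpsBetweenCodeSpaces}); otherwise it only contains the runtime
// stub slots.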
int NumWasmFunctionsInFarJumpTable(uint32_t num_declared_functions) {
  return NativeModule::kNeedsFarJumpsBetweenCodeSpaces
             ? static_cast<int>(num_declared_functions)
             : 0;
}

// Returns an overapproximation of the code size overhead per new code space
// created by the jump tables.
size_t OverheadPerCodeSpace(uint32_t num_declared_functions) {
  // Overhead for the jump table.
  size_t overhead = RoundUp<kCodeAlignment>(
      JumpTableAssembler::SizeForNumberOfSlots(num_declared_functions));

#if defined(V8_OS_WIN64)
  // On Win64, we need to reserve some pages at the beginning of an executable
  // space. See {AddCodeSpace}.
  overhead += Heap::GetCodeRangeReservedAreaSize();
#endif  // V8_OS_WIN64

  // Overhead for the far jump table.
  overhead +=
      RoundUp<kCodeAlignment>(JumpTableAssembler::SizeForNumberOfFarJumpSlots(
          WasmCode::kRuntimeStubCount,
          NumWasmFunctionsInFarJumpTable(num_declared_functions)));

  return overhead;
}

// Returns an estimate of how much code space should be reserved.
size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
                       size_t total_reserved) {
  size_t overhead = OverheadPerCodeSpace(num_declared_functions);

  // Reserve the maximum of
  //   a) needed size + overhead (this is the minimum needed)
  //   b) 2 * overhead (to not waste too much space by overhead)
  //   c) 1/4 of current total reservation size (to grow exponentially)
  size_t minimum_size = 2 * overhead;
  size_t suggested_size =
      std::max(std::max(RoundUp<kCodeAlignment>(code_size_estimate) + overhead,
                        minimum_size),
               total_reserved / 4);

  if (V8_UNLIKELY(minimum_size > WasmCodeAllocator::kMaxCodeSpaceSize)) {
    auto oom_detail = base::FormattedString{}
                      << "required reservation minimum (" << minimum_size
                      << ") is bigger than supported maximum ("
                      << WasmCodeAllocator::kMaxCodeSpaceSize << ")";
    V8::FatalProcessOutOfMemory(nullptr,
                                "Exceeding maximum wasm code space size",
                                oom_detail.PrintToArray().data());
    UNREACHABLE();
  }

  // Limit by the maximum supported code space size.
  size_t reserve_size =
      std::min(WasmCodeAllocator::kMaxCodeSpaceSize, suggested_size);

  return reserve_size;
}

#ifdef DEBUG
// Check postconditions when returning from this method:
// 1) {region} must be fully contained in {writable_memory_};
// 2) {writable_memory_} must be a maximally merged ordered set of disjoint
//    non-empty regions.
class CheckWritableMemoryRegions {
 public:
  CheckWritableMemoryRegions(
      std::set<base::AddressRegion, base::AddressRegion::StartAddressLess>&
          writable_memory,
      base::AddressRegion new_region, size_t& new_writable_memory)
      : writable_memory_(writable_memory),
        new_region_(new_region),
        new_writable_memory_(new_writable_memory),
        old_writable_size_(std::accumulate(
            writable_memory_.begin(), writable_memory_.end(), size_t{0},
            [](size_t old, base::AddressRegion region) {
              return old + region.size();
            })) {}

  ~CheckWritableMemoryRegions() {
    // {new_region} must be contained in {writable_memory_}.
    DCHECK(std::any_of(
        writable_memory_.begin(), writable_memory_.end(),
        [this](auto region) { return region.contains(new_region_); }));

    // The new total size of writable memory must have increased by
    // {new_writable_memory}.
    size_t total_writable_size = std::accumulate(
        writable_memory_.begin(), writable_memory_.end(), size_t{0},
        [](size_t old, auto region) { return old + region.size(); });
    DCHECK_EQ(old_writable_size_ + new_writable_memory_, total_writable_size);

    // There are no empty regions.
    DCHECK(std::none_of(writable_memory_.begin(), writable_memory_.end(),
                        [](auto region) { return region.is_empty(); }));

    // Regions are sorted and disjoint. (std::accumulate has nodiscard on msvc
    // so USE is required to prevent build failures in debug builds).
    USE(std::accumulate(writable_memory_.begin(), writable_memory_.end(),
                        Address{0}, [](Address previous_end, auto region) {
                          DCHECK_LT(previous_end, region.begin());
                          return region.end();
                        }));
  }

 private:
  const std::set<base::AddressRegion, base::AddressRegion::StartAddressLess>&
      writable_memory_;
  const base::AddressRegion new_region_;
  const size_t& new_writable_memory_;
  const size_t old_writable_size_;
};
#else   // !DEBUG
class CheckWritableMemoryRegions {
 public:
  template <typename... Args>
  explicit CheckWritableMemoryRegions(Args...) {}
};
#endif  // !DEBUG

// Sentinel value to be used for {AllocateForCodeInRegion} for specifying no
// restriction on the region to allocate in.
constexpr base::AddressRegion kUnrestrictedRegion{
    kNullAddress, std::numeric_limits<size_t>::max()};

}  // namespace

base::Vector<byte> WasmCodeAllocator::AllocateForCode(
    NativeModule* native_module, size_t size) {
  return AllocateForCodeInRegion(native_module, size, kUnrestrictedRegion);
}

base::Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
    NativeModule* native_module, size_t size, base::AddressRegion region) {
  DCHECK_LT(0, size);
  auto* code_manager = GetWasmCodeManager();
  size = RoundUp<kCodeAlignment>(size);
  base::AddressRegion code_space =
      free_code_space_.AllocateInRegion(size, region);
  if (V8_UNLIKELY(code_space.is_empty())) {
    // Only allocations without a specific region are allowed to fail. Otherwise
    // the region must have been allocated big enough to hold all initial
    // allocations (jump tables etc).
    CHECK_EQ(kUnrestrictedRegion, region);

    Address hint = owned_code_space_.empty() ? kNullAddress
                                             : owned_code_space_.back().end();

    size_t total_reserved = 0;
    for (auto& vmem : owned_code_space_) total_reserved += vmem.size();
    size_t reserve_size = ReservationSize(
        size, native_module->module()->num_declared_functions, total_reserved);
    VirtualMemory new_mem =
        code_manager->TryAllocate(reserve_size, reinterpret_cast<void*>(hint));
    if (!new_mem.IsReserved()) {
      auto oom_detail = base::FormattedString{}
                        << "cannot allocate more code space (" << reserve_size
                        << " bytes, currently " << total_reserved << ")";
      V8::FatalProcessOutOfMemory(nullptr, "Grow wasm code space",
                                  oom_detail.PrintToArray().data());
      UNREACHABLE();
    }

    base::AddressRegion new_region = new_mem.region();
    code_manager->AssignRange(new_region, native_module);
    free_code_space_.Merge(new_region);
    owned_code_space_.emplace_back(std::move(new_mem));
    native_module->AddCodeSpaceLocked(new_region);

    code_space = free_code_space_.Allocate(size);
    DCHECK(!code_space.is_empty());
    async_counters_->wasm_module_num_code_spaces()->AddSample(
        static_cast<int>(owned_code_space_.size()));
  }
  const Address commit_page_size = CommitPageSize();
  Address commit_start = RoundUp(code_space.begin(), commit_page_size);
  if (commit_start != code_space.begin()) {
    MakeWritable({commit_start - commit_page_size, commit_page_size});
  }

  Address commit_end = RoundUp(code_space.end(), commit_page_size);
  // {commit_start} will be either code_space.start or the start of the next
  // page. {commit_end} will be the start of the page after the one in which
  // the allocation ends.
  // We start from an aligned start, and we know we allocated vmem in
  // page multiples.
  // We just need to commit what's not committed. The page in which we
  // start is already committed (or we start at the beginning of a page).
  // The end needs to be committed all through the end of the page.
  if (commit_start < commit_end) {
    for (base::AddressRegion split_range : SplitRangeByReservationsIfNeeded(
             {commit_start, commit_end - commit_start}, owned_code_space_)) {
      code_manager->Commit(split_range);
    }
    committed_code_space_.fetch_add(commit_end - commit_start);
    // Committed code cannot grow bigger than maximum code space size.
    DCHECK_LE(committed_code_space_.load(), FLAG_wasm_max_code_space * MB);
    if (protect_code_memory_) {
      DCHECK_LT(0, writers_count_);
      InsertIntoWritableRegions({commit_start, commit_end - commit_start},
                                false);
    }
  }
  DCHECK(IsAligned(code_space.begin(), kCodeAlignment));
  allocated_code_space_.Merge(code_space);
  generated_code_size_.fetch_add(code_space.size(), std::memory_order_relaxed);

  TRACE_HEAP("Code alloc for %p: 0x%" PRIxPTR ",+%zu\n", this,
             code_space.begin(), size);
  return {reinterpret_cast<byte*>(code_space.begin()), code_space.size()};
}

// TODO(dlehmann): Ensure that {AddWriter()} is always paired up with a
// {RemoveWriter}, such that eventually the code space is write protected.
// One solution is to make the API foolproof by hiding {SetWritable()} and
// allowing change of permissions only through {CodeSpaceWriteScope}.
// TODO(dlehmann): Add tests that ensure the code space is eventually write-
// protected.
void WasmCodeAllocator::AddWriter() {
  DCHECK(protect_code_memory_);
  ++writers_count_;
}

void WasmCodeAllocator::RemoveWriter() {
  DCHECK(protect_code_memory_);
  DCHECK_GT(writers_count_, 0);
  if (--writers_count_ > 0) return;

  // Switch all memory to non-writable.
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  for (base::AddressRegion writable : writable_memory_) {
    for (base::AddressRegion split_range :
         SplitRangeByReservationsIfNeeded(writable, owned_code_space_)) {
      TRACE_HEAP("Set 0x%" V8PRIxPTR ":0x%" V8PRIxPTR " to RX\n",
                 split_range.begin(), split_range.end());
      CHECK(SetPermissions(page_allocator, split_range.begin(),
                           split_range.size(), PageAllocator::kReadExecute));
    }
  }
  writable_memory_.clear();
}

void WasmCodeAllocator::MakeWritable(base::AddressRegion region) {
  if (!protect_code_memory_) return;
  DCHECK_LT(0, writers_count_);
  DCHECK(!region.is_empty());
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();

  // Align to commit page size.
  size_t commit_page_size = page_allocator->CommitPageSize();
  DCHECK(base::bits::IsPowerOfTwo(commit_page_size));
  Address begin = RoundDown(region.begin(), commit_page_size);
  Address end = RoundUp(region.end(), commit_page_size);
  region = base::AddressRegion(begin, end - begin);

  InsertIntoWritableRegions(region, true);
}

void WasmCodeAllocator::FreeCode(base::Vector<WasmCode* const> codes) {
  // Zap code area and collect freed code regions.
  DisjointAllocationPool freed_regions;
  size_t code_size = 0;
  for (WasmCode* code : codes) {
    code_size += code->instructions().size();
    freed_regions.Merge(base::AddressRegion{code->instruction_start(),
                                            code->instructions().size()});
  }
  freed_code_size_.fetch_add(code_size);

  // Merge {freed_regions} into {freed_code_space_} and put all ranges of full
  // pages to decommit into {regions_to_decommit} (decommitting is expensive,
  // so try to merge regions before decommitting).
  DisjointAllocationPool regions_to_decommit;
  size_t commit_page_size = CommitPageSize();
  for (auto region : freed_regions.regions()) {
    auto merged_region = freed_code_space_.Merge(region);
    Address discard_start =
        std::max(RoundUp(merged_region.begin(), commit_page_size),
                 RoundDown(region.begin(), commit_page_size));
    Address discard_end =
        std::min(RoundDown(merged_region.end(), commit_page_size),
                 RoundUp(region.end(), commit_page_size));
    if (discard_start >= discard_end) continue;
    regions_to_decommit.Merge({discard_start, discard_end - discard_start});
  }

  auto* code_manager = GetWasmCodeManager();
  for (auto region : regions_to_decommit.regions()) {
    size_t old_committed = committed_code_space_.fetch_sub(region.size());
    DCHECK_GE(old_committed, region.size());
    USE(old_committed);
    for (base::AddressRegion split_range :
         SplitRangeByReservationsIfNeeded(region, owned_code_space_)) {
      code_manager->Decommit(split_range);
    }
  }
}

size_t WasmCodeAllocator::GetNumCodeSpaces() const {
  return owned_code_space_.size();
}

void WasmCodeAllocator::InsertIntoWritableRegions(base::AddressRegion region,
                                                  bool switch_to_writable) {
  size_t new_writable_memory = 0;

  CheckWritableMemoryRegions check_on_return{writable_memory_, region,
                                             new_writable_memory};

  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  // Subroutine to make a non-writable region writable (if {switch_to_writable}
  // is {true}) and insert it into {writable_memory_}.
  auto make_writable = [&](decltype(writable_memory_)::iterator insert_pos,
                           base::AddressRegion region) {
    new_writable_memory += region.size();
    if (switch_to_writable) {
      for (base::AddressRegion split_range :
           SplitRangeByReservationsIfNeeded(region, owned_code_space_)) {
        TRACE_HEAP("Set 0x%" V8PRIxPTR ":0x%" V8PRIxPTR " to RWX\n",
                   split_range.begin(), split_range.end());
        CHECK(SetPermissions(page_allocator, split_range.begin(),
                             split_range.size(),
                             PageAllocator::kReadWriteExecute));
      }
    }

    // Insert {region} into {writable_memory_} before {insert_pos}, potentially
    // merging it with the surrounding regions.
    if (insert_pos != writable_memory_.begin()) {
      auto previous = insert_pos;
      --previous;
      if (previous->end() == region.begin()) {
        region = {previous->begin(), previous->size() + region.size()};
        writable_memory_.erase(previous);
      }
    }
    if (insert_pos != writable_memory_.end() &&
        region.end() == insert_pos->begin()) {
      region = {region.begin(), insert_pos->size() + region.size()};
      insert_pos = writable_memory_.erase(insert_pos);
    }
    writable_memory_.insert(insert_pos, region);
  };

  DCHECK(!region.is_empty());
  // Find a possible insertion position by identifying the first region whose
  // start address is not less than that of {region}, and start the merge from
  // the existing region before that.
  auto it = writable_memory_.lower_bound(region);
  if (it != writable_memory_.begin()) --it;
  for (;; ++it) {
    if (it == writable_memory_.end() || it->begin() >= region.end()) {
      // No overlap; add before {it}.
      make_writable(it, region);
      return;
    }
    if (it->end() <= region.begin()) continue;  // Continue after {it}.
    base::AddressRegion overlap = it->GetOverlap(region);
    DCHECK(!overlap.is_empty());
    if (overlap.begin() == region.begin()) {
      if (overlap.end() == region.end()) return;  // Fully contained already.
      // Remove overlap (which is already writable) and continue.
      region = {overlap.end(), region.end() - overlap.end()};
      continue;
    }
    if (overlap.end() == region.end()) {
      // Remove overlap (which is already writable), then make the remaining
      // region writable.
      region = {region.begin(), overlap.begin() - region.begin()};
      make_writable(it, region);
      return;
    }
    // Split {region}, make the split writable, and continue with the rest.
    base::AddressRegion split = {region.begin(),
                                 overlap.begin() - region.begin()};
    make_writable(it, split);
    region = {overlap.end(), region.end() - overlap.end()};
  }
}

namespace {
BoundsCheckStrategy GetBoundsChecks(const WasmModule* module) {
  if (!FLAG_wasm_bounds_checks) return kNoBoundsChecks;
  if (FLAG_wasm_enforce_bounds_checks) return kExplicitBoundsChecks;
  // We do not have trap handler support for memory64 yet.
  if (module->is_memory64) return kExplicitBoundsChecks;
  if (trap_handler::IsTrapHandlerEnabled()) return kTrapHandler;
  return kExplicitBoundsChecks;
}
}  // namespace

NativeModule::NativeModule(const WasmFeatures& enabled,
                           DynamicTiering dynamic_tiering,
                           VirtualMemory code_space,
                           std::shared_ptr<const WasmModule> module,
                           std::shared_ptr<Counters> async_counters,
                           std::shared_ptr<NativeModule>* shared_this)
    : engine_scope_(
          GetWasmEngine()->GetBarrierForBackgroundCompile()->TryLock()),
      code_allocator_(async_counters),
      enabled_features_(enabled),
      module_(std::move(module)),
      import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
          new WasmImportWrapperCache())),
      bounds_checks_(GetBoundsChecks(module_.get())) {
  DCHECK(engine_scope_);
  // We receive a pointer to an empty {std::shared_ptr}, and install ourselves
  // there.
  DCHECK_NOT_NULL(shared_this);
  DCHECK_NULL(*shared_this);
  shared_this->reset(this);
  compilation_state_ = CompilationState::New(
      *shared_this, std::move(async_counters), dynamic_tiering);
  compilation_state_->InitCompileJob();
  DCHECK_NOT_NULL(module_);
  if (module_->num_declared_functions > 0) {
    code_table_ =
        std::make_unique<WasmCode*[]>(module_->num_declared_functions);
    tiering_budgets_ =
        std::make_unique<uint32_t[]>(module_->num_declared_functions);

    std::fill_n(tiering_budgets_.get(), module_->num_declared_functions,
                FLAG_wasm_tiering_budget);
  }
  // Even though there cannot be another thread using this object (since we are
  // just constructing it), we need to hold the mutex to fulfill the
  // precondition of {WasmCodeAllocator::Init}, which calls
  // {NativeModule::AddCodeSpaceLocked}.
  base::RecursiveMutexGuard guard{&allocation_mutex_};
  auto initial_region = code_space.region();
  code_allocator_.Init(std::move(code_space));
  AddCodeSpaceLocked(initial_region);
}

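// Test-only: grows the code table to {max_functions} entries and re-creates
// the main jump table with a matching number of slots.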
void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
  WasmCodeRefScope code_ref_scope;
  DCHECK_LE(module_->num_declared_functions, max_functions);
  auto new_table = std::make_unique<WasmCode*[]>(max_functions);
  if (module_->num_declared_functions > 0) {
    memcpy(new_table.get(), code_table_.get(),
           module_->num_declared_functions * sizeof(WasmCode*));
  }
  code_table_ = std::move(new_table);

  base::AddressRegion single_code_space_region;
  base::RecursiveMutexGuard guard(&allocation_mutex_);
  CHECK_EQ(1, code_space_data_.size());
  single_code_space_region = code_space_data_[0].region;
  // Re-allocate jump table.
  main_jump_table_ = CreateEmptyJumpTableInRegionLocked(
      JumpTableAssembler::SizeForNumberOfSlots(max_functions),
      single_code_space_region);
  code_space_data_[0].jump_table = main_jump_table_;
}

void NativeModule::LogWasmCodes(Isolate* isolate, Script script) {
  DisallowGarbageCollection no_gc;
  if (!WasmCode::ShouldBeLogged(isolate)) return;

  TRACE_EVENT1("v8.wasm", "wasm.LogWasmCodes", "functions",
               module_->num_declared_functions);

  Object url_obj = script.name();
  DCHECK(url_obj.IsString() || url_obj.IsUndefined());
  std::unique_ptr<char[]> source_url =
      url_obj.IsString() ? String::cast(url_obj).ToCString() : nullptr;

  // Log all owned code, not just the current entries in the code table. This
  // will also include import wrappers.
  WasmCodeRefScope code_ref_scope;
  for (auto& code : SnapshotAllOwnedCode()) {
    code->LogCode(isolate, source_url.get(), script.id());
  }
}

CompilationEnv NativeModule::CreateCompilationEnv() const {
  return {module(), bounds_checks_, kRuntimeExceptionSupport, enabled_features_,
          compilation_state()->dynamic_tiering()};
}

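// Test-only: copies an existing (on-heap) {Code} object into this module's
// code space and relocates it to its new location.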
WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
  CodeSpaceWriteScope code_space_write_scope(this);
  const size_t relocation_size = code->relocation_size();
  base::OwnedVector<byte> reloc_info;
  if (relocation_size > 0) {
    reloc_info = base::OwnedVector<byte>::Of(
        base::Vector<byte>{code->relocation_start(), relocation_size});
  }
  Handle<ByteArray> source_pos_table(code->source_position_table(),
                                     code->GetIsolate());
  base::OwnedVector<byte> source_pos =
      base::OwnedVector<byte>::NewForOverwrite(source_pos_table->length());
  if (source_pos_table->length() > 0) {
    source_pos_table->copy_out(0, source_pos.start(),
                               source_pos_table->length());
  }
  CHECK(!code->is_off_heap_trampoline());
  static_assert(Code::kOnHeapBodyIsContiguous);
  base::Vector<const byte> instructions(
      reinterpret_cast<byte*>(code->raw_body_start()),
      static_cast<size_t>(code->raw_body_size()));
  const int stack_slots = code->stack_slots();

  // Metadata offsets in Code objects are relative to the start of the metadata
  // section, whereas WasmCode expects offsets relative to InstructionStart.
  const int base_offset = code->raw_instruction_size();
  // TODO(jgruber,v8:8758): Remove this translation. It exists only because
  // Code objects contains real offsets but WasmCode expects an offset of 0 to
  // mean 'empty'.
  const int safepoint_table_offset =
      code->has_safepoint_table() ? base_offset + code->safepoint_table_offset()
                                  : 0;
  const int handler_table_offset = base_offset + code->handler_table_offset();
  const int constant_pool_offset = base_offset + code->constant_pool_offset();
  const int code_comments_offset = base_offset + code->code_comments_offset();

  base::RecursiveMutexGuard guard{&allocation_mutex_};
  base::Vector<uint8_t> dst_code_bytes =
      code_allocator_.AllocateForCode(this, instructions.size());
  memcpy(dst_code_bytes.begin(), instructions.begin(), instructions.size());

  // Apply the relocation delta by iterating over the RelocInfo.
  intptr_t delta = reinterpret_cast<Address>(dst_code_bytes.begin()) -
                   code->raw_instruction_start();
  int mode_mask =
      RelocInfo::kApplyMask | RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
  auto jump_tables_ref =
      FindJumpTablesForRegionLocked(base::AddressRegionOf(dst_code_bytes));
  Address dst_code_addr = reinterpret_cast<Address>(dst_code_bytes.begin());
  Address constant_pool_start = dst_code_addr + constant_pool_offset;
  RelocIterator orig_it(*code, mode_mask);
  for (RelocIterator it(dst_code_bytes, reloc_info.as_vector(),
                        constant_pool_start, mode_mask);
       !it.done(); it.next(), orig_it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    if (RelocInfo::IsWasmStubCall(mode)) {
      uint32_t stub_call_tag = orig_it.rinfo()->wasm_call_tag();
      DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
      Address entry = GetNearRuntimeStubEntry(
          static_cast<WasmCode::RuntimeStubId>(stub_call_tag), jump_tables_ref);
      it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
    } else {
      it.rinfo()->apply(delta);
    }
  }

  // Flush the i-cache after relocation.
  FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());

  std::unique_ptr<WasmCode> new_code{
      new WasmCode{this,                     // native_module
                   kAnonymousFuncIndex,      // index
                   dst_code_bytes,           // instructions
                   stack_slots,              // stack_slots
                   0,                        // tagged_parameter_slots
                   safepoint_table_offset,   // safepoint_table_offset
                   handler_table_offset,     // handler_table_offset
                   constant_pool_offset,     // constant_pool_offset
                   code_comments_offset,     // code_comments_offset
                   instructions.length(),    // unpadded_binary_size
                   {},                       // protected_instructions
                   reloc_info.as_vector(),   // reloc_info
                   source_pos.as_vector(),   // source positions
                   WasmCode::kWasmFunction,  // kind
                   ExecutionTier::kNone,     // tier
                   kNoDebugging}};           // for_debugging
  new_code->MaybePrint();
  new_code->Validate();

  return PublishCodeLocked(std::move(new_code));
}

void NativeModule::UseLazyStub(uint32_t func_index) {
  DCHECK_LE(module_->num_imported_functions, func_index);
  DCHECK_LT(func_index,
            module_->num_imported_functions + module_->num_declared_functions);
  // Avoid opening a new write scope per function. The caller should hold the
  // scope instead.
  DCHECK(CodeSpaceWriteScope::IsInScope());

  base::RecursiveMutexGuard guard(&allocation_mutex_);
  if (!lazy_compile_table_) {
    uint32_t num_slots = module_->num_declared_functions;
    WasmCodeRefScope code_ref_scope;
    lazy_compile_table_ = CreateEmptyJumpTableLocked(
        JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots));
    Address compile_lazy_address = GetNearRuntimeStubEntry(
        WasmCode::kWasmCompileLazy,
        FindJumpTablesForRegionLocked(
            base::AddressRegionOf(lazy_compile_table_->instructions())));
    JumpTableAssembler::GenerateLazyCompileTable(
        lazy_compile_table_->instruction_start(), num_slots,
        module_->num_imported_functions, compile_lazy_address);
  }

  // Add jump table entry for jump to the lazy compile stub.
  uint32_t slot_index = declared_function_index(module(), func_index);
  DCHECK_NULL(code_table_[slot_index]);
  Address lazy_compile_target =
      lazy_compile_table_->instruction_start() +
      JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
  PatchJumpTablesLocked(slot_index, lazy_compile_target);
}

std::unique_ptr<WasmCode> NativeModule::AddCode(
    int index, const CodeDesc& desc, int stack_slots,
    uint32_t tagged_parameter_slots,
    base::Vector<const byte> protected_instructions_data,
    base::Vector<const byte> source_position_table, WasmCode::Kind kind,
    ExecutionTier tier, ForDebugging for_debugging) {
  base::Vector<byte> code_space;
  NativeModule::JumpTablesRef jump_table_ref;
  {
    base::RecursiveMutexGuard guard{&allocation_mutex_};
    code_space = code_allocator_.AllocateForCode(this, desc.instr_size);
    jump_table_ref =
        FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
  }
  return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
                              protected_instructions_data,
                              source_position_table, kind, tier, for_debugging,
                              code_space, jump_table_ref);
}

std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
    int index, const CodeDesc& desc, int stack_slots,
    uint32_t tagged_parameter_slots,
    base::Vector<const byte> protected_instructions_data,
    base::Vector<const byte> source_position_table, WasmCode::Kind kind,
    ExecutionTier tier, ForDebugging for_debugging,
    base::Vector<uint8_t> dst_code_bytes, const JumpTablesRef& jump_tables) {
  base::Vector<byte> reloc_info{
      desc.buffer + desc.buffer_size - desc.reloc_size,
      static_cast<size_t>(desc.reloc_size)};
  UpdateCodeSize(desc.instr_size, tier, for_debugging);

  // TODO(jgruber,v8:8758): Remove this translation. It exists only because
  // CodeDesc contains real offsets but WasmCode expects an offset of 0 to mean
  // 'empty'.
  const int safepoint_table_offset =
      desc.safepoint_table_size == 0 ? 0 : desc.safepoint_table_offset;
  const int handler_table_offset = desc.handler_table_offset;
  const int constant_pool_offset = desc.constant_pool_offset;
  const int code_comments_offset = desc.code_comments_offset;
  const int instr_size = desc.instr_size;

  memcpy(dst_code_bytes.begin(), desc.buffer,
         static_cast<size_t>(desc.instr_size));

  // Apply the relocation delta by iterating over the RelocInfo.
  intptr_t delta = dst_code_bytes.begin() - desc.buffer;
  int mode_mask = RelocInfo::kApplyMask |
                  RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
                  RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
  Address code_start = reinterpret_cast<Address>(dst_code_bytes.begin());
  Address constant_pool_start = code_start + constant_pool_offset;
  for (RelocIterator it(dst_code_bytes, reloc_info, constant_pool_start,
                        mode_mask);
       !it.done(); it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    if (RelocInfo::IsWasmCall(mode)) {
      uint32_t call_tag = it.rinfo()->wasm_call_tag();
      Address target = GetNearCallTargetForFunction(call_tag, jump_tables);
      it.rinfo()->set_wasm_call_address(target, SKIP_ICACHE_FLUSH);
    } else if (RelocInfo::IsWasmStubCall(mode)) {
      uint32_t stub_call_tag = it.rinfo()->wasm_call_tag();
      DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
      Address entry = GetNearRuntimeStubEntry(
          static_cast<WasmCode::RuntimeStubId>(stub_call_tag), jump_tables);
      it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
    } else {
      it.rinfo()->apply(delta);
    }
  }

  // Flush the i-cache after relocation.
  FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());

  // Liftoff code will not be relocated or serialized, thus do not store any
  // relocation information.
  if (tier == ExecutionTier::kLiftoff) reloc_info = {};

  std::unique_ptr<WasmCode> code{new WasmCode{
      this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
      safepoint_table_offset, handler_table_offset, constant_pool_offset,
      code_comments_offset, instr_size, protected_instructions_data, reloc_info,
1268
      source_position_table, kind, tier, for_debugging}};
1269

1270 1271 1272
  code->MaybePrint();
  code->Validate();

1273 1274
  return code;
}
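// The relocation step above can be pictured with a small, self-contained
// sketch (hypothetical buffers and offset list, not the real RelocInfo
// machinery): after copying the instructions, every recorded absolute address
// inside the copied bytes is shifted by the distance between the two buffers.
//
//   uint8_t* src = ...;                  // compiler output buffer
//   uint8_t* dst = ...;                  // destination in the code space
//   intptr_t delta = dst - src;
//   for (size_t offset : absolute_address_sites) {  // hypothetical list
//     uintptr_t addr;
//     memcpy(&addr, dst + offset, sizeof(addr));
//     addr += delta;
//     memcpy(dst + offset, &addr, sizeof(addr));
//   }
//
// Wasm calls and stub calls are not adjusted by {delta} but redirected to the
// jump tables picked via {jump_tables}, as done in the loop above.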

WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
               "wasm.PublishCode");
  base::RecursiveMutexGuard lock(&allocation_mutex_);
  CodeSpaceWriteScope code_space_write_scope(this);
  return PublishCodeLocked(std::move(code));
}

std::vector<WasmCode*> NativeModule::PublishCode(
    base::Vector<std::unique_ptr<WasmCode>> codes) {
  // Publishing often happens in a loop, so the caller should hold the
  // {CodeSpaceWriteScope} outside of such a loop.
  DCHECK(CodeSpaceWriteScope::IsInScope());
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
               "wasm.PublishCode", "number", codes.size());
  std::vector<WasmCode*> published_code;
  published_code.reserve(codes.size());
  base::RecursiveMutexGuard lock(&allocation_mutex_);
  // The published code is put into the top-most surrounding {WasmCodeRefScope}.
  for (auto& code : codes) {
    published_code.push_back(PublishCodeLocked(std::move(code)));
  }
  return published_code;
}

WasmCode::Kind GetCodeKind(const WasmCompilationResult& result) {
  switch (result.kind) {
    case WasmCompilationResult::kWasmToJsWrapper:
      return WasmCode::Kind::kWasmToJsWrapper;
    case WasmCompilationResult::kFunction:
      return WasmCode::Kind::kWasmFunction;
    default:
      UNREACHABLE();
  }
}

WasmCode* NativeModule::PublishCodeLocked(
    std::unique_ptr<WasmCode> owned_code) {
  allocation_mutex_.AssertHeld();

  WasmCode* code = owned_code.get();
  new_owned_code_.emplace_back(std::move(owned_code));

  // Add the code to the surrounding code ref scope, so the returned pointer is
  // guaranteed to be valid.
  WasmCodeRefScope::AddRef(code);

  if (code->index() < static_cast<int>(module_->num_imported_functions)) {
    return code;
  }

  DCHECK_LT(code->index(), num_functions());

  code->RegisterTrapHandlerData();

  // Put the code in the debugging cache, if needed.
  if (V8_UNLIKELY(cached_code_)) InsertToCodeCache(code);

  // Assume an order of execution tiers that represents the quality of their
  // generated code.
  static_assert(ExecutionTier::kNone < ExecutionTier::kLiftoff &&
                    ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
                "Assume an order on execution tiers");

  uint32_t slot_idx = declared_function_index(module(), code->index());
  WasmCode* prior_code = code_table_[slot_idx];
  // If we are tiered down, install all debugging code (except for stepping
  // code, which is only used for a single frame and never installed in the
  // code table or jump table). Otherwise, install code if it was compiled
  // with a higher tier.
  static_assert(
      kForDebugging > kNoDebugging && kWithBreakpoints > kForDebugging,
      "for_debugging is ordered");
  const bool update_code_table =
      // Never install stepping code.
      code->for_debugging() != kForStepping &&
      (!prior_code ||
       (tiering_state_ == kTieredDown
            // Tiered down: Install breakpoints over normal debug code.
            ? prior_code->for_debugging() <= code->for_debugging()
            // Tiered up: Install if the tier is higher than before or we
            // replace debugging code with non-debugging code.
            : (prior_code->tier() < code->tier() ||
               (prior_code->for_debugging() && !code->for_debugging()))));
  if (update_code_table) {
    code_table_[slot_idx] = code;
    if (prior_code) {
      WasmCodeRefScope::AddRef(prior_code);
      // The code is added to the current {WasmCodeRefScope}, hence the ref
      // count cannot drop to zero here.
      prior_code->DecRefOnLiveCode();
    }

    PatchJumpTablesLocked(slot_idx, code->instruction_start());
  } else {
    // The code table does not hold a reference to the code, hence decrement
    // the initial ref count of 1. The code was added to the
    // {WasmCodeRefScope} though, so it cannot die here.
    code->DecRefOnLiveCode();
  }

  return code;
}
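// A few concrete cases of the {update_code_table} decision above (derived
// from the predicate, for illustration only):
//   prior: Liftoff, new: TurboFan (both non-debug, tiered up)   -> install
//   prior: TurboFan, new: Liftoff (both non-debug, tiered up)   -> keep prior
//   tiered down, prior: kForDebugging, new: kWithBreakpoints    -> install
//   new code compiled kForStepping                              -> never install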

void NativeModule::ReinstallDebugCode(WasmCode* code) {
  base::RecursiveMutexGuard lock(&allocation_mutex_);

  DCHECK_EQ(this, code->native_module());
  DCHECK_EQ(kWithBreakpoints, code->for_debugging());
  DCHECK(!code->IsAnonymous());
  DCHECK_LE(module_->num_imported_functions, code->index());
  DCHECK_LT(code->index(), num_functions());

  // If the module is tiered up by now, do not reinstall debug code.
  if (tiering_state_ != kTieredDown) return;

  uint32_t slot_idx = declared_function_index(module(), code->index());
  if (WasmCode* prior_code = code_table_[slot_idx]) {
    WasmCodeRefScope::AddRef(prior_code);
    // The code is added to the current {WasmCodeRefScope}, hence the ref
    // count cannot drop to zero here.
    prior_code->DecRefOnLiveCode();
  }
  code_table_[slot_idx] = code;
  code->IncRef();

  CodeSpaceWriteScope code_space_write_scope(this);
  PatchJumpTablesLocked(slot_idx, code->instruction_start());
}

std::pair<base::Vector<uint8_t>, NativeModule::JumpTablesRef>
NativeModule::AllocateForDeserializedCode(size_t total_code_size) {
  base::RecursiveMutexGuard guard{&allocation_mutex_};
  base::Vector<uint8_t> code_space =
      code_allocator_.AllocateForCode(this, total_code_size);
  auto jump_tables =
      FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
  return {code_space, jump_tables};
}

std::unique_ptr<WasmCode> NativeModule::AddDeserializedCode(
    int index, base::Vector<byte> instructions, int stack_slots,
    uint32_t tagged_parameter_slots, int safepoint_table_offset,
    int handler_table_offset, int constant_pool_offset,
    int code_comments_offset, int unpadded_binary_size,
    base::Vector<const byte> protected_instructions_data,
    base::Vector<const byte> reloc_info,
    base::Vector<const byte> source_position_table, WasmCode::Kind kind,
    ExecutionTier tier) {
  UpdateCodeSize(instructions.size(), tier, kNoDebugging);

  return std::unique_ptr<WasmCode>{new WasmCode{
      this, index, instructions, stack_slots, tagged_parameter_slots,
      safepoint_table_offset, handler_table_offset, constant_pool_offset,
      code_comments_offset, unpadded_binary_size, protected_instructions_data,
      reloc_info, source_position_table, kind, tier, kNoDebugging}};
}

std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
  base::RecursiveMutexGuard lock(&allocation_mutex_);
  WasmCode** start = code_table_.get();
  WasmCode** end = start + module_->num_declared_functions;
  for (WasmCode* code : base::VectorOf(start, end - start)) {
    if (code) WasmCodeRefScope::AddRef(code);
  }
  return std::vector<WasmCode*>{start, end};
}

std::vector<WasmCode*> NativeModule::SnapshotAllOwnedCode() const {
  base::RecursiveMutexGuard lock(&allocation_mutex_);
  if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();

  std::vector<WasmCode*> all_code(owned_code_.size());
  std::transform(owned_code_.begin(), owned_code_.end(), all_code.begin(),
                 [](auto& entry) { return entry.second.get(); });
  std::for_each(all_code.begin(), all_code.end(), WasmCodeRefScope::AddRef);
  return all_code;
}

WasmCode* NativeModule::GetCode(uint32_t index) const {
  base::RecursiveMutexGuard guard(&allocation_mutex_);
  WasmCode* code = code_table_[declared_function_index(module(), index)];
  if (code) WasmCodeRefScope::AddRef(code);
  return code;
}

bool NativeModule::HasCode(uint32_t index) const {
  base::RecursiveMutexGuard guard(&allocation_mutex_);
  return code_table_[declared_function_index(module(), index)] != nullptr;
}

bool NativeModule::HasCodeWithTier(uint32_t index, ExecutionTier tier) const {
  base::RecursiveMutexGuard guard(&allocation_mutex_);
  return code_table_[declared_function_index(module(), index)] != nullptr &&
         code_table_[declared_function_index(module(), index)]->tier() == tier;
}

void NativeModule::SetWasmSourceMap(
    std::unique_ptr<WasmModuleSourceMap> source_map) {
  source_map_ = std::move(source_map);
}

WasmModuleSourceMap* NativeModule::GetWasmSourceMap() const {
  return source_map_.get();
}

WasmCode* NativeModule::CreateEmptyJumpTableLocked(int jump_table_size) {
  return CreateEmptyJumpTableInRegionLocked(jump_table_size,
                                            kUnrestrictedRegion);
}

WasmCode* NativeModule::CreateEmptyJumpTableInRegionLocked(
    int jump_table_size, base::AddressRegion region) {
  allocation_mutex_.AssertHeld();
  // Only call this if we really need a jump table.
  DCHECK_LT(0, jump_table_size);
  CodeSpaceWriteScope code_space_write_scope(this);
  base::Vector<uint8_t> code_space =
      code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
  DCHECK(!code_space.empty());
  UpdateCodeSize(jump_table_size, ExecutionTier::kNone, kNoDebugging);
  ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
  std::unique_ptr<WasmCode> code{
      new WasmCode{this,                  // native_module
                   kAnonymousFuncIndex,   // index
                   code_space,            // instructions
                   0,                     // stack_slots
                   0,                     // tagged_parameter_slots
                   0,                     // safepoint_table_offset
                   jump_table_size,       // handler_table_offset
                   jump_table_size,       // constant_pool_offset
                   jump_table_size,       // code_comments_offset
                   jump_table_size,       // unpadded_binary_size
                   {},                    // protected_instructions
                   {},                    // reloc_info
                   {},                    // source_pos
                   WasmCode::kJumpTable,  // kind
                   ExecutionTier::kNone,  // tier
                   kNoDebugging}};        // for_debugging
  return PublishCodeLocked(std::move(code));
}

void NativeModule::UpdateCodeSize(size_t size, ExecutionTier tier,
                                  ForDebugging for_debugging) {
  if (for_debugging != kNoDebugging) return;
  // Count jump tables (ExecutionTier::kNone) for both Liftoff and TurboFan as
  // this is shared code.
  if (tier != ExecutionTier::kTurbofan) liftoff_code_size_.fetch_add(size);
  if (tier != ExecutionTier::kLiftoff) turbofan_code_size_.fetch_add(size);
}

void NativeModule::PatchJumpTablesLocked(uint32_t slot_index, Address target) {
  allocation_mutex_.AssertHeld();

  for (auto& code_space_data : code_space_data_) {
    DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
    if (!code_space_data.jump_table) continue;
    PatchJumpTableLocked(code_space_data, slot_index, target);
  }
}

void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
                                        uint32_t slot_index, Address target) {
  allocation_mutex_.AssertHeld();

  DCHECK_NOT_NULL(code_space_data.jump_table);
  DCHECK_NOT_NULL(code_space_data.far_jump_table);

  // Jump tables are often allocated next to each other, so we can switch
  // permissions on both at the same time.
  if (code_space_data.jump_table->instructions().end() ==
      code_space_data.far_jump_table->instructions().begin()) {
    base::Vector<uint8_t> jump_tables_space = base::VectorOf(
        code_space_data.jump_table->instructions().begin(),
        code_space_data.jump_table->instructions().size() +
            code_space_data.far_jump_table->instructions().size());
    code_allocator_.MakeWritable(AddressRegionOf(jump_tables_space));
  } else {
    code_allocator_.MakeWritable(
        AddressRegionOf(code_space_data.jump_table->instructions()));
    code_allocator_.MakeWritable(
        AddressRegionOf(code_space_data.far_jump_table->instructions()));
  }

  DCHECK_LT(slot_index, module_->num_declared_functions);
  Address jump_table_slot =
      code_space_data.jump_table->instruction_start() +
      JumpTableAssembler::JumpSlotIndexToOffset(slot_index);
  uint32_t far_jump_table_offset = JumpTableAssembler::FarJumpSlotIndexToOffset(
      WasmCode::kRuntimeStubCount + slot_index);
  // Only pass the far jump table start if the far jump table actually has a
  // slot for this function index (i.e. does not only contain runtime stubs).
  bool has_far_jump_slot =
      far_jump_table_offset <
      code_space_data.far_jump_table->instructions().size();
  Address far_jump_table_start =
      code_space_data.far_jump_table->instruction_start();
  Address far_jump_table_slot =
      has_far_jump_slot ? far_jump_table_start + far_jump_table_offset
                        : kNullAddress;
  JumpTableAssembler::PatchJumpTableSlot(jump_table_slot, far_jump_table_slot,
                                         target);
}
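// Conceptually, patching slot {i} overwrites a fixed-size slot at a fixed
// offset so that it transfers control to {target} (a simplified sketch; the
// real offsets and slot sizes come from JumpTableAssembler and are
// architecture-specific):
//
//   Address slot = jump_table_start + i * kJumpSlotSize;      // hypothetical
//   PatchSlotToJumpTo(slot, target);  // near jump, or indirect via the far
//                                     // jump table if out of near-jump range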

void NativeModule::AddCodeSpaceLocked(base::AddressRegion region) {
  allocation_mutex_.AssertHeld();

  // Each code space must be at least twice as large as the overhead per code
  // space. Otherwise, we are wasting too much memory.
  DCHECK_GE(region.size(),
            2 * OverheadPerCodeSpace(module()->num_declared_functions));

  CodeSpaceWriteScope code_space_write_scope(this);
#if defined(V8_OS_WIN64)
  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space.
  // See src/heap/spaces.cc, MemoryAllocator::InitializeCodePageAllocator() and
  // https://cs.chromium.org/chromium/src/components/crash/content/app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
  // for details.
  if (WasmCodeManager::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
    size_t size = Heap::GetCodeRangeReservedAreaSize();
    DCHECK_LT(0, size);
    base::Vector<byte> padding =
        code_allocator_.AllocateForCodeInRegion(this, size, region);
    CHECK_EQ(reinterpret_cast<Address>(padding.begin()), region.begin());
    win64_unwindinfo::RegisterNonABICompliantCodeRange(
        reinterpret_cast<void*>(region.begin()), region.size());
  }
#endif  // V8_OS_WIN64

  WasmCodeRefScope code_ref_scope;
  WasmCode* jump_table = nullptr;
  WasmCode* far_jump_table = nullptr;
  const uint32_t num_wasm_functions = module_->num_declared_functions;
  const bool is_first_code_space = code_space_data_.empty();
  // We always need a far jump table, because it contains the runtime stubs.
  const bool needs_far_jump_table =
      !FindJumpTablesForRegionLocked(region).is_valid();
  const bool needs_jump_table = num_wasm_functions > 0 && needs_far_jump_table;

  if (needs_jump_table) {
    jump_table = CreateEmptyJumpTableInRegionLocked(
        JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region);
    CHECK(region.contains(jump_table->instruction_start()));
  }

  if (needs_far_jump_table) {
    int num_function_slots = NumWasmFunctionsInFarJumpTable(num_wasm_functions);
    far_jump_table = CreateEmptyJumpTableInRegionLocked(
        JumpTableAssembler::SizeForNumberOfFarJumpSlots(
            WasmCode::kRuntimeStubCount,
            NumWasmFunctionsInFarJumpTable(num_function_slots)),
        region);
    CHECK(region.contains(far_jump_table->instruction_start()));
    EmbeddedData embedded_data = EmbeddedData::FromBlob();
#define RUNTIME_STUB(Name) Builtin::k##Name,
#define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name)
    Builtin stub_names[WasmCode::kRuntimeStubCount] = {
        WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
#undef RUNTIME_STUB
#undef RUNTIME_STUB_TRAP
    static_assert(Builtins::kAllBuiltinsAreIsolateIndependent);
    Address builtin_addresses[WasmCode::kRuntimeStubCount];
    for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
      Builtin builtin = stub_names[i];
      builtin_addresses[i] = embedded_data.InstructionStartOfBuiltin(builtin);
    }
    JumpTableAssembler::GenerateFarJumpTable(
        far_jump_table->instruction_start(), builtin_addresses,
        WasmCode::kRuntimeStubCount, num_function_slots);
  }

  if (is_first_code_space) {
    // This can be updated and accessed without locks, since the addition of the
    // first code space happens during initialization of the {NativeModule},
    // where no concurrent accesses are possible.
    main_jump_table_ = jump_table;
    main_far_jump_table_ = far_jump_table;
  }

  code_space_data_.push_back(CodeSpaceData{region, jump_table, far_jump_table});

  if (jump_table && !is_first_code_space) {
    // Patch the new jump table(s) with existing functions. If this is the first
    // code space, there cannot be any functions that have been compiled yet.
    const CodeSpaceData& new_code_space_data = code_space_data_.back();
    for (uint32_t slot_index = 0; slot_index < num_wasm_functions;
         ++slot_index) {
      if (code_table_[slot_index]) {
        PatchJumpTableLocked(new_code_space_data, slot_index,
                             code_table_[slot_index]->instruction_start());
      } else if (lazy_compile_table_) {
        Address lazy_compile_target =
            lazy_compile_table_->instruction_start() +
            JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
        PatchJumpTableLocked(new_code_space_data, slot_index,
                             lazy_compile_target);
      }
    }
  }
}

namespace {
class NativeModuleWireBytesStorage final : public WireBytesStorage {
 public:
  explicit NativeModuleWireBytesStorage(
      std::shared_ptr<base::OwnedVector<const uint8_t>> wire_bytes)
      : wire_bytes_(std::move(wire_bytes)) {}

  base::Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
    return std::atomic_load(&wire_bytes_)
        ->as_vector()
        .SubVector(ref.offset(), ref.end_offset());
  }

  base::Optional<ModuleWireBytes> GetModuleBytes() const final {
    return base::Optional<ModuleWireBytes>(
        std::atomic_load(&wire_bytes_)->as_vector());
  }

 private:
  const std::shared_ptr<base::OwnedVector<const uint8_t>> wire_bytes_;
};
}  // namespace

void NativeModule::SetWireBytes(base::OwnedVector<const uint8_t> wire_bytes) {
  auto shared_wire_bytes =
      std::make_shared<base::OwnedVector<const uint8_t>>(std::move(wire_bytes));
  std::atomic_store(&wire_bytes_, shared_wire_bytes);
  if (!shared_wire_bytes->empty()) {
    compilation_state_->SetWireBytesStorage(
        std::make_shared<NativeModuleWireBytesStorage>(
            std::move(shared_wire_bytes)));
  }
}
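// The wire bytes are published with {std::atomic_store} and read with
// {std::atomic_load} so that background threads can pick up the new
// shared_ptr without taking a lock. A minimal standalone sketch of the same
// pattern (hypothetical field and payload type, for illustration):
//
//   std::shared_ptr<std::vector<uint8_t>> bytes_;
//   void Publish(std::vector<uint8_t> b) {
//     std::atomic_store(&bytes_,
//                       std::make_shared<std::vector<uint8_t>>(std::move(b)));
//   }
//   size_t Size() const { return std::atomic_load(&bytes_)->size(); }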

void NativeModule::UpdateCPUDuration(size_t cpu_duration, ExecutionTier tier) {
  if (!compilation_state_->baseline_compilation_finished()) {
    baseline_compilation_cpu_duration_.fetch_add(cpu_duration,
                                                 std::memory_order_relaxed);
  } else if (tier == ExecutionTier::kTurbofan) {
    tier_up_cpu_duration_.fetch_add(cpu_duration, std::memory_order_relaxed);
  }
}

void NativeModule::TransferNewOwnedCodeLocked() const {
  allocation_mutex_.AssertHeld();
  DCHECK(!new_owned_code_.empty());
  // Sort the {new_owned_code_} vector reversed, such that the position of the
  // previously inserted element can be used as a hint for the next element. If
  // elements in {new_owned_code_} are adjacent, this will guarantee
  // constant-time insertion into the map.
  std::sort(new_owned_code_.begin(), new_owned_code_.end(),
            [](const std::unique_ptr<WasmCode>& a,
               const std::unique_ptr<WasmCode>& b) {
              return a->instruction_start() > b->instruction_start();
            });
  auto insertion_hint = owned_code_.end();
  for (auto& code : new_owned_code_) {
    DCHECK_EQ(0, owned_code_.count(code->instruction_start()));
    // Check plausibility of the insertion hint.
    DCHECK(insertion_hint == owned_code_.end() ||
           insertion_hint->first > code->instruction_start());
    insertion_hint = owned_code_.emplace_hint(
        insertion_hint, code->instruction_start(), std::move(code));
  }
  new_owned_code_.clear();
}
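// The reverse-sorted insertion above exploits the standard hinted-insertion
// guarantee of {std::map}: inserting directly before a correct hint is
// amortized constant time. A small standalone illustration of the same idea
// (plain std::map, hypothetical data):
//
//   std::map<uintptr_t, std::string> m;
//   std::vector<uintptr_t> keys = {0x3000, 0x2000, 0x1000};  // descending
//   auto hint = m.end();
//   for (uintptr_t key : keys) {
//     // {hint} points at the element that will follow {key}, so each
//     // insertion is amortized O(1) instead of O(log n).
//     hint = m.emplace_hint(hint, key, "code");
//   }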

void NativeModule::InsertToCodeCache(WasmCode* code) {
  allocation_mutex_.AssertHeld();
  DCHECK_NOT_NULL(cached_code_);
  if (code->IsAnonymous()) return;
  // Only cache Liftoff debugging code or TurboFan code (no breakpoints or
  // stepping).
  if (code->tier() == ExecutionTier::kLiftoff &&
      code->for_debugging() != kForDebugging) {
    return;
  }
  auto key = std::make_pair(code->tier(), code->index());
  if (cached_code_->insert(std::make_pair(key, code)).second) {
    code->IncRef();
  }
}

WasmCode* NativeModule::Lookup(Address pc) const {
  base::RecursiveMutexGuard lock(&allocation_mutex_);
  if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
  auto iter = owned_code_.upper_bound(pc);
  if (iter == owned_code_.begin()) return nullptr;
  --iter;
  WasmCode* candidate = iter->second.get();
  DCHECK_EQ(candidate->instruction_start(), iter->first);
  if (!candidate->contains(pc)) return nullptr;
  WasmCodeRefScope::AddRef(candidate);
  return candidate;
}
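// Worked example of the lookup above (illustrative addresses): with owned
// code starting at 0x1000 (size 0x100) and 0x2000 (size 0x80),
//   pc == 0x1040: upper_bound yields the 0x2000 entry, stepping back gives
//                 0x1000, and contains(0x1040) succeeds -> hit.
//   pc == 0x1f00: stepping back also gives 0x1000, but contains() fails
//                 -> nullptr.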

NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegionLocked(
    base::AddressRegion code_region) const {
  allocation_mutex_.AssertHeld();
  auto jump_table_usable = [code_region](const WasmCode* jump_table) {
    Address table_start = jump_table->instruction_start();
    Address table_end = table_start + jump_table->instructions().size();
    // Compute the maximum distance from anywhere in the code region to anywhere
    // in the jump table, avoiding any underflow.
    size_t max_distance = std::max(
        code_region.end() > table_start ? code_region.end() - table_start : 0,
        table_end > code_region.begin() ? table_end - code_region.begin() : 0);
    // We can allow a max_distance that is equal to kMaxCodeSpaceSize, because
    // every call or jump will target an address *within* the region, but never
    // exactly the end of the region. So all occurring offsets are actually
    // smaller than max_distance.
    return max_distance <= WasmCodeAllocator::kMaxCodeSpaceSize;
  };

  for (auto& code_space_data : code_space_data_) {
    DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
    if (!code_space_data.far_jump_table) continue;
    // Only return these jump tables if they are reachable from the whole
    // {code_region}.
    if (kNeedsFarJumpsBetweenCodeSpaces &&
        (!jump_table_usable(code_space_data.far_jump_table) ||
         (code_space_data.jump_table &&
          !jump_table_usable(code_space_data.jump_table)))) {
      continue;
    }
    return {code_space_data.jump_table
                ? code_space_data.jump_table->instruction_start()
                : kNullAddress,
            code_space_data.far_jump_table->instruction_start()};
  }
  return {};
}
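// Worked example of the distance check above (illustrative addresses,
// assuming a far jump table at [0x5001'0000, 0x5001'1000) and a code region
// [0x5000'0000, 0x5040'0000)): max_distance is
//   max(0x5040'0000 - 0x5001'0000, 0x5001'1000 - 0x5000'0000) = 0x3f'0000,
// so the table is usable as long as that value does not exceed
// {WasmCodeAllocator::kMaxCodeSpaceSize}.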

Address NativeModule::GetNearCallTargetForFunction(
    uint32_t func_index, const JumpTablesRef& jump_tables) const {
  DCHECK(jump_tables.is_valid());
  uint32_t slot_offset = JumpTableOffset(module(), func_index);
  return jump_tables.jump_table_start + slot_offset;
}

Address NativeModule::GetNearRuntimeStubEntry(
    WasmCode::RuntimeStubId index, const JumpTablesRef& jump_tables) const {
  DCHECK(jump_tables.is_valid());
  auto offset = JumpTableAssembler::FarJumpSlotIndexToOffset(index);
  return jump_tables.far_jump_table_start + offset;
}

uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
    Address slot_address) const {
  WasmCodeRefScope code_refs;
  WasmCode* code = Lookup(slot_address);
  DCHECK_NOT_NULL(code);
  DCHECK_EQ(WasmCode::kJumpTable, code->kind());
  uint32_t slot_offset =
      static_cast<uint32_t>(slot_address - code->instruction_start());
  uint32_t slot_idx = JumpTableAssembler::SlotOffsetToIndex(slot_offset);
  DCHECK_LT(slot_idx, module_->num_declared_functions);
  DCHECK_EQ(slot_address,
            code->instruction_start() +
                JumpTableAssembler::JumpSlotIndexToOffset(slot_idx));
  return module_->num_imported_functions + slot_idx;
}

WasmCode::RuntimeStubId NativeModule::GetRuntimeStubId(Address target) const {
  base::RecursiveMutexGuard guard(&allocation_mutex_);

  for (auto& code_space_data : code_space_data_) {
    if (code_space_data.far_jump_table != nullptr &&
        code_space_data.far_jump_table->contains(target)) {
      uint32_t offset = static_cast<uint32_t>(
          target - code_space_data.far_jump_table->instruction_start());
      uint32_t index = JumpTableAssembler::FarJumpSlotOffsetToIndex(offset);
      if (index >= WasmCode::kRuntimeStubCount) continue;
      if (JumpTableAssembler::FarJumpSlotIndexToOffset(index) != offset) {
        continue;
      }
      return static_cast<WasmCode::RuntimeStubId>(index);
    }
  }

  // Invalid address.
  return WasmCode::kRuntimeStubCount;
}

NativeModule::~NativeModule() {
  TRACE_HEAP("Deleting native module: %p\n", this);
  // Cancel all background compilation before resetting any field of the
  // NativeModule or freeing anything.
  compilation_state_->CancelCompilation();
  GetWasmEngine()->FreeNativeModule(this);
  // Free the import wrapper cache before releasing the {WasmCode} objects in
  // {owned_code_}. The destructor of {WasmImportWrapperCache} still needs to
  // decrease reference counts on the {WasmCode} objects.
  import_wrapper_cache_.reset();
}

WasmCodeManager::WasmCodeManager()
    : max_committed_code_space_(FLAG_wasm_max_code_space * MB),
      critical_committed_code_space_(max_committed_code_space_ / 2) {}

WasmCodeManager::~WasmCodeManager() {
  // No more committed code space.
  DCHECK_EQ(0, total_committed_code_space_.load());
}

#if defined(V8_OS_WIN64)
// static
bool WasmCodeManager::CanRegisterUnwindInfoForNonABICompliantCodeRange() {
  return win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
         FLAG_win64_unwinding_info;
}
#endif  // V8_OS_WIN64

void WasmCodeManager::Commit(base::AddressRegion region) {
  // TODO(v8:8462): Remove eager commit once perf supports remapping.
  if (FLAG_perf_prof) return;
  DCHECK(IsAligned(region.begin(), CommitPageSize()));
  DCHECK(IsAligned(region.size(), CommitPageSize()));
  // Reserve the size. Use a CAS loop to avoid overflow on
  // {total_committed_code_space_}.
  size_t old_value = total_committed_code_space_.load();
  while (true) {
    DCHECK_GE(max_committed_code_space_, old_value);
    if (region.size() > max_committed_code_space_ - old_value) {
      auto oom_detail = base::FormattedString{}
                        << "trying to commit " << region.size()
                        << ", already committed " << old_value;
      V8::FatalProcessOutOfMemory(nullptr,
                                  "Exceeding maximum wasm committed code space",
                                  oom_detail.PrintToArray().data());
      UNREACHABLE();
    }
    if (total_committed_code_space_.compare_exchange_weak(
            old_value, old_value + region.size())) {
      break;
    }
  }
  // Even when we employ W^X with FLAG_wasm_write_protect_code_memory == true,
  // code pages need to be initially allocated with RWX permission because of
  // concurrent compilation/execution. For this reason there is no distinction
  // here based on FLAG_wasm_write_protect_code_memory.
  // TODO(dlehmann): This allocates initially as writable and executable, and
  // as such is not safe-by-default. In particular, if
  // {WasmCodeAllocator::SetWritable(false)} is never called afterwards (e.g.,
  // because no {CodeSpaceWriteScope} is created), the writable permission is
  // never withdrawn.
  // One potential fix is to allocate initially with kReadExecute only, which
  // forces all compilation threads to add the missing {CodeSpaceWriteScope}s
  // before modification; and/or adding DCHECKs that {CodeSpaceWriteScope} is
  // open when calling this method.
  PageAllocator::Permission permission = PageAllocator::kReadWriteExecute;

  bool success = false;
  if (MemoryProtectionKeysEnabled()) {
#if V8_HAS_PKU_JIT_WRITE_PROTECT
    TRACE_HEAP(
        "Setting rwx permissions and memory protection key for 0x%" PRIxPTR
        ":0x%" PRIxPTR "\n",
        region.begin(), region.end());
    success = base::MemoryProtectionKey::SetPermissionsAndKey(
        GetPlatformPageAllocator(), region, permission,
        RwxMemoryWriteScope::memory_protection_key());
#else
    UNREACHABLE();
#endif  // V8_HAS_PKU_JIT_WRITE_PROTECT
  } else {
    TRACE_HEAP("Setting rwx permissions for 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
               region.begin(), region.end());
    success = SetPermissions(GetPlatformPageAllocator(), region.begin(),
                             region.size(), permission);
  }

  if (V8_UNLIKELY(!success)) {
    auto oom_detail = base::FormattedString{} << "region size: "
                                              << region.size();
    V8::FatalProcessOutOfMemory(nullptr, "Commit wasm code space",
                                oom_detail.PrintToArray().data());
    UNREACHABLE();
  }
}
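// The reservation pattern in {Commit} can be reduced to the following
// standalone sketch (simplified, hypothetical names): an atomic counter is
// bumped with compare_exchange so that concurrent commits never exceed the
// configured maximum.
//
//   std::atomic<size_t> committed{0};
//   bool TryReserve(size_t bytes, size_t max) {
//     size_t old = committed.load();
//     while (true) {
//       if (bytes > max - old) return false;  // would exceed the limit
//       if (committed.compare_exchange_weak(old, old + bytes)) return true;
//       // On failure, {old} has been reloaded with the current value; retry.
//     }
//   }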

void WasmCodeManager::Decommit(base::AddressRegion region) {
  // TODO(v8:8462): Remove this once perf supports remapping.
  if (FLAG_perf_prof) return;
  PageAllocator* allocator = GetPlatformPageAllocator();
  DCHECK(IsAligned(region.begin(), allocator->CommitPageSize()));
  DCHECK(IsAligned(region.size(), allocator->CommitPageSize()));
  size_t old_committed = total_committed_code_space_.fetch_sub(region.size());
  DCHECK_LE(region.size(), old_committed);
  USE(old_committed);
  TRACE_HEAP("Decommitting system pages 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
             region.begin(), region.end());
  CHECK(allocator->DecommitPages(reinterpret_cast<void*>(region.begin()),
                                 region.size()));
}

void WasmCodeManager::AssignRange(base::AddressRegion region,
                                  NativeModule* native_module) {
  base::MutexGuard lock(&native_modules_mutex_);
  lookup_map_.insert(std::make_pair(
      region.begin(), std::make_pair(region.end(), native_module)));
}

VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  DCHECK_GT(size, 0);
  size_t allocate_page_size = page_allocator->AllocatePageSize();
  size = RoundUp(size, allocate_page_size);
  if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();

  // When we start exposing Wasm in jitless mode, the jitless flag will have
  // to determine whether we set kMapAsJittable or not.
  DCHECK(!FLAG_jitless);
  VirtualMemory mem(page_allocator, size, hint, allocate_page_size,
                    JitPermission::kMapAsJittable);
  if (!mem.IsReserved()) return {};
  TRACE_HEAP("VMem alloc: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n", mem.address(),
             mem.end(), mem.size());

  // TODO(v8:8462): Remove eager commit once perf supports remapping.
  if (FLAG_perf_prof) {
    SetPermissions(GetPlatformPageAllocator(), mem.address(), mem.size(),
                   PageAllocator::kReadWriteExecute);
  }
  return mem;
}

namespace {
// The numbers here are rough estimates, used to calculate the size of the
// initial code reservation and for estimating the amount of external memory
// reported to the GC.
// They do not need to be accurate. Choosing them too small will result in
// separate code spaces being allocated (compile time and runtime overhead),
// choosing them too large results in over-reservation (virtual address space
// only).
// In doubt, choose the numbers slightly too large on 64-bit systems (where
// {kNeedsFarJumpsBetweenCodeSpaces} is {true}). Over-reservation is less
// critical in a 64-bit address space, but separate code spaces cause overhead.
// On 32-bit systems (where {kNeedsFarJumpsBetweenCodeSpaces} is {false}), the
// opposite is true: Multiple code spaces are cheaper, and address space is
// scarce, hence choose numbers slightly too small.
//
// Numbers can be determined by running benchmarks with
// --trace-wasm-compilation-times, and piping the output through
// tools/wasm/code-size-factors.py.
#if V8_TARGET_ARCH_X64
constexpr size_t kTurbofanFunctionOverhead = 24;
constexpr size_t kTurbofanCodeSizeMultiplier = 3;
constexpr size_t kLiftoffFunctionOverhead = 56;
constexpr size_t kLiftoffCodeSizeMultiplier = 4;
constexpr size_t kImportSize = 640;
#elif V8_TARGET_ARCH_IA32
constexpr size_t kTurbofanFunctionOverhead = 20;
constexpr size_t kTurbofanCodeSizeMultiplier = 3;
constexpr size_t kLiftoffFunctionOverhead = 48;
constexpr size_t kLiftoffCodeSizeMultiplier = 3;
constexpr size_t kImportSize = 600;
#elif V8_TARGET_ARCH_ARM
constexpr size_t kTurbofanFunctionOverhead = 44;
constexpr size_t kTurbofanCodeSizeMultiplier = 3;
constexpr size_t kLiftoffFunctionOverhead = 96;
constexpr size_t kLiftoffCodeSizeMultiplier = 5;
constexpr size_t kImportSize = 550;
#elif V8_TARGET_ARCH_ARM64
constexpr size_t kTurbofanFunctionOverhead = 40;
constexpr size_t kTurbofanCodeSizeMultiplier = 3;
constexpr size_t kLiftoffFunctionOverhead = 68;
constexpr size_t kLiftoffCodeSizeMultiplier = 4;
constexpr size_t kImportSize = 750;
#else
// Other platforms should add their own estimates for best performance. Numbers
// below are the maximum of other architectures.
constexpr size_t kTurbofanFunctionOverhead = 44;
constexpr size_t kTurbofanCodeSizeMultiplier = 4;
constexpr size_t kLiftoffFunctionOverhead = 96;
constexpr size_t kLiftoffCodeSizeMultiplier = 5;
constexpr size_t kImportSize = 750;
#endif
}  // namespace

// static
size_t WasmCodeManager::EstimateLiftoffCodeSize(int body_size) {
  return kLiftoffFunctionOverhead + kCodeAlignment / 2 +
         body_size * kLiftoffCodeSizeMultiplier;
}

// static
size_t WasmCodeManager::EstimateNativeModuleCodeSize(
    const WasmModule* module, bool include_liftoff,
    DynamicTiering dynamic_tiering) {
  int num_functions = static_cast<int>(module->num_declared_functions);
  int num_imported_functions = static_cast<int>(module->num_imported_functions);
  int code_section_length = 0;
  if (num_functions > 0) {
    DCHECK_EQ(module->functions.size(), num_imported_functions + num_functions);
    auto* first_fn = &module->functions[module->num_imported_functions];
    auto* last_fn = &module->functions.back();
    code_section_length =
        static_cast<int>(last_fn->code.end_offset() - first_fn->code.offset());
  }
  return EstimateNativeModuleCodeSize(num_functions, num_imported_functions,
                                      code_section_length, include_liftoff,
                                      dynamic_tiering);
}

// static
size_t WasmCodeManager::EstimateNativeModuleCodeSize(
    int num_functions, int num_imported_functions, int code_section_length,
    bool include_liftoff, DynamicTiering dynamic_tiering) {
  // The size for the jump table and far jump table is added later, per code
  // space (see {OverheadPerCodeSpace}). We still need to add the overhead for
  // the lazy compile table once, though. There are configurations where we do
  // not need it (non-asm.js, no dynamic tiering and no lazy compilation), but
  // we ignore this here as most of the time we will need it.
  const size_t lazy_compile_table_size =
      JumpTableAssembler::SizeForNumberOfLazyFunctions(num_functions);

  const size_t size_of_imports = kImportSize * num_imported_functions;

  const size_t overhead_per_function_turbofan =
      kTurbofanFunctionOverhead + kCodeAlignment / 2;
  size_t size_of_turbofan = overhead_per_function_turbofan * num_functions +
                            kTurbofanCodeSizeMultiplier * code_section_length;

  const size_t overhead_per_function_liftoff =
      kLiftoffFunctionOverhead + kCodeAlignment / 2;
  const size_t size_of_liftoff =
      include_liftoff ? overhead_per_function_liftoff * num_functions +
                            kLiftoffCodeSizeMultiplier * code_section_length
                      : 0;

  // With dynamic tiering we don't expect to compile more than 25% with
  // TurboFan. If there is no Liftoff, though, all code will get generated by
  // TurboFan.
  if (include_liftoff && dynamic_tiering) size_of_turbofan /= 4;

  return lazy_compile_table_size + size_of_imports + size_of_liftoff +
         size_of_turbofan;
}
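// Worked example of the estimate above (x64 constants, and assuming for
// illustration that kCodeAlignment / 2 == 16): for 100 declared functions,
// 10 imports, a 50,000 byte code section, include_liftoff == true and dynamic
// tiering enabled:
//   imports:  10 * 640                           =   6,400
//   Liftoff:  (56 + 16) * 100 + 4 * 50,000       = 207,200
//   TurboFan: ((24 + 16) * 100 + 3 * 50,000) / 4 =  38,500
// i.e. about 252,000 bytes plus the lazy compile table size.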

// static
size_t WasmCodeManager::EstimateNativeModuleMetaDataSize(
    const WasmModule* module) {
  size_t wasm_module_estimate = EstimateStoredSize(module);

  uint32_t num_wasm_functions = module->num_declared_functions;

  // TODO(wasm): Include wire bytes size.
  size_t native_module_estimate =
      sizeof(NativeModule) +                      // NativeModule struct
      (sizeof(WasmCode*) * num_wasm_functions) +  // code table size
      (sizeof(WasmCode) * num_wasm_functions);    // code object size

  size_t jump_table_size = RoundUp<kCodeAlignment>(
      JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
  size_t far_jump_table_size =
      RoundUp<kCodeAlignment>(JumpTableAssembler::SizeForNumberOfFarJumpSlots(
          WasmCode::kRuntimeStubCount,
          NumWasmFunctionsInFarJumpTable(num_wasm_functions)));

  return wasm_module_estimate + native_module_estimate + jump_table_size +
         far_jump_table_size;
}

// static
bool WasmCodeManager::HasMemoryProtectionKeySupport() {
#if V8_HAS_PKU_JIT_WRITE_PROTECT
  return RwxMemoryWriteScope::IsSupported();
#else
  return false;
#endif  // V8_HAS_PKU_JIT_WRITE_PROTECT
}

// static
bool WasmCodeManager::MemoryProtectionKeysEnabled() {
  return HasMemoryProtectionKeySupport() && FLAG_wasm_memory_protection_keys;
}

// static
bool WasmCodeManager::MemoryProtectionKeyWritable() {
#if V8_HAS_PKU_JIT_WRITE_PROTECT
  return RwxMemoryWriteScope::IsPKUWritable();
#else
  return false;
#endif  // V8_HAS_PKU_JIT_WRITE_PROTECT
}

// static
void WasmCodeManager::InitializeMemoryProtectionKeyPermissionsIfSupported() {
  if (!HasMemoryProtectionKeySupport()) return;
  // The default permission is {kDisableAccess}. Switch from that to
  // {kDisableWrite}. Leave other permissions untouched, as the thread did
  // already use the memory protection key in that case.
  RwxMemoryWriteScope initialize_permission_scope(
      "For initialization if PKU is in kNoAccess permission case.");
}

base::AddressRegion WasmCodeManager::AllocateAssemblerBufferSpace(int size) {
#if V8_HAS_PKU_JIT_WRITE_PROTECT
  if (MemoryProtectionKeysEnabled()) {
    auto* page_allocator = GetPlatformPageAllocator();
    size_t page_size = page_allocator->AllocatePageSize();
    size = RoundUp(size, page_size);
    void* mapped = AllocatePages(page_allocator, nullptr, size, page_size,
                                 PageAllocator::kNoAccess);
    if (V8_UNLIKELY(!mapped)) {
      auto oom_detail = base::FormattedString{}
                        << "cannot allocate " << size
                        << " more bytes for assembler buffers";
      V8::FatalProcessOutOfMemory(nullptr,
                                  "Allocate protected assembler buffer space",
                                  oom_detail.PrintToArray().data());
      UNREACHABLE();
    }
    auto region =
        base::AddressRegionOf(reinterpret_cast<uint8_t*>(mapped), size);
    CHECK(base::MemoryProtectionKey::SetPermissionsAndKey(
        page_allocator, region, PageAllocator::kReadWrite,
        RwxMemoryWriteScope::memory_protection_key()));
    return region;
  }
#endif  // V8_HAS_PKU_JIT_WRITE_PROTECT
  DCHECK(!MemoryProtectionKeysEnabled());
  return base::AddressRegionOf(new uint8_t[size], size);
}

void WasmCodeManager::FreeAssemblerBufferSpace(base::AddressRegion region) {
#if V8_HAS_PKU_JIT_WRITE_PROTECT
  if (MemoryProtectionKeysEnabled()) {
    auto* page_allocator = GetPlatformPageAllocator();
    FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
              region.size());
    return;
  }
#endif  // V8_HAS_PKU_JIT_WRITE_PROTECT
  DCHECK(!MemoryProtectionKeysEnabled());
  delete[] reinterpret_cast<uint8_t*>(region.begin());
}

std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
    Isolate* isolate, const WasmFeatures& enabled, size_t code_size_estimate,
    std::shared_ptr<const WasmModule> module) {
  if (total_committed_code_space_.load() >
      critical_committed_code_space_.load()) {
    (reinterpret_cast<v8::Isolate*>(isolate))
        ->MemoryPressureNotification(MemoryPressureLevel::kCritical);
    size_t committed = total_committed_code_space_.load();
    DCHECK_GE(max_committed_code_space_, committed);
    critical_committed_code_space_.store(
        committed + (max_committed_code_space_ - committed) / 2);
  }

  size_t code_vmem_size =
      ReservationSize(code_size_estimate, module->num_declared_functions, 0);

  // The '--wasm-max-initial-code-space-reservation' testing flag can be used to
  // reduce the maximum size of the initial code space reservation (in MB).
  if (FLAG_wasm_max_initial_code_space_reservation > 0) {
    size_t flag_max_bytes =
        static_cast<size_t>(FLAG_wasm_max_initial_code_space_reservation) * MB;
    if (flag_max_bytes < code_vmem_size) code_vmem_size = flag_max_bytes;
  }

  // Try up to two times; getting rid of dead JSArrayBuffer allocations might
  // require two GCs because the first GC may be incremental and may have
  // floating garbage.
  static constexpr int kAllocationRetries = 2;
  VirtualMemory code_space;
  for (int retries = 0;; ++retries) {
    code_space = TryAllocate(code_vmem_size);
    if (code_space.IsReserved()) break;
    if (retries == kAllocationRetries) {
      auto oom_detail = base::FormattedString{}
                        << "NewNativeModule cannot allocate code space of "
                        << code_vmem_size << " bytes";
      V8::FatalProcessOutOfMemory(isolate, "Allocate initial wasm code space",
                                  oom_detail.PrintToArray().data());
      UNREACHABLE();
    }
    // Run one GC, then try the allocation again.
    isolate->heap()->MemoryPressureNotification(MemoryPressureLevel::kCritical,
                                                true);
  }

  Address start = code_space.address();
  size_t size = code_space.size();
  Address end = code_space.end();
  std::shared_ptr<NativeModule> ret;
  new NativeModule(enabled, DynamicTiering{FLAG_wasm_dynamic_tiering.value()},
                   std::move(code_space), std::move(module),
                   isolate->async_counters(), &ret);
  // The constructor initialized the shared_ptr.
  DCHECK_NOT_NULL(ret);
  TRACE_HEAP("New NativeModule %p: Mem: 0x%" PRIxPTR ",+%zu\n", ret.get(),
             start, size);

  base::MutexGuard lock(&native_modules_mutex_);
  lookup_map_.insert(std::make_pair(start, std::make_pair(end, ret.get())));
  return ret;
}

void NativeModule::SampleCodeSize(
    Counters* counters, NativeModule::CodeSamplingTime sampling_time) const {
  size_t code_size = sampling_time == kSampling
                         ? code_allocator_.committed_code_space()
                         : code_allocator_.generated_code_size();
  int code_size_mb = static_cast<int>(code_size / MB);
  Histogram* histogram = nullptr;
  switch (sampling_time) {
    case kAfterBaseline:
      histogram = counters->wasm_module_code_size_mb_after_baseline();
      break;
    case kSampling: {
      histogram = counters->wasm_module_code_size_mb();
      // If this is a wasm module of >= 2MB, also sample the freed code size,
      // absolute and relative. Code GC does not happen on asm.js modules, and
      // small modules will never trigger GC anyway.
      size_t generated_size = code_allocator_.generated_code_size();
      if (generated_size >= 2 * MB && module()->origin == kWasmOrigin) {
        size_t freed_size = code_allocator_.freed_code_size();
        DCHECK_LE(freed_size, generated_size);
        int freed_percent = static_cast<int>(100 * freed_size / generated_size);
        counters->wasm_module_freed_code_size_percent()->AddSample(
            freed_percent);
      }
      break;
    }
  }
  histogram->AddSample(code_size_mb);
}

std::unique_ptr<WasmCode> NativeModule::AddCompiledCode(
    WasmCompilationResult result) {
  std::vector<std::unique_ptr<WasmCode>> code = AddCompiledCode({&result, 1});
  return std::move(code[0]);
}

std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
    base::Vector<WasmCompilationResult> results) {
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
               "wasm.AddCompiledCode", "num", results.size());
  DCHECK(!results.empty());
  // First, allocate code space for all the results.
  size_t total_code_space = 0;
  for (auto& result : results) {
    DCHECK(result.succeeded());
    total_code_space += RoundUp<kCodeAlignment>(result.code_desc.instr_size);
    if (result.result_tier == ExecutionTier::kLiftoff) {
      int index = result.func_index;
      int* slots = &module()->functions[index].feedback_slots;
#if DEBUG
      int current_value = base::Relaxed_Load(slots);
      DCHECK(current_value == 0 ||
             current_value == result.feedback_vector_slots);
#endif
      base::Relaxed_Store(slots, result.feedback_vector_slots);
    }
  }
  base::Vector<byte> code_space;
  NativeModule::JumpTablesRef jump_tables;
  CodeSpaceWriteScope code_space_write_scope(this);
  {
    base::RecursiveMutexGuard guard{&allocation_mutex_};
    code_space = code_allocator_.AllocateForCode(this, total_code_space);
    // Lookup the jump tables to use once, then use for all code objects.
    jump_tables =
        FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
  }
  // If we happen to have a {total_code_space} which is bigger than
  // {kMaxCodeSpaceSize}, we would not find valid jump tables for the whole
  // region. If this ever happens, we need to handle this case (by splitting the
  // {results} vector in smaller chunks).
  CHECK(jump_tables.is_valid());

  std::vector<std::unique_ptr<WasmCode>> generated_code;
  generated_code.reserve(results.size());

  // Now copy the generated code into the code space and relocate it.
  for (auto& result : results) {
    DCHECK_EQ(result.code_desc.buffer, result.instr_buffer->start());
    size_t code_size = RoundUp<kCodeAlignment>(result.code_desc.instr_size);
    base::Vector<byte> this_code_space = code_space.SubVector(0, code_size);
    code_space += code_size;
    generated_code.emplace_back(AddCodeWithCodeSpace(
        result.func_index, result.code_desc, result.frame_slot_count,
        result.tagged_parameter_slots,
        result.protected_instructions_data.as_vector(),
        result.source_positions.as_vector(), GetCodeKind(result),
        result.result_tier, result.for_debugging, this_code_space,
        jump_tables));
  }
  DCHECK_EQ(0, code_space.size());

  return generated_code;
}

void NativeModule::SetTieringState(TieringState new_tiering_state) {
  // Do not tier down asm.js (just never change the tiering state).
  if (module()->origin != kWasmOrigin) return;

  base::RecursiveMutexGuard lock(&allocation_mutex_);
  tiering_state_ = new_tiering_state;
}

bool NativeModule::IsTieredDown() {
  base::RecursiveMutexGuard lock(&allocation_mutex_);
  return tiering_state_ == kTieredDown;
}

void NativeModule::RecompileForTiering() {
  // If baseline compilation is not finished yet, we do not tier down now. This
  // would be tricky because not all code is guaranteed to be available yet.
  // Instead, we tier down after streaming compilation finished.
  if (!compilation_state_->baseline_compilation_finished()) return;

  // Read the tiering state under the lock, then trigger recompilation after
  // releasing the lock. If the tiering state was changed when the triggered
  // compilation units finish, code installation will handle that correctly.
  TieringState current_state;
  {
    base::RecursiveMutexGuard lock(&allocation_mutex_);
    current_state = tiering_state_;

    // Initialize {cached_code_} to signal that this cache should get filled
    // from now on.
    if (!cached_code_) {
      cached_code_ = std::make_unique<
          std::map<std::pair<ExecutionTier, int>, WasmCode*>>();
      // Fill with existing code.
      for (auto& code_entry : owned_code_) {
        InsertToCodeCache(code_entry.second.get());
      }
    }
  }
  RecompileNativeModule(this, current_state);
}

std::vector<int> NativeModule::FindFunctionsToRecompile(
    TieringState new_tiering_state) {
  WasmCodeRefScope code_ref_scope;
  base::RecursiveMutexGuard guard(&allocation_mutex_);
  // Get writable permission already here (and not inside the loop in
  // {PatchJumpTablesLocked}), to avoid switching for each slot individually.
  CodeSpaceWriteScope code_space_write_scope(this);
  std::vector<int> function_indexes;
  int imported = module()->num_imported_functions;
  int declared = module()->num_declared_functions;
  const bool tier_down = new_tiering_state == kTieredDown;
  for (int slot_index = 0; slot_index < declared; ++slot_index) {
    int function_index = imported + slot_index;
    WasmCode* old_code = code_table_[slot_index];
    bool code_is_good =
        tier_down ? old_code && old_code->for_debugging()
                  : old_code && old_code->tier() == ExecutionTier::kTurbofan;
    if (code_is_good) continue;
    DCHECK_NOT_NULL(cached_code_);
    auto cache_it = cached_code_->find(std::make_pair(
        tier_down ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan,
        function_index));
    if (cache_it != cached_code_->end()) {
      WasmCode* cached_code = cache_it->second;
      if (old_code) {
        WasmCodeRefScope::AddRef(old_code);
        // The code is added to the current {WasmCodeRefScope}, hence the ref
        // count cannot drop to zero here.
        old_code->DecRefOnLiveCode();
      }
      code_table_[slot_index] = cached_code;
      PatchJumpTablesLocked(slot_index, cached_code->instruction_start());
      cached_code->IncRef();
      continue;
    }
    // Otherwise add the function to the set of functions to recompile.
    function_indexes.push_back(function_index);
2452 2453 2454 2455
  }
  return function_indexes;
}

2456
void NativeModule::FreeCode(base::Vector<WasmCode* const> codes) {
2457
  base::RecursiveMutexGuard guard(&allocation_mutex_);
2458 2459
  // Free the code space.
  code_allocator_.FreeCode(codes);
2460

2461 2462 2463 2464 2465 2466
  if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
  DebugInfo* debug_info = debug_info_.get();
  // Free the {WasmCode} objects. This will also unregister trap handler data.
  for (WasmCode* code : codes) {
    DCHECK_EQ(1, owned_code_.count(code->instruction_start()));
    owned_code_.erase(code->instruction_start());
2467
  }
2468 2469 2470
  // Remove debug side tables for all removed code objects, after releasing our
  // lock. This is to avoid lock order inversion.
  if (debug_info) debug_info->RemoveDebugSideTables(codes);
2471 2472
}

2473
size_t NativeModule::GetNumberOfCodeSpacesForTesting() const {
2474
  base::RecursiveMutexGuard guard{&allocation_mutex_};
2475 2476 2477
  return code_allocator_.GetNumCodeSpaces();
}

bool NativeModule::HasDebugInfo() const {
  base::RecursiveMutexGuard guard(&allocation_mutex_);
  return debug_info_ != nullptr;
}

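// Lazily creates the {DebugInfo} on first request, under {allocation_mutex_}.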
DebugInfo* NativeModule::GetDebugInfo() {
  base::RecursiveMutexGuard guard(&allocation_mutex_);
  if (!debug_info_) debug_info_ = std::make_unique<DebugInfo>(this);
  return debug_info_.get();
}

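// Lazily creates the {NamesProvider} from the decoded module and the wire
// bytes; callers must ensure that wire bytes are present (see the DCHECK
// below).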
NamesProvider* NativeModule::GetNamesProvider() {
  DCHECK(HasWireBytes());
  base::RecursiveMutexGuard guard(&allocation_mutex_);
  if (!names_provider_) {
    names_provider_ =
        std::make_unique<NamesProvider>(module_.get(), wire_bytes());
  }
  return names_provider_.get();
}

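// Releases all code spaces passed in from a {NativeModule} that is being
// freed: unregisters Win64 unwind info where applicable, removes the spaces
// from the PC lookup map, frees the reservations, and, unless
// {FLAG_perf_prof} is set (see the TODO below), subtracts {committed_size}
// from the total committed code space counter.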
void WasmCodeManager::FreeNativeModule(
    base::Vector<VirtualMemory> owned_code_space, size_t committed_size) {
  base::MutexGuard lock(&native_modules_mutex_);
  for (auto& code_space : owned_code_space) {
    DCHECK(code_space.IsReserved());
    TRACE_HEAP("VMem Release: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n",
               code_space.address(), code_space.end(), code_space.size());

#if defined(V8_OS_WIN64)
    if (CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
      win64_unwindinfo::UnregisterNonABICompliantCodeRange(
          reinterpret_cast<void*>(code_space.address()));
    }
#endif  // V8_OS_WIN64

    lookup_map_.erase(code_space.address());
    code_space.Free();
    DCHECK(!code_space.IsReserved());
  }

  DCHECK(IsAligned(committed_size, CommitPageSize()));
  // TODO(v8:8462): Remove this once perf supports remapping.
  if (!FLAG_perf_prof) {
    size_t old_committed =
        total_committed_code_space_.fetch_sub(committed_size);
    DCHECK_LE(committed_size, old_committed);
    USE(old_committed);
  }
}

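// {lookup_map_} maps the start address of each registered code space to a
// pair of (end address, owning NativeModule). Finding the candidate for {pc}
// is thus an upper_bound lookup followed by a step back to the previous
// entry, plus a final range check.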
NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {
  base::MutexGuard lock(&native_modules_mutex_);
  if (lookup_map_.empty()) return nullptr;

  auto iter = lookup_map_.upper_bound(pc);
  if (iter == lookup_map_.begin()) return nullptr;
  --iter;
  Address region_start = iter->first;
  Address region_end = iter->second.first;
  NativeModule* candidate = iter->second.second;

  DCHECK_NOT_NULL(candidate);
  return region_start <= pc && pc < region_end ? candidate : nullptr;
}

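// Convenience wrapper over {LookupNativeModule}: returns the {WasmCode}
// containing {pc}, or nullptr if no module or code object is found for {pc}.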
WasmCode* WasmCodeManager::LookupCode(Address pc) const {
  NativeModule* candidate = LookupNativeModule(pc);
  return candidate ? candidate->Lookup(pc) : nullptr;
}

namespace {
thread_local WasmCodeRefScope* current_code_refs_scope = nullptr;
}  // namespace

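// {WasmCodeRefScope}s form a per-thread stack via {current_code_refs_scope}
// above: the constructor pushes the new scope, the destructor pops it and
// drops one reference for every {WasmCode} registered through {AddRef} while
// this scope was the innermost one.
// Usage sketch (hypothetical call site):
//   WasmCodeRefScope scope;
//   WasmCodeRefScope::AddRef(code);  // {code} stays alive until scope exit.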
WasmCodeRefScope::WasmCodeRefScope()
    : previous_scope_(current_code_refs_scope) {
  current_code_refs_scope = this;
}

WasmCodeRefScope::~WasmCodeRefScope() {
  DCHECK_EQ(this, current_code_refs_scope);
  current_code_refs_scope = previous_scope_;
  WasmCode::DecrementRefCount(base::VectorOf(code_ptrs_));
}

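// Registers {code} with the innermost {WasmCodeRefScope} on the current
// thread and takes a reference on its behalf; a scope must exist (see the
// DCHECK below).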
// static
void WasmCodeRefScope::AddRef(WasmCode* code) {
  DCHECK_NOT_NULL(code);
  WasmCodeRefScope* current_scope = current_code_refs_scope;
  DCHECK_NOT_NULL(current_scope);
  current_scope->code_ptrs_.push_back(code);
  code->IncRef();
}

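// Maps a {WasmCode::RuntimeStubId} to the corresponding {Builtin} via a
// constexpr table generated from {WASM_RUNTIME_STUB_LIST}; trap stubs map to
// their {Builtin::kThrowWasm<Name>} counterparts.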
Builtin RuntimeStubIdToBuiltinName(WasmCode::RuntimeStubId stub_id) {
#define RUNTIME_STUB_NAME(Name) Builtin::k##Name,
#define RUNTIME_STUB_NAME_TRAP(Name) Builtin::kThrowWasm##Name,
  constexpr Builtin builtin_names[] = {
      WASM_RUNTIME_STUB_LIST(RUNTIME_STUB_NAME, RUNTIME_STUB_NAME_TRAP)};
#undef RUNTIME_STUB_NAME
#undef RUNTIME_STUB_NAME_TRAP
  static_assert(arraysize(builtin_names) == WasmCode::kRuntimeStubCount);

  DCHECK_GT(arraysize(builtin_names), stub_id);
  return builtin_names[stub_id];
}

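// Returns a human-readable name for {stub_id}. The table carries one extra
// trailing "<unknown>" entry, hence the {kRuntimeStubCount + 1} in the
// static_assert below.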
const char* GetRuntimeStubName(WasmCode::RuntimeStubId stub_id) {
#define RUNTIME_STUB_NAME(Name) #Name,
#define RUNTIME_STUB_NAME_TRAP(Name) "ThrowWasm" #Name,
  constexpr const char* runtime_stub_names[] = {WASM_RUNTIME_STUB_LIST(
      RUNTIME_STUB_NAME, RUNTIME_STUB_NAME_TRAP) "<unknown>"};
#undef RUNTIME_STUB_NAME
#undef RUNTIME_STUB_NAME_TRAP
  static_assert(arraysize(runtime_stub_names) ==
                WasmCode::kRuntimeStubCount + 1);

  DCHECK_GT(arraysize(runtime_stub_names), stub_id);
  return runtime_stub_names[stub_id];
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8
#undef TRACE_HEAP