// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/wasm-code-manager.h"

#include <iomanip>

#include "src/base/build_config.h"
#include "src/base/iterator.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/base/small-vector.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/common/globals.h"
#include "src/diagnostics/disassembler.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/objects/objects-inl.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/utils/ostreams.h"
#include "src/utils/vector.h"
#include "src/wasm/code-space-access.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-module-sourcemap.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-objects.h"

#if defined(V8_OS_WIN64)
#include "src/diagnostics/unwinding-info-win64.h"
#endif  // V8_OS_WIN64

#define TRACE_HEAP(...)                                   \
  do {                                                    \
    if (FLAG_trace_wasm_native_heap) PrintF(__VA_ARGS__); \
  } while (false)

namespace v8 {
namespace internal {
namespace wasm {

using trap_handler::ProtectedInstructionData;

#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
thread_local int CodeSpaceWriteScope::code_space_write_nesting_level_ = 0;
#endif

base::AddressRegion DisjointAllocationPool::Merge(
    base::AddressRegion new_region) {
  // Find the possible insertion position by identifying the first region whose
  // start address is not less than that of {new_region}. Since there cannot be
  // any overlap between regions, this also means that the start of {above} is
  // greater than or equal to the *end* of {new_region}.
  auto above = regions_.lower_bound(new_region);
  DCHECK(above == regions_.end() || above->begin() >= new_region.end());

  // Check whether to merge with {above}.
  if (above != regions_.end() && new_region.end() == above->begin()) {
    base::AddressRegion merged_region{new_region.begin(),
                                      new_region.size() + above->size()};
    DCHECK_EQ(merged_region.end(), above->end());
    // Check whether to also merge with the region below.
    if (above != regions_.begin()) {
      auto below = above;
      --below;
      if (below->end() == new_region.begin()) {
        merged_region = {below->begin(), below->size() + merged_region.size()};
        regions_.erase(below);
      }
    }
    auto insert_pos = regions_.erase(above);
    regions_.insert(insert_pos, merged_region);
    return merged_region;
  }

  // No element below, and not adjacent to {above}: insert and done.
  if (above == regions_.begin()) {
    regions_.insert(above, new_region);
    return new_region;
  }

  auto below = above;
  --below;
  // Consistency check:
  DCHECK(above == regions_.end() || below->end() < above->begin());

  // Adjacent to {below}: merge and done.
  if (below->end() == new_region.begin()) {
    base::AddressRegion merged_region{below->begin(),
                                      below->size() + new_region.size()};
    DCHECK_EQ(merged_region.end(), new_region.end());
    regions_.erase(below);
    regions_.insert(above, merged_region);
    return merged_region;
  }

  // Not adjacent to any existing region: insert between {below} and {above}.
  DCHECK_LT(below->end(), new_region.begin());
  regions_.insert(above, new_region);
  return new_region;
}
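
// Worked example (hypothetical addresses): {Merge} coalesces regions that
// touch an existing entry, so returning adjacent allocations keeps the pool
// small.
//
//   DisjointAllocationPool pool({0x10000, 0x100});  // holds [0x10000, 0x10100)
//   pool.Merge({0x10200, 0x100});  // two entries; a 0x100 byte gap remains
//   pool.Merge({0x10100, 0x100});  // gap closed: single region [0x10000, 0x10300)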

base::AddressRegion DisjointAllocationPool::Allocate(size_t size) {
  return AllocateInRegion(size,
                          {kNullAddress, std::numeric_limits<size_t>::max()});
}

base::AddressRegion DisjointAllocationPool::AllocateInRegion(
    size_t size, base::AddressRegion region) {
  // Get an iterator to the first contained region whose start address is not
  // smaller than the start address of {region}. Start the search from the
  // region one before that (the last one whose start address is smaller).
  auto it = regions_.lower_bound(region);
  if (it != regions_.begin()) --it;

  for (auto end = regions_.end(); it != end; ++it) {
    base::AddressRegion overlap = it->GetOverlap(region);
    if (size > overlap.size()) continue;
    base::AddressRegion ret{overlap.begin(), size};
    base::AddressRegion old = *it;
    auto insert_pos = regions_.erase(it);
    if (size == old.size()) {
      // We use the full region --> nothing to add back.
    } else if (ret.begin() == old.begin()) {
      // We return a region at the start --> shrink old region from front.
      regions_.insert(insert_pos, {old.begin() + size, old.size() - size});
    } else if (ret.end() == old.end()) {
      // We return a region at the end --> shrink remaining region.
      regions_.insert(insert_pos, {old.begin(), old.size() - size});
    } else {
      // We return something in the middle --> split the remaining region
      // (insert the region with smaller address first).
      regions_.insert(insert_pos, {old.begin(), ret.begin() - old.begin()});
      regions_.insert(insert_pos, {ret.end(), old.end() - ret.end()});
    }
    return ret;
  }
  return {};
}
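
// Worked example (hypothetical addresses): allocating from the middle of a
// free region splits the remainder in two, as handled in the last branch above.
//
//   DisjointAllocationPool pool({0x10000, 0x1000});
//   auto r = pool.AllocateInRegion(0x200, {0x10800, 0x800});
//   // r is [0x10800, 0x10a00); the pool now holds [0x10000, 0x10800) and
//   // [0x10a00, 0x11000).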

Address WasmCode::constant_pool() const {
  if (FLAG_enable_embedded_constant_pool) {
    if (constant_pool_offset_ < code_comments_offset_) {
      return instruction_start() + constant_pool_offset_;
    }
  }
  return kNullAddress;
}

Address WasmCode::handler_table() const {
  return instruction_start() + handler_table_offset_;
}

int WasmCode::handler_table_size() const {
  DCHECK_GE(constant_pool_offset_, handler_table_offset_);
  return static_cast<int>(constant_pool_offset_ - handler_table_offset_);
}

Address WasmCode::code_comments() const {
  return instruction_start() + code_comments_offset_;
}

int WasmCode::code_comments_size() const {
  DCHECK_GE(unpadded_binary_size_, code_comments_offset_);
  return static_cast<int>(unpadded_binary_size_ - code_comments_offset_);
}

std::unique_ptr<const byte[]> WasmCode::ConcatenateBytes(
    std::initializer_list<Vector<const byte>> vectors) {
  size_t total_size = 0;
  for (auto& vec : vectors) total_size += vec.size();
  // Use default-initialization (== no initialization).
  std::unique_ptr<byte[]> result{new byte[total_size]};
  byte* ptr = result.get();
  for (auto& vec : vectors) {
    if (vec.empty()) continue;  // Avoid nullptr in {memcpy}.
    memcpy(ptr, vec.begin(), vec.size());
    ptr += vec.size();
  }
  return result;
}

void WasmCode::RegisterTrapHandlerData() {
  DCHECK(!has_trap_handler_index());
  if (kind() != WasmCode::kFunction) return;
  if (protected_instructions_size_ == 0) return;

  Address base = instruction_start();

  size_t size = instructions().size();
  auto protected_instruction_data = this->protected_instructions();
  const int index =
      RegisterHandlerData(base, size, protected_instruction_data.size(),
                          protected_instruction_data.begin());

  // TODO(eholk): if index is negative, fail.
  CHECK_LE(0, index);
  set_trap_handler_index(index);
  DCHECK(has_trap_handler_index());
}

bool WasmCode::ShouldBeLogged(Isolate* isolate) {
  // The return value is cached in {WasmEngine::IsolateData::log_codes}. Ensure
  // to call {WasmEngine::EnableCodeLogging} if this return value would change
  // for any isolate. Otherwise we might lose code events.
  return isolate->logger()->is_listening_to_code_events() ||
         isolate->code_event_dispatcher()->IsListeningToCodeEvents() ||
         isolate->is_profiling();
}

void WasmCode::LogCode(Isolate* isolate) const {
  DCHECK(ShouldBeLogged(isolate));
  if (IsAnonymous()) return;

  ModuleWireBytes wire_bytes(native_module()->wire_bytes());
  WireBytesRef name_ref =
      native_module()->module()->lazily_generated_names.LookupFunctionName(
          wire_bytes, index(),
          VectorOf(native_module()->module()->export_table));
  WasmName name = wire_bytes.GetNameOrNull(name_ref);

  const WasmDebugSymbols& debug_symbols =
      native_module()->module()->debug_symbols;
  auto load_wasm_source_map = isolate->wasm_load_source_map_callback();
  auto source_map = native_module()->GetWasmSourceMap();
  if (!source_map && debug_symbols.type == WasmDebugSymbols::Type::SourceMap &&
      !debug_symbols.external_url.is_empty() && load_wasm_source_map) {
    WasmName external_url =
        wire_bytes.GetNameOrNull(debug_symbols.external_url);
    std::string external_url_string(external_url.data(), external_url.size());
    HandleScope scope(isolate);
    v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
    Local<v8::String> source_map_str =
        load_wasm_source_map(v8_isolate, external_url_string.c_str());
    native_module()->SetWasmSourceMap(
        std::make_unique<WasmModuleSourceMap>(v8_isolate, source_map_str));
  }

  std::string name_buffer;
  if (kind() == kWasmToJsWrapper) {
    name_buffer = "wasm-to-js:";
    size_t prefix_len = name_buffer.size();
    constexpr size_t kMaxSigLength = 128;
    name_buffer.resize(prefix_len + kMaxSigLength);
    const FunctionSig* sig = native_module()->module()->functions[index_].sig;
    size_t sig_length =
        PrintSignature(VectorOf(&name_buffer[prefix_len], kMaxSigLength), sig);
    name_buffer.resize(prefix_len + sig_length);
    // If the import has a name, also append that (separated by "-").
    if (!name.empty()) {
      name_buffer += '-';
      name_buffer.append(name.begin(), name.size());
    }
    name = VectorOf(name_buffer);
  } else if (name.empty()) {
    name_buffer.resize(32);
    name_buffer.resize(
        SNPrintF(VectorOf(&name_buffer.front(), name_buffer.size()),
                 "wasm-function[%d]", index()));
    name = VectorOf(name_buffer);
  }
  PROFILE(isolate,
          CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this, name));

  if (!source_positions().empty()) {
    LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(instruction_start(),
                                                       source_positions()));
  }
}

void WasmCode::Validate() const {
#ifdef DEBUG
  // Scope for foreign WasmCode pointers.
  WasmCodeRefScope code_ref_scope;
  // We expect certain relocation info modes to never appear in {WasmCode}
  // objects or to be restricted to a small set of valid values. Hence the
  // iteration below does not use a mask, but visits all relocation data.
  for (RelocIterator it(instructions(), reloc_info(), constant_pool());
       !it.done(); it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    switch (mode) {
      case RelocInfo::WASM_CALL: {
        Address target = it.rinfo()->wasm_call_address();
        WasmCode* code = native_module_->Lookup(target);
        CHECK_NOT_NULL(code);
        CHECK_EQ(WasmCode::kJumpTable, code->kind());
        CHECK(code->contains(target));
        break;
      }
      case RelocInfo::WASM_STUB_CALL: {
        Address target = it.rinfo()->wasm_stub_call_address();
        WasmCode* code = native_module_->Lookup(target);
        CHECK_NOT_NULL(code);
        CHECK_EQ(WasmCode::kJumpTable, code->kind());
        CHECK(code->contains(target));
        break;
      }
      case RelocInfo::INTERNAL_REFERENCE:
      case RelocInfo::INTERNAL_REFERENCE_ENCODED: {
        Address target = it.rinfo()->target_internal_reference();
        CHECK(contains(target));
        break;
      }
      case RelocInfo::EXTERNAL_REFERENCE:
      case RelocInfo::CONST_POOL:
      case RelocInfo::VENEER_POOL:
        // These are OK to appear.
        break;
      default:
        FATAL("Unexpected mode: %d", mode);
    }
  }
#endif
}

void WasmCode::MaybePrint(const char* name) const {
  // Determines whether flags want this code to be printed.
  if ((FLAG_print_wasm_code && kind() == kFunction) ||
      (FLAG_print_wasm_stub_code && kind() != kFunction) || FLAG_print_code) {
    Print(name);
  }
}

void WasmCode::Print(const char* name) const {
  StdoutStream os;
  os << "--- WebAssembly code ---\n";
  Disassemble(name, os);
  if (native_module_->HasDebugInfo()) {
    if (auto* debug_side_table =
            native_module_->GetDebugInfo()->GetDebugSideTableIfExists(this)) {
      debug_side_table->Print(os);
    }
  }
  os << "--- End code ---\n";
}

void WasmCode::Disassemble(const char* name, std::ostream& os,
                           Address current_pc) const {
  if (name) os << "name: " << name << "\n";
  if (!IsAnonymous()) os << "index: " << index() << "\n";
  os << "kind: " << GetWasmCodeKindAsString(kind()) << "\n";
  os << "compiler: " << (is_liftoff() ? "Liftoff" : "TurboFan") << "\n";
  size_t padding = instructions().size() - unpadded_binary_size_;
  os << "Body (size = " << instructions().size() << " = "
     << unpadded_binary_size_ << " + " << padding << " padding)\n";

#ifdef ENABLE_DISASSEMBLER
  int instruction_size = unpadded_binary_size_;
  if (constant_pool_offset_ < instruction_size) {
    instruction_size = constant_pool_offset_;
  }
  if (safepoint_table_offset_ && safepoint_table_offset_ < instruction_size) {
    instruction_size = safepoint_table_offset_;
  }
  if (handler_table_offset_ < instruction_size) {
    instruction_size = handler_table_offset_;
  }
  DCHECK_LT(0, instruction_size);
  os << "Instructions (size = " << instruction_size << ")\n";
  Disassembler::Decode(nullptr, &os, instructions().begin(),
                       instructions().begin() + instruction_size,
                       CodeReference(this), current_pc);
  os << "\n";

  if (handler_table_size() > 0) {
    HandlerTable table(this);
    os << "Exception Handler Table (size = " << table.NumberOfReturnEntries()
       << "):\n";
    table.HandlerTableReturnPrint(os);
    os << "\n";
  }

  if (protected_instructions_size_ > 0) {
    os << "Protected instructions:\n pc offset  land pad\n";
    for (auto& data : protected_instructions()) {
      os << std::setw(10) << std::hex << data.instr_offset << std::setw(10)
         << std::hex << data.landing_offset << "\n";
    }
    os << "\n";
  }

  if (!source_positions().empty()) {
    os << "Source positions:\n pc offset  position\n";
    for (SourcePositionTableIterator it(source_positions()); !it.done();
         it.Advance()) {
      os << std::setw(10) << std::hex << it.code_offset() << std::dec
         << std::setw(10) << it.source_position().ScriptOffset()
         << (it.is_statement() ? "  statement" : "") << "\n";
    }
    os << "\n";
  }

  if (safepoint_table_offset_ > 0) {
    SafepointTable table(this);
    os << "Safepoints (size = " << table.size() << ")\n";
    for (uint32_t i = 0; i < table.length(); i++) {
      uintptr_t pc_offset = table.GetPcOffset(i);
      os << reinterpret_cast<const void*>(instruction_start() + pc_offset);
      os << std::setw(6) << std::hex << pc_offset << "  " << std::dec;
      table.PrintEntry(i, os);
      os << " (sp -> fp)";
      SafepointEntry entry = table.GetEntry(i);
      if (entry.trampoline_pc() != -1) {
        os << " trampoline: " << std::hex << entry.trampoline_pc() << std::dec;
      }
      if (entry.has_deoptimization_index()) {
        os << " deopt: " << std::setw(6) << entry.deoptimization_index();
      }
      os << "\n";
    }
    os << "\n";
  }

  os << "RelocInfo (size = " << reloc_info().size() << ")\n";
  for (RelocIterator it(instructions(), reloc_info(), constant_pool());
       !it.done(); it.next()) {
    it.rinfo()->Print(nullptr, os);
  }
  os << "\n";

  if (code_comments_size() > 0) {
    PrintCodeCommentsSection(os, code_comments(), code_comments_size());
  }
#endif  // ENABLE_DISASSEMBLER
}

const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
  switch (kind) {
    case WasmCode::kFunction:
      return "wasm function";
    case WasmCode::kWasmToCapiWrapper:
      return "wasm-to-capi";
    case WasmCode::kWasmToJsWrapper:
      return "wasm-to-js";
    case WasmCode::kJumpTable:
      return "jump table";
  }
  return "unknown kind";
}

WasmCode::~WasmCode() {
  if (has_trap_handler_index()) {
    trap_handler::ReleaseHandlerData(trap_handler_index());
  }
}

V8_WARN_UNUSED_RESULT bool WasmCode::DecRefOnPotentiallyDeadCode() {
  if (native_module_->engine()->AddPotentiallyDeadCode(this)) {
    // The code just became potentially dead. The ref count we wanted to
    // decrement is now transferred to the set of potentially dead code, and
    // will be decremented when the next GC is run.
    return false;
  }
  // If we reach here, the code was already potentially dead. Decrement the ref
  // count, and return true if it drops to zero.
  return DecRefOnDeadCode();
}

// static
void WasmCode::DecrementRefCount(Vector<WasmCode* const> code_vec) {
  // Decrement the ref counter of all given code objects. Keep the ones whose
  // ref count drops to zero.
  WasmEngine::DeadCodeMap dead_code;
  WasmEngine* engine = nullptr;
  for (WasmCode* code : code_vec) {
    if (!code->DecRef()) continue;  // Remaining references.
    dead_code[code->native_module()].push_back(code);
    if (!engine) engine = code->native_module()->engine();
    DCHECK_EQ(engine, code->native_module()->engine());
  }

  DCHECK_EQ(dead_code.empty(), engine == nullptr);
  if (engine) engine->FreeDeadCode(dead_code);
}

int WasmCode::GetSourcePositionBefore(int offset) {
  int position = kNoSourcePosition;
  for (SourcePositionTableIterator iterator(source_positions());
       !iterator.done() && iterator.code_offset() < offset;
       iterator.Advance()) {
    position = iterator.source_position().ScriptOffset();
  }
  return position;
}
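
// Example: with source position entries recorded at code offsets 0x10, 0x30
// and 0x50, GetSourcePositionBefore(0x40) returns the position stored for
// offset 0x30; an {offset} at or before the first entry's code offset yields
// kNoSourcePosition.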

WasmCodeAllocator::OptionalLock::~OptionalLock() {
  if (allocator_) allocator_->mutex_.Unlock();
}

void WasmCodeAllocator::OptionalLock::Lock(WasmCodeAllocator* allocator) {
  DCHECK(!is_locked());
  allocator_ = allocator;
  allocator->mutex_.Lock();
}

// static
constexpr size_t WasmCodeAllocator::kMaxCodeSpaceSize;

WasmCodeAllocator::WasmCodeAllocator(WasmCodeManager* code_manager,
                                     VirtualMemory code_space,
                                     std::shared_ptr<Counters> async_counters)
    : code_manager_(code_manager),
      free_code_space_(code_space.region()),
      async_counters_(std::move(async_counters)) {
  owned_code_space_.reserve(4);
  owned_code_space_.emplace_back(std::move(code_space));
  async_counters_->wasm_module_num_code_spaces()->AddSample(1);
}

WasmCodeAllocator::~WasmCodeAllocator() {
  code_manager_->FreeNativeModule(VectorOf(owned_code_space_),
                                  committed_code_space());
}

void WasmCodeAllocator::Init(NativeModule* native_module) {
  DCHECK_EQ(1, owned_code_space_.size());
  native_module->AddCodeSpace(owned_code_space_[0].region(), {});
}

namespace {
// On Windows, we cannot commit a region that straddles different reservations
// of virtual memory. Because we bump-allocate, and because, if we need more
// memory, we append that memory at the end of the owned_code_space_ list, we
// traverse that list in reverse order to find the reservation(s) that guide how
// to chunk the region to commit.
#if V8_OS_WIN
constexpr bool kNeedsToSplitRangeByReservations = true;
#else
constexpr bool kNeedsToSplitRangeByReservations = false;
#endif

base::SmallVector<base::AddressRegion, 1> SplitRangeByReservationsIfNeeded(
    base::AddressRegion range,
    const std::vector<VirtualMemory>& owned_code_space) {
  if (!kNeedsToSplitRangeByReservations) return {range};

  base::SmallVector<base::AddressRegion, 1> split_ranges;
  size_t missing_begin = range.begin();
  size_t missing_end = range.end();
  for (auto& vmem : base::Reversed(owned_code_space)) {
    Address overlap_begin = std::max(missing_begin, vmem.address());
    Address overlap_end = std::min(missing_end, vmem.end());
    if (overlap_begin >= overlap_end) continue;
    split_ranges.emplace_back(overlap_begin, overlap_end - overlap_begin);
    // Opportunistically reduce the missing range. This might terminate the loop
    // early.
    if (missing_begin == overlap_begin) missing_begin = overlap_end;
    if (missing_end == overlap_end) missing_end = overlap_begin;
    if (missing_begin >= missing_end) break;
  }
#ifdef ENABLE_SLOW_DCHECKS
  // The returned vector should cover the full range.
  size_t total_split_size = 0;
  for (auto split : split_ranges) total_split_size += split.size();
  DCHECK_EQ(range.size(), total_split_size);
#endif
  return split_ranges;
}
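
// Worked example (hypothetical reservations): on Windows, committing a range
// that straddles two reservations A = [0x10000, 0x20000) and
// B = [0x20000, 0x30000) yields one chunk per reservation:
//   SplitRangeByReservationsIfNeeded({0x1f000, 0x2000}, {A, B})
// returns [0x20000, 0x21000) and [0x1f000, 0x20000), in reverse-reservation
// order. On other platforms the input range is returned unchanged.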

int NumWasmFunctionsInFarJumpTable(uint32_t num_declared_functions) {
  return NativeModule::kNeedsFarJumpsBetweenCodeSpaces
             ? static_cast<int>(num_declared_functions)
             : 0;
}

// Returns an overapproximation of the code size overhead per new code space
// created by the jump tables.
size_t OverheadPerCodeSpace(uint32_t num_declared_functions) {
  // Overhead for the jump table.
  size_t overhead = RoundUp<kCodeAlignment>(
      JumpTableAssembler::SizeForNumberOfSlots(num_declared_functions));

#if defined(V8_OS_WIN64)
  // On Win64, we need to reserve some pages at the beginning of an executable
  // space. See {AddCodeSpace}.
  overhead += Heap::GetCodeRangeReservedAreaSize();
#endif  // V8_OS_WIN64

  // Overhead for the far jump table.
  overhead +=
      RoundUp<kCodeAlignment>(JumpTableAssembler::SizeForNumberOfFarJumpSlots(
          WasmCode::kRuntimeStubCount,
          NumWasmFunctionsInFarJumpTable(num_declared_functions)));

  return overhead;
}

size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
                       size_t total_reserved) {
  size_t overhead = OverheadPerCodeSpace(num_declared_functions);

  // Reserve a power of two at least as big as any of
  //   a) needed size + overhead (this is the minimum needed)
  //   b) 2 * overhead (to not waste too much space by overhead)
  //   c) 1/4 of current total reservation size (to grow exponentially)
  size_t reserve_size = base::bits::RoundUpToPowerOfTwo(
      std::max(std::max(RoundUp<kCodeAlignment>(code_size_estimate) + overhead,
                        2 * overhead),
               total_reserved / 4));

  // Limit by the maximum supported code space size.
  return std::min(WasmCodeAllocator::kMaxCodeSpaceSize, reserve_size);
}
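
// Worked example (hypothetical sizes): assume OverheadPerCodeSpace yields
// 64 KB for this module. A request with code_size_estimate = 1 MB and
// total_reserved = 2 MB computes
//   max(1 MB + 64 KB, 2 * 64 KB, 2 MB / 4) = 1 MB + 64 KB,
// which RoundUpToPowerOfTwo turns into 2 MB, subject to the kMaxCodeSpaceSize
// cap.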

}  // namespace

Vector<byte> WasmCodeAllocator::AllocateForCode(NativeModule* native_module,
                                                size_t size) {
  return AllocateForCodeInRegion(native_module, size, kUnrestrictedRegion,
                                 WasmCodeAllocator::OptionalLock{});
}

Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
    NativeModule* native_module, size_t size, base::AddressRegion region,
    const WasmCodeAllocator::OptionalLock& optional_lock) {
  OptionalLock new_lock;
  if (!optional_lock.is_locked()) new_lock.Lock(this);
  const auto& locked_lock =
      optional_lock.is_locked() ? optional_lock : new_lock;
  DCHECK(locked_lock.is_locked());
  DCHECK_EQ(code_manager_, native_module->engine()->code_manager());
  DCHECK_LT(0, size);
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  size = RoundUp<kCodeAlignment>(size);
  base::AddressRegion code_space =
      free_code_space_.AllocateInRegion(size, region);
  if (V8_UNLIKELY(code_space.is_empty())) {
    // Only allocations without a specific region are allowed to fail. Otherwise
    // the region must have been allocated big enough to hold all initial
    // allocations (jump tables etc).
    CHECK_EQ(kUnrestrictedRegion, region);

    Address hint = owned_code_space_.empty() ? kNullAddress
                                             : owned_code_space_.back().end();

    size_t total_reserved = 0;
    for (auto& vmem : owned_code_space_) total_reserved += vmem.size();
    size_t reserve_size = ReservationSize(
        size, native_module->module()->num_declared_functions, total_reserved);
    VirtualMemory new_mem =
        code_manager_->TryAllocate(reserve_size, reinterpret_cast<void*>(hint));
    if (!new_mem.IsReserved()) {
      V8::FatalProcessOutOfMemory(nullptr, "wasm code reservation");
      UNREACHABLE();
    }

    base::AddressRegion new_region = new_mem.region();
    code_manager_->AssignRange(new_region, native_module);
    free_code_space_.Merge(new_region);
    owned_code_space_.emplace_back(std::move(new_mem));
    native_module->AddCodeSpace(new_region, locked_lock);

    code_space = free_code_space_.Allocate(size);
    DCHECK(!code_space.is_empty());
    async_counters_->wasm_module_num_code_spaces()->AddSample(
        static_cast<int>(owned_code_space_.size()));
  }
  const Address commit_page_size = page_allocator->CommitPageSize();
  Address commit_start = RoundUp(code_space.begin(), commit_page_size);
  Address commit_end = RoundUp(code_space.end(), commit_page_size);
  // {commit_start} will be either code_space.start or the start of the next
  // page. {commit_end} will be the start of the page after the one in which
  // the allocation ends.
  // We start from an aligned start, and we know we allocated vmem in
  // page multiples.
  // We just need to commit what's not committed. The page in which we
  // start is already committed (or we start at the beginning of a page).
  // The end needs to be committed all through the end of the page.
  if (commit_start < commit_end) {
    for (base::AddressRegion split_range : SplitRangeByReservationsIfNeeded(
             {commit_start, commit_end - commit_start}, owned_code_space_)) {
      code_manager_->Commit(split_range);
    }
    committed_code_space_.fetch_add(commit_end - commit_start);
    // Committed code cannot grow bigger than maximum code space size.
    DCHECK_LE(committed_code_space_.load(), FLAG_wasm_max_code_space * MB);
  }
  DCHECK(IsAligned(code_space.begin(), kCodeAlignment));
  allocated_code_space_.Merge(code_space);
  generated_code_size_.fetch_add(code_space.size(), std::memory_order_relaxed);

  TRACE_HEAP("Code alloc for %p: 0x%" PRIxPTR ",+%zu\n", this,
             code_space.begin(), size);
  return {reinterpret_cast<byte*>(code_space.begin()), code_space.size()};
}
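
// Worked example (hypothetical addresses, 4 KB commit pages): if {code_space}
// is [0x1f80, 0x2180), then commit_start = 0x2000 and commit_end = 0x3000, so
// only the page not covered by earlier allocations is committed. An allocation
// that stays within an already-committed page ends up with
// commit_start == commit_end and commits nothing new.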

bool WasmCodeAllocator::SetExecutable(bool executable) {
  base::MutexGuard lock(&mutex_);
  if (is_executable_ == executable) return true;
  TRACE_HEAP("Setting module %p as executable: %d.\n", this, executable);

  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();

  if (FLAG_wasm_write_protect_code_memory) {
    PageAllocator::Permission permission =
        executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;
#if V8_OS_WIN
    // On windows, we need to switch permissions per separate virtual memory
    // reservation.
    // For now, in that case, we commit at reserved memory granularity.
    // Technically, that may be a waste, because we may reserve more than we
    // use. On 32-bit though, the scarce resource is the address space -
    // committed or not.
    for (auto& vmem : owned_code_space_) {
      if (!SetPermissions(page_allocator, vmem.address(), vmem.size(),
                          permission)) {
        return false;
      }
      TRACE_HEAP("Set %p:%p to executable:%d\n", vmem.address(), vmem.end(),
                 executable);
    }
#else   // V8_OS_WIN
    size_t commit_page_size = page_allocator->CommitPageSize();
    for (auto& region : allocated_code_space_.regions()) {
      // allocated_code_space_ is fine-grained, so we need to
      // page-align it.
      size_t region_size = RoundUp(region.size(), commit_page_size);
      if (!SetPermissions(page_allocator, region.begin(), region_size,
                          permission)) {
        return false;
      }
      TRACE_HEAP("Set 0x%" PRIxPTR ":0x%" PRIxPTR " to executable:%d\n",
                 region.begin(), region.end(), executable);
    }
#endif  // V8_OS_WIN
  }
  is_executable_ = executable;
  return true;
}

void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
  // Zap code area and collect freed code regions.
  DisjointAllocationPool freed_regions;
  size_t code_size = 0;
  CODE_SPACE_WRITE_SCOPE
  for (WasmCode* code : codes) {
    ZapCode(code->instruction_start(), code->instructions().size());
    FlushInstructionCache(code->instruction_start(),
                          code->instructions().size());
    code_size += code->instructions().size();
    freed_regions.Merge(base::AddressRegion{code->instruction_start(),
                                            code->instructions().size()});
  }
  freed_code_size_.fetch_add(code_size);

  // Merge {freed_regions} into {freed_code_space_} and put all ranges of full
  // pages to decommit into {regions_to_decommit} (decommitting is expensive,
  // so try to merge regions before decommitting).
  DisjointAllocationPool regions_to_decommit;
  PageAllocator* allocator = GetPlatformPageAllocator();
  size_t commit_page_size = allocator->CommitPageSize();
  {
    base::MutexGuard guard(&mutex_);
    for (auto region : freed_regions.regions()) {
      auto merged_region = freed_code_space_.Merge(region);
      Address discard_start =
          std::max(RoundUp(merged_region.begin(), commit_page_size),
                   RoundDown(region.begin(), commit_page_size));
      Address discard_end =
          std::min(RoundDown(merged_region.end(), commit_page_size),
                   RoundUp(region.end(), commit_page_size));
      if (discard_start >= discard_end) continue;
      regions_to_decommit.Merge({discard_start, discard_end - discard_start});
    }
  }

  for (auto region : regions_to_decommit.regions()) {
    size_t old_committed = committed_code_space_.fetch_sub(region.size());
    DCHECK_GE(old_committed, region.size());
    USE(old_committed);
    for (base::AddressRegion split_range :
         SplitRangeByReservationsIfNeeded(region, owned_code_space_)) {
      code_manager_->Decommit(split_range);
    }
  }
}
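
// Worked example (hypothetical addresses, 4 KB commit pages): freeing code at
// [0x2100, 0x5f00) with no adjacent free space decommits only the fully
// covered pages [0x3000, 0x5000). If [0x2000, 0x2100) was already in
// {freed_code_space_}, the merged region starts on a page boundary and
// [0x2000, 0x5000) can be decommitted instead.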

size_t WasmCodeAllocator::GetNumCodeSpaces() const {
  base::MutexGuard lock(&mutex_);
  return owned_code_space_.size();
}

// static
constexpr base::AddressRegion WasmCodeAllocator::kUnrestrictedRegion;

NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
                           VirtualMemory code_space,
                           std::shared_ptr<const WasmModule> module,
                           std::shared_ptr<Counters> async_counters,
                           std::shared_ptr<NativeModule>* shared_this)
    : code_allocator_(engine->code_manager(), std::move(code_space),
                      async_counters),
      enabled_features_(enabled),
      module_(std::move(module)),
      import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
          new WasmImportWrapperCache())),
      engine_(engine),
      use_trap_handler_(trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler
                                                             : kNoTrapHandler) {
  // We receive a pointer to an empty {std::shared_ptr}, and install ourselves
  // there.
  DCHECK_NOT_NULL(shared_this);
  DCHECK_NULL(*shared_this);
  shared_this->reset(this);
  compilation_state_ =
      CompilationState::New(*shared_this, std::move(async_counters));
  DCHECK_NOT_NULL(module_);
  if (module_->num_declared_functions > 0) {
    code_table_ =
        std::make_unique<WasmCode*[]>(module_->num_declared_functions);
    num_liftoff_function_calls_ =
        std::make_unique<uint32_t[]>(module_->num_declared_functions);

    // Start counter at 4 to avoid runtime calls for smaller numbers.
    constexpr int kCounterStart = 4;
    std::fill_n(num_liftoff_function_calls_.get(),
                module_->num_declared_functions, kCounterStart);
  }
  code_allocator_.Init(this);
}

void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
  WasmCodeRefScope code_ref_scope;
  DCHECK_LE(module_->num_declared_functions, max_functions);
  auto new_table = std::make_unique<WasmCode*[]>(max_functions);
  if (module_->num_declared_functions > 0) {
    memcpy(new_table.get(), code_table_.get(),
           module_->num_declared_functions * sizeof(WasmCode*));
  }
  code_table_ = std::move(new_table);

  base::AddressRegion single_code_space_region;
  {
    base::MutexGuard guard(&allocation_mutex_);
    CHECK_EQ(1, code_space_data_.size());
    single_code_space_region = code_space_data_[0].region;
  }
  // Re-allocate jump table.
  main_jump_table_ = CreateEmptyJumpTableInRegion(
      JumpTableAssembler::SizeForNumberOfSlots(max_functions),
      single_code_space_region, WasmCodeAllocator::OptionalLock{});
  base::MutexGuard guard(&allocation_mutex_);
  code_space_data_[0].jump_table = main_jump_table_;
}

void NativeModule::LogWasmCodes(Isolate* isolate) {
  if (!WasmCode::ShouldBeLogged(isolate)) return;

  TRACE_EVENT1("v8.wasm", "wasm.LogWasmCodes", "num_functions",
               module_->num_declared_functions);

  // TODO(titzer): we skip the logging of the import wrappers
  // here, but they should be included somehow.
  int start = module_->num_imported_functions;
  int end = start + module_->num_declared_functions;
  WasmCodeRefScope code_ref_scope;
  for (int func_index = start; func_index < end; ++func_index) {
    if (WasmCode* code = GetCode(func_index)) code->LogCode(isolate);
  }
}

CompilationEnv NativeModule::CreateCompilationEnv() const {
  return {module(), use_trap_handler_, kRuntimeExceptionSupport,
          enabled_features_, kNoLowerSimd};
}

WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
  CODE_SPACE_WRITE_SCOPE
  // For off-heap builtins, we create a copy of the off-heap instruction stream
  // instead of the on-heap code object containing the trampoline. Ensure that
  // we do not apply the on-heap reloc info to the off-heap instructions.
  const size_t relocation_size =
      code->is_off_heap_trampoline() ? 0 : code->relocation_size();
  OwnedVector<byte> reloc_info;
  if (relocation_size > 0) {
    reloc_info = OwnedVector<byte>::Of(
        Vector<byte>{code->relocation_start(), relocation_size});
  }
  Handle<ByteArray> source_pos_table(code->SourcePositionTable(),
                                     code->GetIsolate());
  OwnedVector<byte> source_pos =
      OwnedVector<byte>::NewForOverwrite(source_pos_table->length());
  if (source_pos_table->length() > 0) {
    source_pos_table->copy_out(0, source_pos.start(),
                               source_pos_table->length());
  }
  Vector<const byte> instructions(
      reinterpret_cast<byte*>(code->InstructionStart()),
      static_cast<size_t>(code->InstructionSize()));
  const int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;

  // TODO(jgruber,v8:8758): Remove this translation. It exists only because
  // Code objects contains real offsets but WasmCode expects an offset of 0 to
  // mean 'empty'.
  const int safepoint_table_offset =
      code->has_safepoint_table() ? code->safepoint_table_offset() : 0;
  const int handler_table_offset = code->handler_table_offset();
  const int constant_pool_offset = code->constant_pool_offset();
  const int code_comments_offset = code->code_comments_offset();

  Vector<uint8_t> dst_code_bytes =
      code_allocator_.AllocateForCode(this, instructions.size());
  memcpy(dst_code_bytes.begin(), instructions.begin(), instructions.size());

  // Apply the relocation delta by iterating over the RelocInfo.
  intptr_t delta = reinterpret_cast<Address>(dst_code_bytes.begin()) -
                   code->InstructionStart();
  int mode_mask =
      RelocInfo::kApplyMask | RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
  auto jump_tables_ref =
      FindJumpTablesForRegion(base::AddressRegionOf(dst_code_bytes));
  Address dst_code_addr = reinterpret_cast<Address>(dst_code_bytes.begin());
  Address constant_pool_start = dst_code_addr + constant_pool_offset;
  RelocIterator orig_it(*code, mode_mask);
  for (RelocIterator it(dst_code_bytes, reloc_info.as_vector(),
                        constant_pool_start, mode_mask);
       !it.done(); it.next(), orig_it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    if (RelocInfo::IsWasmStubCall(mode)) {
      uint32_t stub_call_tag = orig_it.rinfo()->wasm_call_tag();
      DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
      Address entry = GetNearRuntimeStubEntry(
          static_cast<WasmCode::RuntimeStubId>(stub_call_tag), jump_tables_ref);
      it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
    } else {
      it.rinfo()->apply(delta);
    }
  }

  // Flush the i-cache after relocation.
  FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());

  std::unique_ptr<WasmCode> new_code{
      new WasmCode{this,                    // native_module
                   kAnonymousFuncIndex,     // index
                   dst_code_bytes,          // instructions
                   stack_slots,             // stack_slots
                   0,                       // tagged_parameter_slots
                   safepoint_table_offset,  // safepoint_table_offset
                   handler_table_offset,    // handler_table_offset
                   constant_pool_offset,    // constant_pool_offset
                   code_comments_offset,    // code_comments_offset
                   instructions.length(),   // unpadded_binary_size
                   {},                      // protected_instructions
                   reloc_info.as_vector(),  // reloc_info
                   source_pos.as_vector(),  // source positions
                   WasmCode::kFunction,     // kind
                   ExecutionTier::kNone,    // tier
                   kNoDebugging}};          // for_debugging
  new_code->MaybePrint();
  new_code->Validate();

  return PublishCode(std::move(new_code));
}

void NativeModule::UseLazyStub(uint32_t func_index) {
  DCHECK_LE(module_->num_imported_functions, func_index);
  DCHECK_LT(func_index,
            module_->num_imported_functions + module_->num_declared_functions);

  if (!lazy_compile_table_) {
    uint32_t num_slots = module_->num_declared_functions;
    WasmCodeRefScope code_ref_scope;
    CODE_SPACE_WRITE_SCOPE
    base::AddressRegion single_code_space_region;
    {
      base::MutexGuard guard(&allocation_mutex_);
      DCHECK_EQ(1, code_space_data_.size());
      single_code_space_region = code_space_data_[0].region;
    }
    lazy_compile_table_ = CreateEmptyJumpTableInRegion(
        JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots),
        single_code_space_region, WasmCodeAllocator::OptionalLock{});
    JumpTableAssembler::GenerateLazyCompileTable(
        lazy_compile_table_->instruction_start(), num_slots,
        module_->num_imported_functions,
        GetNearRuntimeStubEntry(WasmCode::kWasmCompileLazy,
                                FindJumpTablesForRegion(base::AddressRegionOf(
                                    lazy_compile_table_->instructions()))));
  }

  // Add jump table entry for jump to the lazy compile stub.
  uint32_t slot_index = declared_function_index(module(), func_index);
  DCHECK_NULL(code_table_[slot_index]);
  Address lazy_compile_target =
      lazy_compile_table_->instruction_start() +
      JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
  base::MutexGuard guard(&allocation_mutex_);
  PatchJumpTablesLocked(slot_index, lazy_compile_target);
}

std::unique_ptr<WasmCode> NativeModule::AddCode(
    int index, const CodeDesc& desc, int stack_slots,
    int tagged_parameter_slots, Vector<const byte> protected_instructions_data,
    Vector<const byte> source_position_table, WasmCode::Kind kind,
    ExecutionTier tier, ForDebugging for_debugging) {
  Vector<byte> code_space =
      code_allocator_.AllocateForCode(this, desc.instr_size);
  auto jump_table_ref =
      FindJumpTablesForRegion(base::AddressRegionOf(code_space));
  return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
                              protected_instructions_data,
                              source_position_table, kind, tier, for_debugging,
                              code_space, jump_table_ref);
}

std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
    int index, const CodeDesc& desc, int stack_slots,
    int tagged_parameter_slots, Vector<const byte> protected_instructions_data,
    Vector<const byte> source_position_table, WasmCode::Kind kind,
    ExecutionTier tier, ForDebugging for_debugging,
    Vector<uint8_t> dst_code_bytes, const JumpTablesRef& jump_tables) {
  Vector<byte> reloc_info{desc.buffer + desc.buffer_size - desc.reloc_size,
                          static_cast<size_t>(desc.reloc_size)};

  // TODO(jgruber,v8:8758): Remove this translation. It exists only because
  // CodeDesc contains real offsets but WasmCode expects an offset of 0 to mean
  // 'empty'.
  const int safepoint_table_offset =
      desc.safepoint_table_size == 0 ? 0 : desc.safepoint_table_offset;
  const int handler_table_offset = desc.handler_table_offset;
  const int constant_pool_offset = desc.constant_pool_offset;
  const int code_comments_offset = desc.code_comments_offset;
  const int instr_size = desc.instr_size;

  CODE_SPACE_WRITE_SCOPE
  memcpy(dst_code_bytes.begin(), desc.buffer,
         static_cast<size_t>(desc.instr_size));

  // Apply the relocation delta by iterating over the RelocInfo.
  intptr_t delta = dst_code_bytes.begin() - desc.buffer;
  int mode_mask = RelocInfo::kApplyMask |
                  RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
                  RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
  Address code_start = reinterpret_cast<Address>(dst_code_bytes.begin());
  Address constant_pool_start = code_start + constant_pool_offset;
  for (RelocIterator it(dst_code_bytes, reloc_info, constant_pool_start,
                        mode_mask);
       !it.done(); it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    if (RelocInfo::IsWasmCall(mode)) {
      uint32_t call_tag = it.rinfo()->wasm_call_tag();
      Address target = GetNearCallTargetForFunction(call_tag, jump_tables);
      it.rinfo()->set_wasm_call_address(target, SKIP_ICACHE_FLUSH);
    } else if (RelocInfo::IsWasmStubCall(mode)) {
      uint32_t stub_call_tag = it.rinfo()->wasm_call_tag();
      DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
      Address entry = GetNearRuntimeStubEntry(
          static_cast<WasmCode::RuntimeStubId>(stub_call_tag), jump_tables);
      it.rinfo()->set_wasm_stub_call_address(entry, SKIP_ICACHE_FLUSH);
    } else {
      it.rinfo()->apply(delta);
    }
  }

  // Flush the i-cache after relocation.
  FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());

  // Liftoff code will not be relocated or serialized, thus do not store any
  // relocation information.
  if (tier == ExecutionTier::kLiftoff) reloc_info = {};

  std::unique_ptr<WasmCode> code{new WasmCode{
      this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
      safepoint_table_offset, handler_table_offset, constant_pool_offset,
      code_comments_offset, instr_size, protected_instructions_data, reloc_info,
      source_position_table, kind, tier, for_debugging}};
  code->MaybePrint();
  code->Validate();

  return code;
}

WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
  base::MutexGuard lock(&allocation_mutex_);
  return PublishCodeLocked(std::move(code));
}

std::vector<WasmCode*> NativeModule::PublishCode(
    Vector<std::unique_ptr<WasmCode>> codes) {
  std::vector<WasmCode*> published_code;
  published_code.reserve(codes.size());
  base::MutexGuard lock(&allocation_mutex_);
  // The published code is put into the top-most surrounding {WasmCodeRefScope}.
  for (auto& code : codes) {
    published_code.push_back(PublishCodeLocked(std::move(code)));
  }
  return published_code;
}

WasmCode::Kind GetCodeKind(const WasmCompilationResult& result) {
  switch (result.kind) {
    case WasmCompilationResult::kWasmToJsWrapper:
      return WasmCode::Kind::kWasmToJsWrapper;
    case WasmCompilationResult::kFunction:
      return WasmCode::Kind::kFunction;
    default:
      UNREACHABLE();
  }
}

WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
  // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
  DCHECK(!allocation_mutex_.TryLock());

  if (!code->IsAnonymous() &&
      code->index() >= module_->num_imported_functions) {
    DCHECK_LT(code->index(), num_functions());

    code->RegisterTrapHandlerData();

    // Assume an order of execution tiers that represents the quality of their
    // generated code.
    static_assert(ExecutionTier::kNone < ExecutionTier::kLiftoff &&
                      ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
                  "Assume an order on execution tiers");

    uint32_t slot_idx = declared_function_index(module(), code->index());
    WasmCode* prior_code = code_table_[slot_idx];
    // If we are tiered down, install all debugging code (except for stepping
    // code, which is only used for a single frame and never installed in the
    // code table of jump table). Otherwise, install code if it was compiled
    // with a higher tier.
    static_assert(
        kForDebugging > kNoDebugging && kWithBreakpoints > kForDebugging,
        "for_debugging is ordered");
    const bool update_code_table =
        // Never install stepping code.
        code->for_debugging() != kForStepping &&
        (!prior_code ||
         (tiering_state_ == kTieredDown
              // Tiered down: Install breakpoints over normal debug code.
              ? prior_code->for_debugging() <= code->for_debugging()
              // Tiered up: Install if the tier is higher than before.
              : prior_code->tier() < code->tier()));
    if (update_code_table) {
      code_table_[slot_idx] = code.get();
      if (prior_code) {
        WasmCodeRefScope::AddRef(prior_code);
        // The code is added to the current {WasmCodeRefScope}, hence the ref
        // count cannot drop to zero here.
        CHECK(!prior_code->DecRef());
      }

      PatchJumpTablesLocked(slot_idx, code->instruction_start());
    }
    if (!code->for_debugging() && tiering_state_ == kTieredDown &&
        code->tier() == ExecutionTier::kTurbofan) {
      liftoff_bailout_count_.fetch_add(1);
    }
  }
  WasmCodeRefScope::AddRef(code.get());
  WasmCode* result = code.get();
  owned_code_.emplace(result->instruction_start(), std::move(code));
  return result;
}

WasmCode* NativeModule::AddDeserializedCode(
    int index, Vector<const byte> instructions, int stack_slots,
    int tagged_parameter_slots, int safepoint_table_offset,
    int handler_table_offset, int constant_pool_offset,
    int code_comments_offset, int unpadded_binary_size,
    Vector<const byte> protected_instructions_data,
    Vector<const byte> reloc_info, Vector<const byte> source_position_table,
    WasmCode::Kind kind, ExecutionTier tier) {
  // CodeSpaceWriteScope is provided by the caller.
  Vector<uint8_t> dst_code_bytes =
      code_allocator_.AllocateForCode(this, instructions.size());
  memcpy(dst_code_bytes.begin(), instructions.begin(), instructions.size());

  std::unique_ptr<WasmCode> code{new WasmCode{
      this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
      safepoint_table_offset, handler_table_offset, constant_pool_offset,
      code_comments_offset, unpadded_binary_size, protected_instructions_data,
      reloc_info, source_position_table, kind, tier, kNoDebugging}};

  // Note: we do not flush the i-cache here, since the code needs to be
  // relocated anyway. The caller is responsible for flushing the i-cache later.

  return PublishCode(std::move(code));
}

std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
  base::MutexGuard lock(&allocation_mutex_);
  WasmCode** start = code_table_.get();
  WasmCode** end = start + module_->num_declared_functions;
  return std::vector<WasmCode*>{start, end};
}

WasmCode* NativeModule::GetCode(uint32_t index) const {
  base::MutexGuard guard(&allocation_mutex_);
  WasmCode* code = code_table_[declared_function_index(module(), index)];
  if (code) WasmCodeRefScope::AddRef(code);
  return code;
}

bool NativeModule::HasCode(uint32_t index) const {
  base::MutexGuard guard(&allocation_mutex_);
  return code_table_[declared_function_index(module(), index)] != nullptr;
}

bool NativeModule::HasCodeWithTier(uint32_t index, ExecutionTier tier) const {
  base::MutexGuard guard(&allocation_mutex_);
  return code_table_[declared_function_index(module(), index)] != nullptr &&
         code_table_[declared_function_index(module(), index)]->tier() == tier;
}

void NativeModule::SetWasmSourceMap(
    std::unique_ptr<WasmModuleSourceMap> source_map) {
  source_map_ = std::move(source_map);
}

WasmModuleSourceMap* NativeModule::GetWasmSourceMap() const {
  return source_map_.get();
}

WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
    int jump_table_size, base::AddressRegion region,
    const WasmCodeAllocator::OptionalLock& allocator_lock) {
  // Only call this if we really need a jump table.
  DCHECK_LT(0, jump_table_size);
  Vector<uint8_t> code_space = code_allocator_.AllocateForCodeInRegion(
      this, jump_table_size, region, allocator_lock);
  DCHECK(!code_space.empty());
  CODE_SPACE_WRITE_SCOPE
  ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
  std::unique_ptr<WasmCode> code{
      new WasmCode{this,                  // native_module
                   kAnonymousFuncIndex,   // index
                   code_space,            // instructions
                   0,                     // stack_slots
                   0,                     // tagged_parameter_slots
                   0,                     // safepoint_table_offset
                   jump_table_size,       // handler_table_offset
                   jump_table_size,       // constant_pool_offset
                   jump_table_size,       // code_comments_offset
                   jump_table_size,       // unpadded_binary_size
                   {},                    // protected_instructions
                   {},                    // reloc_info
                   {},                    // source_pos
                   WasmCode::kJumpTable,  // kind
                   ExecutionTier::kNone,  // tier
                   kNoDebugging}};        // for_debugging
  return PublishCode(std::move(code));
}

void NativeModule::PatchJumpTablesLocked(uint32_t slot_index, Address target) {
  // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
  DCHECK(!allocation_mutex_.TryLock());

  CODE_SPACE_WRITE_SCOPE
  for (auto& code_space_data : code_space_data_) {
    DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
    if (!code_space_data.jump_table) continue;
    PatchJumpTableLocked(code_space_data, slot_index, target);
  }
}

1267
void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
1268
                                        uint32_t slot_index, Address target) {
1269 1270 1271 1272 1273 1274
  // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
  DCHECK(!allocation_mutex_.TryLock());

  DCHECK_NOT_NULL(code_space_data.jump_table);
  DCHECK_NOT_NULL(code_space_data.far_jump_table);

  DCHECK_LT(slot_index, module_->num_declared_functions);
  Address jump_table_slot =
      code_space_data.jump_table->instruction_start() +
      JumpTableAssembler::JumpSlotIndexToOffset(slot_index);
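  // The far jump table contains one slot per runtime stub, followed by one
  // slot per declared function, hence the slot index is offset by
  // {WasmCode::kRuntimeStubCount}.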
  uint32_t far_jump_table_offset = JumpTableAssembler::FarJumpSlotIndexToOffset(
      WasmCode::kRuntimeStubCount + slot_index);
  // Only pass the far jump table start if the far jump table actually has a
  // slot for this function index (i.e. does not only contain runtime stubs).
  bool has_far_jump_slot =
      far_jump_table_offset <
      code_space_data.far_jump_table->instructions().size();
  Address far_jump_table_start =
      code_space_data.far_jump_table->instruction_start();
  Address far_jump_table_slot =
      has_far_jump_slot ? far_jump_table_start + far_jump_table_offset
                        : kNullAddress;
  JumpTableAssembler::PatchJumpTableSlot(jump_table_slot, far_jump_table_slot,
                                         target);
}

void NativeModule::AddCodeSpace(
    base::AddressRegion region,
    const WasmCodeAllocator::OptionalLock& allocator_lock) {
  // Each code space must be at least twice as large as the overhead per code
  // space. Otherwise, we are wasting too much memory.
  DCHECK_GE(region.size(),
            2 * OverheadPerCodeSpace(module()->num_declared_functions));

#if defined(V8_OS_WIN64)
  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space.
  // See src/heap/spaces.cc, MemoryAllocator::InitializeCodePageAllocator() and
  // https://cs.chromium.org/chromium/src/components/crash/content/app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
  // for details.
  if (engine_->code_manager()
          ->CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
    size_t size = Heap::GetCodeRangeReservedAreaSize();
    DCHECK_LT(0, size);
    Vector<byte> padding = code_allocator_.AllocateForCodeInRegion(
        this, size, region, allocator_lock);
    CHECK_EQ(reinterpret_cast<Address>(padding.begin()), region.begin());
    win64_unwindinfo::RegisterNonABICompliantCodeRange(
        reinterpret_cast<void*>(region.begin()), region.size());
  }
#endif  // V8_OS_WIN64

  WasmCodeRefScope code_ref_scope;
  CODE_SPACE_WRITE_SCOPE
  WasmCode* jump_table = nullptr;
  WasmCode* far_jump_table = nullptr;
  const uint32_t num_wasm_functions = module_->num_declared_functions;
  const bool is_first_code_space = code_space_data_.empty();
  // We always need a far jump table, because it contains the runtime stubs.
  const bool needs_far_jump_table = !FindJumpTablesForRegion(region).is_valid();
  const bool needs_jump_table = num_wasm_functions > 0 && needs_far_jump_table;

  if (needs_jump_table) {
    jump_table = CreateEmptyJumpTableInRegion(
        JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region,
        allocator_lock);
    CHECK(region.contains(jump_table->instruction_start()));
  }

  if (needs_far_jump_table) {
    int num_function_slots = NumWasmFunctionsInFarJumpTable(num_wasm_functions);
    far_jump_table = CreateEmptyJumpTableInRegion(
        JumpTableAssembler::SizeForNumberOfFarJumpSlots(
            WasmCode::kRuntimeStubCount,
            NumWasmFunctionsInFarJumpTable(num_function_slots)),
        region, allocator_lock);
    CHECK(region.contains(far_jump_table->instruction_start()));
    EmbeddedData embedded_data = EmbeddedData::FromBlob();
#define RUNTIME_STUB(Name) Builtins::k##Name,
#define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name)
    Builtins::Name stub_names[WasmCode::kRuntimeStubCount] = {
        WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
#undef RUNTIME_STUB
#undef RUNTIME_STUB_TRAP
    Address builtin_addresses[WasmCode::kRuntimeStubCount];
    for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
      Builtins::Name builtin = stub_names[i];
      CHECK(embedded_data.ContainsBuiltin(builtin));
      builtin_addresses[i] = embedded_data.InstructionStartOfBuiltin(builtin);
    }
    JumpTableAssembler::GenerateFarJumpTable(
        far_jump_table->instruction_start(), builtin_addresses,
        WasmCode::kRuntimeStubCount, num_function_slots);
  }

  if (is_first_code_space) {
    // This can be updated and accessed without locks, since the addition of the
    // first code space happens during initialization of the {NativeModule},
    // where no concurrent accesses are possible.
    main_jump_table_ = jump_table;
    main_far_jump_table_ = far_jump_table;
  }

  base::MutexGuard guard(&allocation_mutex_);
  code_space_data_.push_back(CodeSpaceData{region, jump_table, far_jump_table});

  if (jump_table && !is_first_code_space) {
    // Patch the new jump table(s) with existing functions. If this is the first
    // code space, there cannot be any functions that have been compiled yet.
    const CodeSpaceData& new_code_space_data = code_space_data_.back();
    for (uint32_t slot_index = 0; slot_index < num_wasm_functions;
         ++slot_index) {
      if (code_table_[slot_index]) {
        PatchJumpTableLocked(new_code_space_data, slot_index,
                             code_table_[slot_index]->instruction_start());
      } else if (lazy_compile_table_) {
        Address lazy_compile_target =
            lazy_compile_table_->instruction_start() +
            JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
        PatchJumpTableLocked(new_code_space_data, slot_index,
                             lazy_compile_target);
      }
    }
  }
}

namespace {
class NativeModuleWireBytesStorage final : public WireBytesStorage {
 public:
  explicit NativeModuleWireBytesStorage(
      std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes)
      : wire_bytes_(std::move(wire_bytes)) {}

  Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
    return std::atomic_load(&wire_bytes_)
        ->as_vector()
        .SubVector(ref.offset(), ref.end_offset());
  }

 private:
  const std::shared_ptr<OwnedVector<const uint8_t>> wire_bytes_;
};
}  // namespace

void NativeModule::SetWireBytes(OwnedVector<const uint8_t> wire_bytes) {
  auto shared_wire_bytes =
      std::make_shared<OwnedVector<const uint8_t>>(std::move(wire_bytes));
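  // Publish the new wire bytes atomically; background threads may read
  // {wire_bytes_} concurrently (see {NativeModuleWireBytesStorage::GetCode}).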
  std::atomic_store(&wire_bytes_, shared_wire_bytes);
  if (!shared_wire_bytes->empty()) {
    compilation_state_->SetWireBytesStorage(
        std::make_shared<NativeModuleWireBytesStorage>(
            std::move(shared_wire_bytes)));
  }
}

WasmCode* NativeModule::Lookup(Address pc) const {
  base::MutexGuard lock(&allocation_mutex_);
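  // {owned_code_} is keyed by instruction start address; find the last code
  // object starting at or before {pc} and check whether {pc} lies inside it.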
  auto iter = owned_code_.upper_bound(pc);
  if (iter == owned_code_.begin()) return nullptr;
  --iter;
  WasmCode* candidate = iter->second.get();
  DCHECK_EQ(candidate->instruction_start(), iter->first);
  if (!candidate->contains(pc)) return nullptr;
  WasmCodeRefScope::AddRef(candidate);
  return candidate;
}

uint32_t NativeModule::GetJumpTableOffset(uint32_t func_index) const {
  uint32_t slot_idx = declared_function_index(module(), func_index);
  return JumpTableAssembler::JumpSlotIndexToOffset(slot_idx);
}

Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
  // Return the jump table slot for that function index.
  DCHECK_NOT_NULL(main_jump_table_);
  uint32_t slot_offset = GetJumpTableOffset(func_index);
  DCHECK_LT(slot_offset, main_jump_table_->instructions().size());
  return main_jump_table_->instruction_start() + slot_offset;
}

NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegion(
    base::AddressRegion code_region) const {
  auto jump_table_usable = [code_region](const WasmCode* jump_table) {
    Address table_start = jump_table->instruction_start();
    Address table_end = table_start + jump_table->instructions().size();
    // Compute the maximum distance from anywhere in the code region to anywhere
    // in the jump table, avoiding any underflow.
    size_t max_distance = std::max(
        code_region.end() > table_start ? code_region.end() - table_start : 0,
        table_end > code_region.begin() ? table_end - code_region.begin() : 0);
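    // Both tables must be reachable via a near jump from anywhere inside
    // {code_region}; the exact reach is architecture dependent and bounded
    // here by {WasmCodeAllocator::kMaxCodeSpaceSize}.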
    return max_distance < WasmCodeAllocator::kMaxCodeSpaceSize;
  };

  // Fast path: Try to use {main_jump_table_} and {main_far_jump_table_}.
  // Access to these fields is possible without locking, since these fields are
  // initialized on construction of the {NativeModule}.
  if (main_far_jump_table_ && jump_table_usable(main_far_jump_table_) &&
      (main_jump_table_ == nullptr || jump_table_usable(main_jump_table_))) {
    return {
        main_jump_table_ ? main_jump_table_->instruction_start() : kNullAddress,
        main_far_jump_table_->instruction_start()};
  }

  // Otherwise, take the mutex and look for another suitable jump table.
  base::MutexGuard guard(&allocation_mutex_);
  for (auto& code_space_data : code_space_data_) {
    DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
    if (!code_space_data.far_jump_table) continue;
    // Only return these jump tables if they are reachable from the whole
    // {code_region}.
    if (kNeedsFarJumpsBetweenCodeSpaces &&
        (!jump_table_usable(code_space_data.far_jump_table) ||
         (code_space_data.jump_table &&
          !jump_table_usable(code_space_data.jump_table)))) {
      continue;
    }
    return {code_space_data.jump_table
                ? code_space_data.jump_table->instruction_start()
                : kNullAddress,
            code_space_data.far_jump_table->instruction_start()};
  }
  return {};
}

Address NativeModule::GetNearCallTargetForFunction(
    uint32_t func_index, const JumpTablesRef& jump_tables) const {
  DCHECK(jump_tables.is_valid());
  uint32_t slot_offset = GetJumpTableOffset(func_index);
  return jump_tables.jump_table_start + slot_offset;
}

Address NativeModule::GetNearRuntimeStubEntry(
    WasmCode::RuntimeStubId index, const JumpTablesRef& jump_tables) const {
  DCHECK(jump_tables.is_valid());
  auto offset = JumpTableAssembler::FarJumpSlotIndexToOffset(index);
  return jump_tables.far_jump_table_start + offset;
}

uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
    Address slot_address) const {
  WasmCodeRefScope code_refs;
  WasmCode* code = Lookup(slot_address);
  DCHECK_NOT_NULL(code);
  DCHECK_EQ(WasmCode::kJumpTable, code->kind());
  uint32_t slot_offset =
      static_cast<uint32_t>(slot_address - code->instruction_start());
  uint32_t slot_idx = JumpTableAssembler::SlotOffsetToIndex(slot_offset);
  DCHECK_LT(slot_idx, module_->num_declared_functions);
  DCHECK_EQ(slot_address,
            code->instruction_start() +
                JumpTableAssembler::JumpSlotIndexToOffset(slot_idx));
  return module_->num_imported_functions + slot_idx;
}

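// Reverse-maps an address inside a far jump table back to the runtime stub it
// dispatches to. Returns {WasmCode::kRuntimeStubCount} if {target} does not
// point at a runtime stub slot.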
WasmCode::RuntimeStubId NativeModule::GetRuntimeStubId(Address target) const {
  base::MutexGuard guard(&allocation_mutex_);

  for (auto& code_space_data : code_space_data_) {
    if (code_space_data.far_jump_table != nullptr &&
        code_space_data.far_jump_table->contains(target)) {
      uint32_t offset = static_cast<uint32_t>(
          target - code_space_data.far_jump_table->instruction_start());
      uint32_t index = JumpTableAssembler::FarJumpSlotOffsetToIndex(offset);
      if (index >= WasmCode::kRuntimeStubCount) continue;
      if (JumpTableAssembler::FarJumpSlotIndexToOffset(index) != offset) {
        continue;
      }
      return static_cast<WasmCode::RuntimeStubId>(index);
    }
  }

  // Invalid address.
  return WasmCode::kRuntimeStubCount;
}

NativeModule::~NativeModule() {
  TRACE_HEAP("Deleting native module: %p\n", this);
  // Cancel all background compilation before resetting any field of the
  // NativeModule or freeing anything.
  compilation_state_->CancelCompilation();
  engine_->FreeNativeModule(this);
  // Free the import wrapper cache before releasing the {WasmCode} objects in
  // {owned_code_}. The destructor of {WasmImportWrapperCache} still needs to
  // decrease reference counts on the {WasmCode} objects.
  import_wrapper_cache_.reset();
}

WasmCodeManager::WasmCodeManager(size_t max_committed)
    : max_committed_code_space_(max_committed),
      critical_committed_code_space_(max_committed / 2) {
  DCHECK_LE(max_committed, FLAG_wasm_max_code_space * MB);
}

#if defined(V8_OS_WIN64)
bool WasmCodeManager::CanRegisterUnwindInfoForNonABICompliantCodeRange() const {
  return win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
         FLAG_win64_unwinding_info;
}
#endif  // V8_OS_WIN64

void WasmCodeManager::Commit(base::AddressRegion region) {
  // TODO(v8:8462): Remove eager commit once perf supports remapping.
  if (V8_UNLIKELY(FLAG_perf_prof)) return;
  DCHECK(IsAligned(region.begin(), CommitPageSize()));
  DCHECK(IsAligned(region.size(), CommitPageSize()));
  // Reserve the size. Use CAS loop to avoid overflow on
  // {total_committed_code_space_}.
  size_t old_value = total_committed_code_space_.load();
  while (true) {
    DCHECK_GE(max_committed_code_space_, old_value);
    if (region.size() > max_committed_code_space_ - old_value) {
      V8::FatalProcessOutOfMemory(
          nullptr,
          "WasmCodeManager::Commit: Exceeding maximum wasm code space");
      UNREACHABLE();
    }
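    // Note: on failure, {compare_exchange_weak} reloads {old_value}, so the
    // next iteration re-checks the limit against the current value.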
    if (total_committed_code_space_.compare_exchange_weak(
            old_value, old_value + region.size())) {
      break;
    }
  }
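  // With write-protected code memory, pages are committed as read-write only
  // and made executable later (see {NativeModule::SetExecutable}).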
  PageAllocator::Permission permission = FLAG_wasm_write_protect_code_memory
                                             ? PageAllocator::kReadWrite
                                             : PageAllocator::kReadWriteExecute;

  TRACE_HEAP("Setting rw permissions for 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
             region.begin(), region.end());

  if (!SetPermissions(GetPlatformPageAllocator(), region.begin(), region.size(),
                      permission)) {
    // Highly unlikely.
    V8::FatalProcessOutOfMemory(
        nullptr,
        "WasmCodeManager::Commit: Cannot make pre-reserved region writable");
    UNREACHABLE();
  }
}

void WasmCodeManager::Decommit(base::AddressRegion region) {
  // TODO(v8:8462): Remove this once perf supports remapping.
  if (V8_UNLIKELY(FLAG_perf_prof)) return;
  PageAllocator* allocator = GetPlatformPageAllocator();
  DCHECK(IsAligned(region.begin(), allocator->CommitPageSize()));
  DCHECK(IsAligned(region.size(), allocator->CommitPageSize()));
  size_t old_committed = total_committed_code_space_.fetch_sub(region.size());
  DCHECK_LE(region.size(), old_committed);
  USE(old_committed);
  TRACE_HEAP("Discarding system pages 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
             region.begin(), region.end());
  CHECK(allocator->SetPermissions(reinterpret_cast<void*>(region.begin()),
                                  region.size(), PageAllocator::kNoAccess));
}

void WasmCodeManager::AssignRange(base::AddressRegion region,
                                  NativeModule* native_module) {
  base::MutexGuard lock(&native_modules_mutex_);
  lookup_map_.insert(std::make_pair(
      region.begin(), std::make_pair(region.end(), native_module)));
}

VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  DCHECK_GT(size, 0);
  size_t allocate_page_size = page_allocator->AllocatePageSize();
  size = RoundUp(size, allocate_page_size);
  if (!BackingStore::ReserveAddressSpace(size)) return {};
  if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();

  VirtualMemory mem(page_allocator, size, hint, allocate_page_size);
  if (!mem.IsReserved()) {
    BackingStore::ReleaseReservation(size);
    return {};
  }
  TRACE_HEAP("VMem alloc: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n", mem.address(),
             mem.end(), mem.size());

  // TODO(v8:8462): Remove eager commit once perf supports remapping.
  if (FLAG_perf_prof) {
    SetPermissions(GetPlatformPageAllocator(), mem.address(), mem.size(),
                   PageAllocator::kReadWriteExecute);
  }
  return mem;
}

namespace {
// The numbers here are rough estimates, used to calculate the size of the
// initial code reservation and for estimating the amount of external memory
// reported to the GC.
// They do not need to be accurate. Choosing them too small will result in
// separate code spaces being allocated (compile time and runtime overhead),
// choosing them too large results in over-reservation (virtual address space
// only).
// The current numbers have been determined on 2019-11-11 by clemensb@, based
// on one small and one large module compiled from C++ by Emscripten. If in
// doubt, they were chosen slightly larger than required, as over-reservation
// is not a big issue currently.
// Numbers will change when Liftoff or TurboFan evolve, other toolchains are
// used to produce the wasm code, or characteristics of wasm modules on the
// web change. They might require occasional tuning.
// This patch might help to find reasonable numbers for any future adaptation:
// https://crrev.com/c/1910945
#if V8_TARGET_ARCH_X64
constexpr size_t kTurbofanFunctionOverhead = 20;
constexpr size_t kTurbofanCodeSizeMultiplier = 3;
constexpr size_t kLiftoffFunctionOverhead = 60;
constexpr size_t kLiftoffCodeSizeMultiplier = 4;
constexpr size_t kImportSize = 350;
#elif V8_TARGET_ARCH_IA32
constexpr size_t kTurbofanFunctionOverhead = 20;
constexpr size_t kTurbofanCodeSizeMultiplier = 4;
constexpr size_t kLiftoffFunctionOverhead = 60;
constexpr size_t kLiftoffCodeSizeMultiplier = 5;
constexpr size_t kImportSize = 480;
#elif V8_TARGET_ARCH_ARM
constexpr size_t kTurbofanFunctionOverhead = 40;
constexpr size_t kTurbofanCodeSizeMultiplier = 4;
constexpr size_t kLiftoffFunctionOverhead = 108;
constexpr size_t kLiftoffCodeSizeMultiplier = 7;
constexpr size_t kImportSize = 750;
#elif V8_TARGET_ARCH_ARM64
constexpr size_t kTurbofanFunctionOverhead = 60;
constexpr size_t kTurbofanCodeSizeMultiplier = 4;
constexpr size_t kLiftoffFunctionOverhead = 80;
constexpr size_t kLiftoffCodeSizeMultiplier = 7;
constexpr size_t kImportSize = 750;
#else
// Other platforms should add their own estimates if needed. Numbers below are
// the minimum of other architectures.
constexpr size_t kTurbofanFunctionOverhead = 20;
constexpr size_t kTurbofanCodeSizeMultiplier = 3;
constexpr size_t kLiftoffFunctionOverhead = 60;
constexpr size_t kLiftoffCodeSizeMultiplier = 4;
constexpr size_t kImportSize = 350;
#endif
}  // namespace

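// In the estimates below, the {kCodeAlignment / 2} term approximates the
// average padding added when rounding each function up to {kCodeAlignment}.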
// static
size_t WasmCodeManager::EstimateLiftoffCodeSize(int body_size) {
  return kLiftoffFunctionOverhead + kCodeAlignment / 2 +
         body_size * kLiftoffCodeSizeMultiplier;
}

// static
size_t WasmCodeManager::EstimateNativeModuleCodeSize(const WasmModule* module,
                                                     bool include_liftoff) {
  int num_functions = static_cast<int>(module->num_declared_functions);
  int num_imported_functions = static_cast<int>(module->num_imported_functions);
  int code_section_length = 0;
  if (num_functions > 0) {
    DCHECK_EQ(module->functions.size(), num_imported_functions + num_functions);
    auto* first_fn = &module->functions[module->num_imported_functions];
    auto* last_fn = &module->functions.back();
    code_section_length =
        static_cast<int>(last_fn->code.end_offset() - first_fn->code.offset());
  }
  return EstimateNativeModuleCodeSize(num_functions, num_imported_functions,
                                      code_section_length, include_liftoff);
}

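// Rough illustrative example (X64 constants above, Liftoff included): a module
// with 100 declared functions, 10 imports and a 10 KB code section is
// estimated at roughly
//   jump_table_size + far_jump_table_size
//   + 100 * (20 + 60 + kCodeAlignment)   // per-function overhead
//   + 10240 * (3 + 4)                    // per code byte
//   + 10 * 350                           // per import
// bytes.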
// static
size_t WasmCodeManager::EstimateNativeModuleCodeSize(int num_functions,
                                                     int num_imported_functions,
                                                     int code_section_length,
                                                     bool include_liftoff) {
  const size_t overhead_per_function =
      kTurbofanFunctionOverhead + kCodeAlignment / 2 +
      (include_liftoff ? kLiftoffFunctionOverhead + kCodeAlignment / 2 : 0);
  const size_t overhead_per_code_byte =
      kTurbofanCodeSizeMultiplier +
      (include_liftoff ? kLiftoffCodeSizeMultiplier : 0);
  const size_t jump_table_size = RoundUp<kCodeAlignment>(
      JumpTableAssembler::SizeForNumberOfSlots(num_functions));
  const size_t far_jump_table_size =
      RoundUp<kCodeAlignment>(JumpTableAssembler::SizeForNumberOfFarJumpSlots(
          WasmCode::kRuntimeStubCount,
          NumWasmFunctionsInFarJumpTable(num_functions)));
  return jump_table_size                                 // jump table
         + far_jump_table_size                           // far jump table
         + overhead_per_function * num_functions         // per function
         + overhead_per_code_byte * code_section_length  // per code byte
         + kImportSize * num_imported_functions;         // per import
}

// static
size_t WasmCodeManager::EstimateNativeModuleMetaDataSize(
    const WasmModule* module) {
  size_t wasm_module_estimate = EstimateStoredSize(module);

  uint32_t num_wasm_functions = module->num_declared_functions;

  // TODO(wasm): Include wire bytes size.
  size_t native_module_estimate =
      sizeof(NativeModule) +                     /* NativeModule struct */
      (sizeof(WasmCode*) * num_wasm_functions) + /* code table size */
      (sizeof(WasmCode) * num_wasm_functions);   /* code object size */

  return wasm_module_estimate + native_module_estimate;
}

std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
    WasmEngine* engine, Isolate* isolate, const WasmFeatures& enabled,
    size_t code_size_estimate, std::shared_ptr<const WasmModule> module) {
  DCHECK_EQ(this, isolate->wasm_engine()->code_manager());
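  // If the critical threshold has been crossed, trigger a GC via a memory
  // pressure notification and move the threshold halfway between the current
  // committed size and the hard limit.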
  if (total_committed_code_space_.load() >
      critical_committed_code_space_.load()) {
    (reinterpret_cast<v8::Isolate*>(isolate))
        ->MemoryPressureNotification(MemoryPressureLevel::kCritical);
    size_t committed = total_committed_code_space_.load();
    DCHECK_GE(max_committed_code_space_, committed);
    critical_committed_code_space_.store(
        committed + (max_committed_code_space_ - committed) / 2);
  }

  // If we cannot add code space later, reserve enough address space up front.
  size_t code_vmem_size =
      ReservationSize(code_size_estimate, module->num_declared_functions, 0);

  // The '--wasm-max-initial-code-space-reservation' testing flag can be used
  // to reduce the maximum size of the initial code space reservation (in MB).
  if (FLAG_wasm_max_initial_code_space_reservation > 0) {
    size_t flag_max_bytes =
        static_cast<size_t>(FLAG_wasm_max_initial_code_space_reservation) * MB;
    if (flag_max_bytes < code_vmem_size) code_vmem_size = flag_max_bytes;
  }

  // Try up to two times; getting rid of dead JSArrayBuffer allocations might
  // require two GCs because the first GC may be incremental and may have
  // floating garbage.
  static constexpr int kAllocationRetries = 2;
  VirtualMemory code_space;
  for (int retries = 0;; ++retries) {
    code_space = TryAllocate(code_vmem_size);
    if (code_space.IsReserved()) break;
    if (retries == kAllocationRetries) {
      V8::FatalProcessOutOfMemory(isolate, "NewNativeModule");
      UNREACHABLE();
    }
    // Run one GC, then try the allocation again.
    isolate->heap()->MemoryPressureNotification(MemoryPressureLevel::kCritical,
                                                true);
  }

  Address start = code_space.address();
  size_t size = code_space.size();
  Address end = code_space.end();
  std::shared_ptr<NativeModule> ret;
  new NativeModule(engine, enabled, std::move(code_space), std::move(module),
                   isolate->async_counters(), &ret);
  // The constructor initialized the shared_ptr.
  DCHECK_NOT_NULL(ret);
  TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", ret.get(), start,
             size);

  base::MutexGuard lock(&native_modules_mutex_);
  lookup_map_.insert(std::make_pair(start, std::make_pair(end, ret.get())));
  return ret;
}

void NativeModule::SampleCodeSize(
    Counters* counters, NativeModule::CodeSamplingTime sampling_time) const {
  size_t code_size = sampling_time == kSampling
                         ? code_allocator_.committed_code_space()
                         : code_allocator_.generated_code_size();
  int code_size_mb = static_cast<int>(code_size / MB);
  Histogram* histogram = nullptr;
  switch (sampling_time) {
    case kAfterBaseline:
      histogram = counters->wasm_module_code_size_mb_after_baseline();
      break;
    case kAfterTopTier:
      histogram = counters->wasm_module_code_size_mb_after_top_tier();
      break;
    case kSampling: {
      histogram = counters->wasm_module_code_size_mb();
      // If this is a wasm module of >= 2MB, also sample the freed code size,
      // absolute and relative. Code GC does not happen on asm.js modules, and
      // small modules will never trigger GC anyway.
      size_t generated_size = code_allocator_.generated_code_size();
      if (generated_size >= 2 * MB && module()->origin == kWasmOrigin) {
        size_t freed_size = code_allocator_.freed_code_size();
        DCHECK_LE(freed_size, generated_size);
        int freed_percent = static_cast<int>(100 * freed_size / generated_size);
        counters->wasm_module_freed_code_size_percent()->AddSample(
            freed_percent);
      }
      break;
    }
  }
  histogram->AddSample(code_size_mb);
}

std::unique_ptr<WasmCode> NativeModule::AddCompiledCode(
    WasmCompilationResult result) {
  std::vector<std::unique_ptr<WasmCode>> code = AddCompiledCode({&result, 1});
  return std::move(code[0]);
}

std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
    Vector<WasmCompilationResult> results) {
  DCHECK(!results.empty());
  // First, allocate code space for all the results.
  size_t total_code_space = 0;
  for (auto& result : results) {
    DCHECK(result.succeeded());
    total_code_space += RoundUp<kCodeAlignment>(result.code_desc.instr_size);
  }
  Vector<byte> code_space =
      code_allocator_.AllocateForCode(this, total_code_space);
  // Lookup the jump tables to use once, then use for all code objects.
  auto jump_tables = FindJumpTablesForRegion(base::AddressRegionOf(code_space));

  std::vector<std::unique_ptr<WasmCode>> generated_code;
  generated_code.reserve(results.size());

  // Now copy the generated code into the code space and relocate it.
  CODE_SPACE_WRITE_SCOPE
  for (auto& result : results) {
    DCHECK_EQ(result.code_desc.buffer, result.instr_buffer.get());
    size_t code_size = RoundUp<kCodeAlignment>(result.code_desc.instr_size);
    Vector<byte> this_code_space = code_space.SubVector(0, code_size);
    code_space += code_size;
    generated_code.emplace_back(AddCodeWithCodeSpace(
        result.func_index, result.code_desc, result.frame_slot_count,
        result.tagged_parameter_slots,
        result.protected_instructions_data.as_vector(),
        result.source_positions.as_vector(), GetCodeKind(result),
        result.result_tier, result.for_debugging, this_code_space,
        jump_tables));
  }
  DCHECK_EQ(0, code_space.size());

  return generated_code;
}

void NativeModule::SetTieringState(TieringState new_tiering_state) {
  // Do not tier down asm.js (just never change the tiering state).
  if (module()->origin != kWasmOrigin) return;

  base::MutexGuard lock(&allocation_mutex_);
  tiering_state_ = new_tiering_state;
}

bool NativeModule::IsTieredDown() {
  base::MutexGuard lock(&allocation_mutex_);
  return tiering_state_ == kTieredDown;
}

void NativeModule::RecompileForTiering() {
  // Read the tiering state under the lock, then trigger recompilation after
  // releasing the lock. If the tiering state was changed when the triggered
  // compilation units finish, code installation will handle that correctly.
  TieringState current_state;
  {
    base::MutexGuard lock(&allocation_mutex_);
    current_state = tiering_state_;
  }
  RecompileNativeModule(this, current_state);
}

std::vector<int> NativeModule::FindFunctionsToRecompile(
    TieringState new_tiering_state) {
  base::MutexGuard guard(&allocation_mutex_);
  std::vector<int> function_indexes;
  int imported = module()->num_imported_functions;
  int declared = module()->num_declared_functions;
  for (int slot_index = 0; slot_index < declared; ++slot_index) {
    int function_index = imported + slot_index;
    WasmCode* code = code_table_[slot_index];
    bool code_is_good = new_tiering_state == kTieredDown
                            ? code && code->for_debugging()
                            : code && code->tier() == ExecutionTier::kTurbofan;
    if (!code_is_good) function_indexes.push_back(function_index);
  }
  return function_indexes;
}

void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
  // Free the code space.
  code_allocator_.FreeCode(codes);

  DebugInfo* debug_info = nullptr;
  {
    base::MutexGuard guard(&allocation_mutex_);
    debug_info = debug_info_.get();
    // Free the {WasmCode} objects. This will also unregister trap handler data.
    for (WasmCode* code : codes) {
      DCHECK_EQ(1, owned_code_.count(code->instruction_start()));
      owned_code_.erase(code->instruction_start());
    }
  }
  // Remove debug side tables for all removed code objects, after releasing our
  // lock. This is to avoid lock order inversion.
  if (debug_info) debug_info->RemoveDebugSideTables(codes);
}

size_t NativeModule::GetNumberOfCodeSpacesForTesting() const {
  return code_allocator_.GetNumCodeSpaces();
}

bool NativeModule::HasDebugInfo() const {
  base::MutexGuard guard(&allocation_mutex_);
  return debug_info_ != nullptr;
}

DebugInfo* NativeModule::GetDebugInfo() {
  base::MutexGuard guard(&allocation_mutex_);
  if (!debug_info_) debug_info_ = std::make_unique<DebugInfo>(this);
  return debug_info_.get();
}

void WasmCodeManager::FreeNativeModule(Vector<VirtualMemory> owned_code_space,
                                       size_t committed_size) {
  base::MutexGuard lock(&native_modules_mutex_);
  for (auto& code_space : owned_code_space) {
    DCHECK(code_space.IsReserved());
    TRACE_HEAP("VMem Release: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n",
               code_space.address(), code_space.end(), code_space.size());

#if defined(V8_OS_WIN64)
    if (CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
      win64_unwindinfo::UnregisterNonABICompliantCodeRange(
          reinterpret_cast<void*>(code_space.address()));
    }
#endif  // V8_OS_WIN64

    lookup_map_.erase(code_space.address());
    BackingStore::ReleaseReservation(code_space.size());
    code_space.Free();
    DCHECK(!code_space.IsReserved());
  }

  DCHECK(IsAligned(committed_size, CommitPageSize()));
  // TODO(v8:8462): Remove this once perf supports remapping.
  if (!FLAG_perf_prof) {
    size_t old_committed =
        total_committed_code_space_.fetch_sub(committed_size);
    DCHECK_LE(committed_size, old_committed);
    USE(old_committed);
  }
}

NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {
  base::MutexGuard lock(&native_modules_mutex_);
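  // {lookup_map_} maps each code region's start address to its end address
  // and owning module; find the last region starting at or before {pc}.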
  if (lookup_map_.empty()) return nullptr;

  auto iter = lookup_map_.upper_bound(pc);
  if (iter == lookup_map_.begin()) return nullptr;
  --iter;
  Address region_start = iter->first;
  Address region_end = iter->second.first;
  NativeModule* candidate = iter->second.second;

  DCHECK_NOT_NULL(candidate);
  return region_start <= pc && pc < region_end ? candidate : nullptr;
}

WasmCode* WasmCodeManager::LookupCode(Address pc) const {
  NativeModule* candidate = LookupNativeModule(pc);
  return candidate ? candidate->Lookup(pc) : nullptr;
}

// TODO(v8:7424): Code protection scopes are not yet supported with shared code
// enabled and need to be revisited.
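// Illustrative usage (not taken from a concrete call site):
//   NativeModuleModificationScope scope(native_module);
//   ... patch code of {native_module} ...
// Scopes may nest; only the outermost scope toggles the page permissions.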
NativeModuleModificationScope::NativeModuleModificationScope(
    NativeModule* native_module)
    : native_module_(native_module) {
  if (FLAG_wasm_write_protect_code_memory && native_module_ &&
      (native_module_->modification_scope_depth_++) == 0) {
    bool success = native_module_->SetExecutable(false);
    CHECK(success);
  }
}

NativeModuleModificationScope::~NativeModuleModificationScope() {
  if (FLAG_wasm_write_protect_code_memory && native_module_ &&
      (native_module_->modification_scope_depth_--) == 1) {
    bool success = native_module_->SetExecutable(true);
    CHECK(success);
  }
}

namespace {
thread_local WasmCodeRefScope* current_code_refs_scope = nullptr;
}  // namespace

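// Illustrative usage: code looked up while a {WasmCodeRefScope} is alive stays
// referenced until the scope dies, e.g.:
//   WasmCodeRefScope code_ref_scope;
//   WasmCode* code = native_module->GetCode(index);  // adds a ref to {code}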
WasmCodeRefScope::WasmCodeRefScope()
    : previous_scope_(current_code_refs_scope) {
  current_code_refs_scope = this;
}

WasmCodeRefScope::~WasmCodeRefScope() {
  DCHECK_EQ(this, current_code_refs_scope);
  current_code_refs_scope = previous_scope_;
  std::vector<WasmCode*> code_ptrs;
  code_ptrs.reserve(code_ptrs_.size());
  code_ptrs.assign(code_ptrs_.begin(), code_ptrs_.end());
  WasmCode::DecrementRefCount(VectorOf(code_ptrs));
}

// static
void WasmCodeRefScope::AddRef(WasmCode* code) {
  DCHECK_NOT_NULL(code);
  WasmCodeRefScope* current_scope = current_code_refs_scope;
  DCHECK_NOT_NULL(current_scope);
  auto entry = current_scope->code_ptrs_.insert(code);
  // If we added a new entry, increment the ref counter.
  if (entry.second) code->IncRef();
}

const char* GetRuntimeStubName(WasmCode::RuntimeStubId stub_id) {
#define RUNTIME_STUB_NAME(Name) #Name,
#define RUNTIME_STUB_NAME_TRAP(Name) "ThrowWasm" #Name,
  constexpr const char* runtime_stub_names[] = {WASM_RUNTIME_STUB_LIST(
      RUNTIME_STUB_NAME, RUNTIME_STUB_NAME_TRAP) "<unknown>"};
#undef RUNTIME_STUB_NAME
#undef RUNTIME_STUB_NAME_TRAP
  STATIC_ASSERT(arraysize(runtime_stub_names) ==
                WasmCode::kRuntimeStubCount + 1);

  DCHECK_GT(arraysize(runtime_stub_names), stub_id);
  return runtime_stub_names[stub_id];
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8
#undef TRACE_HEAP