// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/wasm-engine.h"

#include "src/base/functional.h"
#include "src/base/platform/time.h"
#include "src/common/globals.h"
#include "src/diagnostics/code-tracer.h"
#include "src/diagnostics/compilation-statistics.h"
#include "src/execution/frames.h"
#include "src/execution/v8threads.h"
#include "src/logging/counters.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-promise.h"
#include "src/objects/objects-inl.h"
#include "src/strings/string-hasher-inl.h"
#include "src/utils/ostreams.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/memory-protection-key.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/module-instantiate.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"

#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
#include "src/base/platform/wrappers.h"
#include "src/debug/wasm/gdb-server/gdb-server.h"
#endif  // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
namespace v8 {
namespace internal {
namespace wasm {

#define TRACE_CODE_GC(...)                                         \
  do {                                                             \
    if (FLAG_trace_wasm_code_gc) PrintF("[wasm-gc] " __VA_ARGS__); \
  } while (false)

namespace {
// A task to log a set of {WasmCode} objects in an isolate. It does not own any
// data itself, since it is owned by the platform, so lifetime is not really
// bound to the wasm engine.
class LogCodesTask : public Task {
 public:
  // {mutex} is the engine's mutex, {task_slot} the field in the engine's
  // per-isolate info where this task is registered while scheduled.
  LogCodesTask(base::Mutex* mutex, LogCodesTask** task_slot, Isolate* isolate,
               WasmEngine* engine)
      : mutex_(mutex),
        task_slot_(task_slot),
        isolate_(isolate),
        engine_(engine) {
    DCHECK_NOT_NULL(task_slot);
    DCHECK_NOT_NULL(isolate);
  }

  ~LogCodesTask() override {
    // If the platform deletes this task before executing it, we also deregister
    // it to avoid use-after-free from still-running background threads.
    if (!cancelled()) DeregisterTask();
  }

  void Run() override {
    // A cancelled task must not touch the isolate any more.
    if (cancelled()) return;
    DeregisterTask();
    engine_->LogOutstandingCodesForIsolate(isolate_);
  }

  void Cancel() {
    // Cancel will only be called on Isolate shutdown, which happens on the
    // Isolate's foreground thread. Thus no synchronization needed.
    isolate_ = nullptr;
  }

  // A task is "cancelled" iff {Cancel} cleared the isolate pointer.
  bool cancelled() const { return isolate_ == nullptr; }

  void DeregisterTask() {
    // The task will only be deregistered from the foreground thread (executing
    // this task or calling its destructor), thus we do not need synchronization
    // on this field access.
    if (task_slot_ == nullptr) return;  // already deregistered.
    // Remove this task from the {IsolateInfo} in the engine. The next
    // logging request will allocate and schedule a new task.
    base::MutexGuard guard(mutex_);
    DCHECK_EQ(this, *task_slot_);
    *task_slot_ = nullptr;
    task_slot_ = nullptr;
  }

 private:
  // The mutex of the WasmEngine.
  base::Mutex* const mutex_;
  // The slot in the WasmEngine where this LogCodesTask is stored. This is
  // cleared by this task before execution or on task destruction.
  LogCodesTask** task_slot_;
  // The isolate to log code for; set to nullptr by {Cancel}.
  Isolate* isolate_;
  WasmEngine* const engine_;
};

// Verify that {isolate} has no archived threads: the combination of archived
// threads and Wasm is not implemented, so any archived thread is fatal.
void CheckNoArchivedThreads(Isolate* isolate) {
  class ArchivedThreadsVisitor : public ThreadVisitor {
    void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
      // Archived threads are rarely used, and not combined with Wasm at the
      // moment. Implement this and test it properly once we have a use case for
      // that.
      FATAL("archived threads in combination with wasm not supported");
    }
  } visitor;
  isolate->thread_manager()->IterateArchivedThreads(&visitor);
}

class WasmGCForegroundTask : public CancelableTask {
116
 public:
117 118
  explicit WasmGCForegroundTask(Isolate* isolate)
      : CancelableTask(isolate->cancelable_task_manager()), isolate_(isolate) {}
119

120
  void RunInternal() final {
121
    WasmEngine* engine = isolate_->wasm_engine();
122 123 124
    // The stack can contain live frames, for instance when this is invoked
    // during a pause or a breakpoint.
    engine->ReportLiveCodeFromStackForGC(isolate_);
125 126 127 128 129 130
  }

 private:
  Isolate* isolate_;
};

// Holds a weak global handle to a {Script} together with data (the script ID
// and the source URL) that stays accessible without dereferencing the weak
// handle.
class WeakScriptHandle {
 public:
  explicit WeakScriptHandle(Handle<Script> script) : script_id_(script->id()) {
    DCHECK(script->name().IsString() || script->name().IsUndefined());
    if (script->name().IsString()) {
      std::unique_ptr<char[]> source_url =
          String::cast(script->name()).ToCString();
      // Convert from {unique_ptr} to {shared_ptr}.
      source_url_ = {source_url.release(), source_url.get_deleter()};
    }
    auto global_handle =
        script->GetIsolate()->global_handles()->Create(*script);
    location_ = std::make_unique<Address*>(global_handle.location());
    GlobalHandles::MakeWeak(location_.get());
  }

  // Usually the destructor of this class should always be called after the weak
  // callback because the Script keeps the NativeModule alive. So we expect the
  // handle to be destroyed and the location to be reset already.
  // We cannot check this because of one exception. When the native module is
  // freed during isolate shutdown, the destructor will be called
  // first, and the callback will never be called.
  ~WeakScriptHandle() = default;

  WeakScriptHandle(WeakScriptHandle&&) V8_NOEXCEPT = default;

  // NOTE(review): dereferences the stored handle location; presumably only
  // valid while the weak handle has not been cleared — confirm at call sites.
  Handle<Script> handle() const { return Handle<Script>(*location_); }

  int script_id() const { return script_id_; }

  const std::shared_ptr<const char>& source_url() const { return source_url_; }

 private:
  // Store the location in a unique_ptr so that its address stays the same even
  // when this object is moved/copied.
  std::unique_ptr<Address*> location_;

  // Store the script ID independent of the weak handle, such that it's always
  // available.
  int script_id_;

  // Similar for the source URL. We cannot dereference the Handle from arbitrary
  // threads, but we need the URL available for code logging.
  // The shared pointer is kept alive by unlogged code, even if this entry is
  // collected in the meantime.
  // TODO(chromium:1132260): Revisit this for huge URLs.
  std::shared_ptr<const char> source_url_;
};

}  // namespace

// Try to serve a {NativeModule} for {wire_bytes} from the cache. Returns
// nullptr if no entry exists (in that case a placeholder is inserted so that
// later requests for the same bytes wait for this compilation). If another
// thread is currently compiling the same module, blocks on {cache_cv_} until
// it finishes.
std::shared_ptr<NativeModule> NativeModuleCache::MaybeGetNativeModule(
    ModuleOrigin origin, Vector<const uint8_t> wire_bytes) {
  // Only actual Wasm modules participate in the cache.
  if (origin != kWasmOrigin) return nullptr;
  base::MutexGuard lock(&mutex_);
  size_t prefix_hash = PrefixHash(wire_bytes);
  NativeModuleCache::Key key{prefix_hash, wire_bytes};
  while (true) {
    auto it = map_.find(key);
    if (it == map_.end()) {
      // Even though this exact key is not in the cache, there might be a
      // matching prefix hash indicating that a streaming compilation is
      // currently compiling a module with the same prefix. {OnFinishedStream}
      // happens on the main thread too, so waiting for streaming compilation to
      // finish would create a deadlock. Instead, compile the module twice and
      // handle the conflict in {UpdateNativeModuleCache}.

      // Insert a {nullopt} entry to let other threads know that this
      // {NativeModule} is already being created on another thread.
      auto p = map_.emplace(key, base::nullopt);
      USE(p);
      DCHECK(p.second);
      return nullptr;
    }
    if (it->second.has_value()) {
      if (auto shared_native_module = it->second.value().lock()) {
        DCHECK_EQ(shared_native_module->wire_bytes(), wire_bytes);
        return shared_native_module;
      }
    }
    // Entry is a placeholder or the module already died; wait for the
    // compiling thread to publish (or fail) and check again.
    // TODO(11858): This deadlocks in predictable mode, because there is only a
    // single thread.
    cache_cv_.Wait(&mutex_);
  }
}

// Try to claim ownership of a streaming compilation for {prefix_hash}.
// Returns false if an entry with the same prefix already exists (some other
// compilation got there first). Otherwise inserts a prefix-only placeholder
// and returns true.
bool NativeModuleCache::GetStreamingCompilationOwnership(size_t prefix_hash) {
  base::MutexGuard lock(&mutex_);
  const Key prefix_key{prefix_hash, {}};
  auto it = map_.lower_bound(prefix_key);
  const bool prefix_taken =
      it != map_.end() && it->first.prefix_hash == prefix_hash;
  if (prefix_taken) {
    DCHECK_IMPLIES(!it->first.bytes.empty(),
                   PrefixHash(it->first.bytes) == prefix_hash);
    return false;
  }
  DCHECK_EQ(0, map_.count(prefix_key));
  map_.emplace(prefix_key, base::nullopt);
  return true;
}

// Remove the prefix-only placeholder of a failed streaming compilation and
// wake up all threads waiting on the cache.
void NativeModuleCache::StreamingCompilationFailed(size_t prefix_hash) {
  base::MutexGuard lock(&mutex_);
  const Key placeholder{prefix_hash, {}};
  DCHECK_EQ(1, map_.count(placeholder));
  map_.erase(placeholder);
  cache_cv_.NotifyAll();
}

// Publish {native_module} in the cache, or — if another thread concurrently
// finished a module for the same bytes — return that conflicting module
// instead. Also removes the prefix-only streaming placeholder and wakes up
// threads waiting in {MaybeGetNativeModule}. With {error} set, the module is
// not inserted into the cache.
std::shared_ptr<NativeModule> NativeModuleCache::Update(
    std::shared_ptr<NativeModule> native_module, bool error) {
  DCHECK_NOT_NULL(native_module);
  // Non-wasm (asm.js) modules are never cached.
  if (native_module->module()->origin != kWasmOrigin) return native_module;
  Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
  DCHECK(!wire_bytes.empty());
  size_t prefix_hash = PrefixHash(native_module->wire_bytes());
  base::MutexGuard lock(&mutex_);
  // Drop the placeholder that was keyed by prefix hash only.
  map_.erase(Key{prefix_hash, {}});
  const Key key{prefix_hash, wire_bytes};
  auto it = map_.find(key);
  if (it != map_.end()) {
    if (it->second.has_value()) {
      auto conflicting_module = it->second.value().lock();
      if (conflicting_module != nullptr) {
        DCHECK_EQ(conflicting_module->wire_bytes(), wire_bytes);
        return conflicting_module;
      }
    }
    // Stale entry (placeholder or dead module); replace it below.
    map_.erase(it);
  }
  if (!error) {
    // The key now points to the new native module's owned copy of the bytes,
    // so that it stays valid until the native module is freed and erased from
    // the map.
    auto p = map_.emplace(
        key, base::Optional<std::weak_ptr<NativeModule>>(native_module));
    USE(p);
    DCHECK(p.second);
  }
  cache_cv_.NotifyAll();
  return native_module;
}

// Remove the cache entry of a dying {native_module} (if any) and notify
// waiting threads.
void NativeModuleCache::Erase(NativeModule* native_module) {
  // Only Wasm-origin modules are ever inserted (see {MaybeGetNativeModule}).
  if (native_module->module()->origin != kWasmOrigin) return;
  // Happens in some tests where bytes are set directly.
  if (native_module->wire_bytes().empty()) return;
  base::MutexGuard lock(&mutex_);
  Vector<const uint8_t> bytes = native_module->wire_bytes();
  map_.erase(Key{PrefixHash(bytes), bytes});
  cache_cv_.NotifyAll();
}

// static
// Hash a byte vector; used for full wire bytes and for section payloads.
size_t NativeModuleCache::WireBytesHash(Vector<const uint8_t> bytes) {
  const char* data = reinterpret_cast<const char*>(bytes.begin());
  return StringHasher::HashSequentialString(data, bytes.length(),
                                            kZeroHashSeed);
}

// static
// Hash of the module "prefix" (everything before the code section). This must
// stay consistent with how streaming compilation hashes the prefix, so that
// a streaming and a non-streaming compilation of the same bytes collide in
// the cache.
size_t NativeModuleCache::PrefixHash(Vector<const uint8_t> wire_bytes) {
  // Compute the hash as a combined hash of the sections up to the code section
  // header, to mirror the way streaming compilation does it.
  Decoder decoder(wire_bytes.begin(), wire_bytes.end());
  // The first 8 bytes are the module header; they are hashed as-is.
  decoder.consume_bytes(8, "module header");
  size_t hash = NativeModuleCache::WireBytesHash(wire_bytes.SubVector(0, 8));
  SectionCode section_id = SectionCode::kUnknownSectionCode;
  while (decoder.ok() && decoder.more()) {
    section_id = static_cast<SectionCode>(decoder.consume_u8());
    uint32_t section_size = decoder.consume_u32v("section size");
    if (section_id == SectionCode::kCodeSectionCode) {
      uint32_t num_functions = decoder.consume_u32v("num functions");
      // If {num_functions} is 0, the streaming decoder skips the section. Do
      // the same here to ensure hashes are consistent.
      if (num_functions != 0) {
        hash = base::hash_combine(hash, section_size);
      }
      break;
    }
    // For all sections before the code section, hash the whole payload.
    const uint8_t* payload_start = decoder.pc();
    decoder.consume_bytes(section_size, "section payload");
    size_t section_hash = NativeModuleCache::WireBytesHash(
        Vector<const uint8_t>(payload_start, section_size));
    hash = base::hash_combine(hash, section_hash);
  }
  return hash;
}

// Bookkeeping for the currently running Wasm code GC (only one GC runs at a
// time; see {next_gc_sequence_index} below).
struct WasmEngine::CurrentGCInfo {
  explicit CurrentGCInfo(int8_t gc_sequence_index)
      : gc_sequence_index(gc_sequence_index) {
    DCHECK_NE(0, gc_sequence_index);
  }

  // Set of isolates that did not scan their stack yet for used WasmCode, and
  // their scheduled foreground task.
  std::unordered_map<Isolate*, WasmGCForegroundTask*> outstanding_isolates;

  // Set of dead code. Filled with all potentially dead code on initialization.
  // Code that is still in-use is removed by the individual isolates.
  std::unordered_set<WasmCode*> dead_code;

  // The number of GCs triggered in the native module that triggered this GC.
  // This is stored in the histogram for each participating isolate during
  // execution of that isolate's foreground task.
  const int8_t gc_sequence_index;

  // If during this GC, another GC was requested, we skipped that other GC (we
  // only run one GC at a time). Remember though to trigger another one once
  // this one finishes. {next_gc_sequence_index} is 0 if no next GC is needed,
  // and >0 otherwise. It stores the {num_code_gcs_triggered} of the native
  // module which triggered the next GC.
  int8_t next_gc_sequence_index = 0;

  // The start time of this GC; used for tracing and sampled via {Counters}.
  // Can be null ({TimeTicks::IsNull()}) if timer is not high resolution.
  base::TimeTicks start_time;
};

// Per-isolate state kept by the engine while the isolate is registered.
struct WasmEngine::IsolateInfo {
  explicit IsolateInfo(Isolate* isolate)
      : log_codes(WasmCode::ShouldBeLogged(isolate)),
        async_counters(isolate->async_counters()),
        wrapper_compilation_barrier_(std::make_shared<OperationsBarrier>()) {
    // Cache the isolate's foreground task runner so background threads can
    // post tasks without touching the isolate directly.
    v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
    v8::Platform* platform = V8::GetCurrentPlatform();
    foreground_task_runner = platform->GetForegroundTaskRunner(v8_isolate);
  }

#ifdef DEBUG
  ~IsolateInfo() {
    // Before destructing, the {WasmEngine} must have cleared outstanding code
    // to log.
    DCHECK_EQ(0, code_to_log.size());
  }
#endif

  // All native modules that are being used by this Isolate.
  std::unordered_set<NativeModule*> native_modules;

  // Scripts created for each native module in this isolate.
  std::unordered_map<NativeModule*, WeakScriptHandle> scripts;

  // Caches whether code needs to be logged on this isolate.
  bool log_codes;

  // The currently scheduled LogCodesTask.
  LogCodesTask* log_codes_task = nullptr;

  // Maps script ID to vector of code objects that still need to be logged, and
  // the respective source URL.
  struct CodeToLogPerScript {
    std::vector<WasmCode*> code;
    std::shared_ptr<const char> source_url;
  };
  std::unordered_map<int, CodeToLogPerScript> code_to_log;

  // The foreground task runner of the isolate (can be called from background).
  std::shared_ptr<v8::TaskRunner> foreground_task_runner;

  const std::shared_ptr<Counters> async_counters;

  // Keep new modules in tiered down state.
  bool keep_tiered_down = false;

  // Keep track whether we already added a sample for PKU support (we only want
  // one sample per Isolate).
  bool pku_support_sampled = false;

  // Elapsed time since last throw/rethrow/catch event.
  base::ElapsedTimer throw_timer;
  base::ElapsedTimer rethrow_timer;
  base::ElapsedTimer catch_timer;

  // Total number of exception events in this isolate.
  int throw_count = 0;
  int rethrow_count = 0;
  int catch_count = 0;

  // Operations barrier to synchronize on wrapper compilation on isolate
  // shutdown.
  // TODO(wasm): Remove this once we can use the generic js-to-wasm wrapper
  // everywhere.
  std::shared_ptr<OperationsBarrier> wrapper_compilation_barrier_;
};

// Per-{NativeModule} bookkeeping kept by the engine, mainly for code GC.
struct WasmEngine::NativeModuleInfo {
  explicit NativeModuleInfo(std::weak_ptr<NativeModule> native_module)
      : weak_ptr(std::move(native_module)) {}

  // Weak pointer, to gain back a shared_ptr if needed.
  std::weak_ptr<NativeModule> weak_ptr;

  // Set of isolates using this NativeModule.
  std::unordered_set<Isolate*> isolates;

  // Set of potentially dead code. This set holds one ref for each code object,
  // until code is detected to be really dead. At that point, the ref count is
  // decremented and code is moved to the {dead_code} set. If the code is
  // finally deleted, it is also removed from {dead_code}.
  std::unordered_set<WasmCode*> potentially_dead_code;

  // Code that is not being executed in any isolate any more, but the ref count
  // did not drop to zero yet.
  std::unordered_set<WasmCode*> dead_code;

  // Number of code GCs triggered because code in this native module became
  // potentially dead.
  int8_t num_code_gcs_triggered = 0;
};

// Construct the engine; the code manager is sized by the
// --wasm-max-code-space flag (value in MB).
WasmEngine::WasmEngine() : code_manager_(FLAG_wasm_max_code_space * MB) {}

// At engine shutdown, all dependent objects (compile jobs, isolates, native
// modules, cache entries) must already be gone; this is verified via DCHECKs.
WasmEngine::~WasmEngine() {
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
  // Synchronize on the GDB-remote thread, if running.
  gdb_server_.reset();
#endif  // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING

  // Cancel engine-scoped operations and wait for ongoing ones to finish.
  operations_barrier_->CancelAndWait();

  // All AsyncCompileJobs have been canceled.
  DCHECK(async_compile_jobs_.empty());
  // All Isolates have been deregistered.
  DCHECK(isolates_.empty());
  // All NativeModules did die.
  DCHECK(native_modules_.empty());
  // Native module cache does not leak.
  DCHECK(native_module_cache_.empty());
}

// Synchronously validate the given wire bytes under the {enabled} feature
// set. Returns true iff the bytes decode successfully as a Wasm module.
bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
                              const ModuleWireBytes& bytes) {
  TRACE_EVENT0("v8.wasm", "wasm.SyncValidate");
  // TODO(titzer): remove dependency on the isolate.
  const bool has_bytes = bytes.start() != nullptr && bytes.length() != 0;
  if (!has_bytes) return false;
  return DecodeWasmModule(
             enabled, bytes.start(), bytes.end(), true, kWasmOrigin,
             isolate->counters(), isolate->metrics_recorder(),
             isolate->GetOrRegisterRecorderContextId(isolate->native_context()),
             DecodingMethod::kSync, allocator())
      .ok();
}

// Synchronously compile an asm.js module that was already translated to Wasm
// bytes by the asm.js parser. A decode failure here indicates a bug in the
// translation and crashes the process.
MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
    Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
    Vector<const byte> asm_js_offset_table_bytes,
    Handle<HeapNumber> uses_bitset, LanguageMode language_mode) {
  int compilation_id = next_compilation_id_.fetch_add(1);
  TRACE_EVENT1("v8.wasm", "wasm.SyncCompileTranslatedAsmJs", "id",
               compilation_id);
  // The origin records whether the asm.js source was sloppy or strict mode.
  ModuleOrigin origin = language_mode == LanguageMode::kSloppy
                            ? kAsmJsSloppyOrigin
                            : kAsmJsStrictOrigin;
  ModuleResult result = DecodeWasmModule(
      WasmFeatures::ForAsmjs(), bytes.start(), bytes.end(), false, origin,
      isolate->counters(), isolate->metrics_recorder(),
      isolate->GetOrRegisterRecorderContextId(isolate->native_context()),
      DecodingMethod::kSync, allocator());
  if (result.failed()) {
    // This happens once in a while when we have missed some limit check
    // in the asm parser. Output an error message to help diagnose, but crash.
    std::cout << result.error().message();
    UNREACHABLE();
  }

  result.value()->asm_js_offset_information =
      std::make_unique<AsmJsOffsetInformation>(asm_js_offset_table_bytes);

  // Transfer ownership of the WasmModule to the {Managed<WasmModule>} generated
  // in {CompileToNativeModule}.
  Handle<FixedArray> export_wrappers;
  std::shared_ptr<NativeModule> native_module = CompileToNativeModule(
      isolate, WasmFeatures::ForAsmjs(), thrower, std::move(result).value(),
      bytes, &export_wrappers, compilation_id);
  if (!native_module) return {};

  return AsmWasmData::New(isolate, std::move(native_module), export_wrappers,
                          uses_bitset);
}

// Turn previously compiled asm.js data into a {WasmModuleObject} attached to
// the given script.
Handle<WasmModuleObject> WasmEngine::FinalizeTranslatedAsmJs(
    Isolate* isolate, Handle<AsmWasmData> asm_wasm_data,
    Handle<Script> script) {
  std::shared_ptr<NativeModule> shared_module =
      asm_wasm_data->managed_native_module().get();
  Handle<FixedArray> wrappers =
      handle(asm_wasm_data->export_wrappers(), isolate);
  return WasmModuleObject::New(isolate, std::move(shared_module), script,
                               wrappers);
}

// Synchronously decode and compile {bytes} into a {WasmModuleObject}.
// Reports failures through {thrower} and returns the empty handle.
MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
    Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
    const ModuleWireBytes& bytes) {
  int compilation_id = next_compilation_id_.fetch_add(1);
  TRACE_EVENT1("v8.wasm", "wasm.SyncCompile", "id", compilation_id);
  ModuleResult result = DecodeWasmModule(
      enabled, bytes.start(), bytes.end(), false, kWasmOrigin,
      isolate->counters(), isolate->metrics_recorder(),
      isolate->GetOrRegisterRecorderContextId(isolate->native_context()),
      DecodingMethod::kSync, allocator());
  if (result.failed()) {
    thrower->CompileFailed(result.error());
    return {};
  }

  // Transfer ownership of the WasmModule to the {Managed<WasmModule>} generated
  // in {CompileToNativeModule}.
  Handle<FixedArray> export_wrappers;
  std::shared_ptr<NativeModule> native_module = CompileToNativeModule(
      isolate, enabled, thrower, std::move(result).value(), bytes,
      &export_wrappers, compilation_id);
  if (!native_module) return {};

#ifdef DEBUG
  // Ensure that code GC will check this isolate for live code.
  {
    base::MutexGuard lock(&mutex_);
    DCHECK_EQ(1, isolates_.count(isolate));
    DCHECK_EQ(1, isolates_[isolate]->native_modules.count(native_module.get()));
    DCHECK_EQ(1, native_modules_.count(native_module.get()));
    DCHECK_EQ(1, native_modules_[native_module.get()]->isolates.count(isolate));
  }
#endif

  constexpr Vector<const char> kNoSourceUrl;
  Handle<Script> script =
      GetOrCreateScript(isolate, native_module, kNoSourceUrl);

  native_module->LogWasmCodes(isolate, *script);

  // Create the compiled module object and populate with compiled functions
  // and information needed at instantiation time. This object needs to be
  // serializable. Instantiation may occur off a deserialized version of this
  // object.
  Handle<WasmModuleObject> module_object = WasmModuleObject::New(
      isolate, std::move(native_module), script, export_wrappers);

  // Finish the Wasm script now and make it public to the debugger.
  isolate->debug()->OnAfterCompile(script);
  return module_object;
}

// Synchronously instantiate {module_object} with the given imports and
// optional memory. Errors are reported via {thrower}.
MaybeHandle<WasmInstanceObject> WasmEngine::SyncInstantiate(
    Isolate* isolate, ErrorThrower* thrower,
    Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
    MaybeHandle<JSArrayBuffer> memory) {
  TRACE_EVENT0("v8.wasm", "wasm.SyncInstantiate");
  return InstantiateToInstanceObject(isolate, thrower, module_object, imports,
                                     memory);
}

// Instantiate {module_object} and report success or failure through
// {resolver} instead of throwing; any pending JS exception raised during
// instantiation is moved off the isolate and into the resolver.
void WasmEngine::AsyncInstantiate(
    Isolate* isolate, std::unique_ptr<InstantiationResultResolver> resolver,
    Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports) {
  ErrorThrower thrower(isolate, "WebAssembly.instantiate()");
  TRACE_EVENT0("v8.wasm", "wasm.AsyncInstantiate");
  // Instantiate a TryCatch so that caught exceptions won't progagate out.
  // They will still be set as pending exceptions on the isolate.
  // TODO(clemensb): Avoid TryCatch, use Execution::TryCall internally to invoke
  // start function and report thrown exception explicitly via out argument.
  v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
  catcher.SetVerbose(false);
  catcher.SetCaptureMessage(false);

  MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
      isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());

  if (!instance_object.is_null()) {
    resolver->OnInstantiationSucceeded(instance_object.ToHandleChecked());
    return;
  }

  if (isolate->has_pending_exception()) {
    // The JS code executed during instantiation has thrown an exception.
    // We have to move the exception to the promise chain.
    Handle<Object> exception(isolate->pending_exception(), isolate);
    isolate->clear_pending_exception();
    *isolate->external_caught_exception_address() = false;
    resolver->OnInstantiationFailed(exception);
    // The pending exception was reported; discard whatever {thrower} holds.
    thrower.Reset();
  } else {
    DCHECK(thrower.error());
    resolver->OnInstantiationFailed(thrower.Reify());
  }
}

void WasmEngine::AsyncCompile(
621
    Isolate* isolate, const WasmFeatures& enabled,
622
    std::shared_ptr<CompilationResultResolver> resolver,
623 624
    const ModuleWireBytes& bytes, bool is_shared,
    const char* api_method_name_for_errors) {
625 626
  int compilation_id = next_compilation_id_.fetch_add(1);
  TRACE_EVENT1("v8.wasm", "wasm.AsyncCompile", "id", compilation_id);
627 628
  if (!FLAG_wasm_async_compilation) {
    // Asynchronous compilation disabled; fall back on synchronous compilation.
629
    ErrorThrower thrower(isolate, api_method_name_for_errors);
630 631 632 633
    MaybeHandle<WasmModuleObject> module_object;
    if (is_shared) {
      // Make a copy of the wire bytes to avoid concurrent modification.
      std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
634
      memcpy(copy.get(), bytes.start(), bytes.length());
635
      ModuleWireBytes bytes_copy(copy.get(), copy.get() + bytes.length());
636
      module_object = SyncCompile(isolate, enabled, &thrower, bytes_copy);
637 638
    } else {
      // The wire bytes are not shared, OK to use them directly.
639
      module_object = SyncCompile(isolate, enabled, &thrower, bytes);
640 641
    }
    if (thrower.error()) {
642
      resolver->OnCompilationFailed(thrower.Reify());
643 644 645
      return;
    }
    Handle<WasmModuleObject> module = module_object.ToHandleChecked();
646
    resolver->OnCompilationSucceeded(module);
647 648 649 650 651
    return;
  }

  if (FLAG_wasm_test_streaming) {
    std::shared_ptr<StreamingDecoder> streaming_decoder =
652 653 654
        StartStreamingCompilation(
            isolate, enabled, handle(isolate->context(), isolate),
            api_method_name_for_errors, std::move(resolver));
655 656 657 658 659 660 661
    streaming_decoder->OnBytesReceived(bytes.module_bytes());
    streaming_decoder->Finish();
    return;
  }
  // Make a copy of the wire bytes in case the user program changes them
  // during asynchronous compilation.
  std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
662
  memcpy(copy.get(), bytes.start(), bytes.length());
663

664 665 666 667
  AsyncCompileJob* job = CreateAsyncCompileJob(
      isolate, enabled, std::move(copy), bytes.length(),
      handle(isolate->context(), isolate), api_method_name_for_errors,
      std::move(resolver), compilation_id);
668 669 670 671
  job->Start();
}

std::shared_ptr<StreamingDecoder> WasmEngine::StartStreamingCompilation(
672
    Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context,
673
    const char* api_method_name,
674
    std::shared_ptr<CompilationResultResolver> resolver) {
675 676 677
  int compilation_id = next_compilation_id_.fetch_add(1);
  TRACE_EVENT1("v8.wasm", "wasm.StartStreamingCompilation", "id",
               compilation_id);
678 679 680
  if (FLAG_wasm_async_compilation) {
    AsyncCompileJob* job = CreateAsyncCompileJob(
        isolate, enabled, std::unique_ptr<byte[]>(nullptr), 0, context,
681
        api_method_name, std::move(resolver), compilation_id);
682 683 684 685
    return job->CreateStreamingDecoder();
  }
  return StreamingDecoder::CreateSyncStreamingDecoder(
      isolate, enabled, context, api_method_name, std::move(resolver));
686 687
}

void WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module,
689
                                 uint32_t function_index, ExecutionTier tier) {
690
  // Note we assume that "one-off" compilations can discard detected features.
691
  WasmFeatures detected = WasmFeatures::None();
692
  WasmCompilationUnit::CompileWasmFunction(
693
      isolate, native_module, &detected,
694
      &native_module->module()->functions[function_index], tier);
695 696
}

// Put all modules used by {isolate} into the tiered-down state (e.g. for
// debugging). Recompilation happens after the engine mutex is released (the
// sibling {TierUpAllModulesPerIsolate} documents the lock-inversion risk).
void WasmEngine::TierDownAllModulesPerIsolate(Isolate* isolate) {
  std::vector<std::shared_ptr<NativeModule>> native_modules;
  {
    base::MutexGuard lock(&mutex_);
    // Nothing to do if this isolate already keeps its modules tiered down.
    if (isolates_[isolate]->keep_tiered_down) return;
    isolates_[isolate]->keep_tiered_down = true;
    for (auto* native_module : isolates_[isolate]->native_modules) {
      native_module->SetTieringState(kTieredDown);
      DCHECK_EQ(1, native_modules_.count(native_module));
      // Only collect modules that are still alive (weak_ptr still locks).
      if (auto shared_ptr = native_modules_[native_module]->weak_ptr.lock()) {
        native_modules.emplace_back(std::move(shared_ptr));
      }
    }
  }
  for (auto& native_module : native_modules) {
    native_module->RecompileForTiering();
  }
}

// Release the tier-down request of {isolate} and tier modules back up, unless
// another isolate still requires them to stay tiered down.
void WasmEngine::TierUpAllModulesPerIsolate(Isolate* isolate) {
  // Only trigger recompilation after releasing the mutex, otherwise we risk
  // deadlocks because of lock inversion. The bool tells whether the module
  // needs recompilation for tier up.
  std::vector<std::pair<std::shared_ptr<NativeModule>, bool>> native_modules;
  {
    base::MutexGuard lock(&mutex_);
    isolates_[isolate]->keep_tiered_down = false;
    // A module may be tiered up only if no isolate using it still requests
    // the tiered-down state.
    auto test_can_tier_up = [this](NativeModule* native_module) {
      DCHECK_EQ(1, native_modules_.count(native_module));
      for (auto* isolate : native_modules_[native_module]->isolates) {
        DCHECK_EQ(1, isolates_.count(isolate));
        if (isolates_[isolate]->keep_tiered_down) return false;
      }
      return true;
    };
    for (auto* native_module : isolates_[isolate]->native_modules) {
      DCHECK_EQ(1, native_modules_.count(native_module));
      auto shared_ptr = native_modules_[native_module]->weak_ptr.lock();
      if (!shared_ptr) continue;  // The module is not used any more.
      if (!native_module->IsTieredDown()) continue;
      // Only start tier-up if no other isolate needs this module in tiered
      // down state.
      bool tier_up = test_can_tier_up(native_module);
      if (tier_up) native_module->SetTieringState(kTieredUp);
      native_modules.emplace_back(std::move(shared_ptr), tier_up);
    }
  }
  for (auto& entry : native_modules) {
    auto& native_module = entry.first;
    bool tier_up = entry.second;
    // Remove all breakpoints set by this isolate.
    if (native_module->HasDebugInfo()) {
      native_module->GetDebugInfo()->RemoveIsolate(isolate);
    }
    if (tier_up) native_module->RecompileForTiering();
  }
}

755 756
// Return the shared {NativeModule} backing {module_object}, e.g. for
// transferring it to another isolate.
std::shared_ptr<NativeModule> WasmEngine::ExportNativeModule(
    Handle<WasmModuleObject> module_object) {
  return module_object->shared_native_module();
}

760 761 762
namespace {
// Create a new {Script} object representing {native_module} in {isolate},
// including a synthesized or caller-provided source URL, an optional source
// mapping URL from the module's debug symbols, and a {Managed} reference that
// keeps the shared {NativeModule} alive as long as the script lives.
Handle<Script> CreateWasmScript(Isolate* isolate,
                                std::shared_ptr<NativeModule> native_module,
                                Vector<const char> source_url) {
  Handle<Script> script =
      isolate->factory()->NewScript(isolate->factory()->undefined_value());
  script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
  script->set_context_data(isolate->native_context()->debug_context_id());
  script->set_type(Script::TYPE_WASM);

  Vector<const uint8_t> wire_bytes = native_module->wire_bytes();

  // The source URL of the script is
  // - the original source URL if available (from the streaming API),
  // - wasm://wasm/<module name>-<hash> if a module name has been set, or
  // - wasm://wasm/<hash> otherwise.
  const WasmModule* module = native_module->module();
  Handle<String> url_str;
  if (!source_url.empty()) {
    url_str = isolate->factory()
                  ->NewStringFromUtf8(source_url, AllocationType::kOld)
                  .ToHandleChecked();
  } else {
    // Hash the wire bytes to make the generated URL reproducible and unique.
    int hash = StringHasher::HashSequentialString(
        reinterpret_cast<const char*>(wire_bytes.begin()), wire_bytes.length(),
        kZeroHashSeed);

    EmbeddedVector<char, 32> buffer;
    if (module->name.is_empty()) {
      // Build the URL in the form "wasm://wasm/<hash>".
      int url_len = SNPrintF(buffer, "wasm://wasm/%08x", hash);
      DCHECK(url_len >= 0 && url_len < buffer.length());
      url_str = isolate->factory()
                    ->NewStringFromUtf8(buffer.SubVector(0, url_len),
                                        AllocationType::kOld)
                    .ToHandleChecked();
    } else {
      // Build the URL in the form "wasm://wasm/<module name>-<hash>".
      int hash_len = SNPrintF(buffer, "-%08x", hash);
      DCHECK(hash_len >= 0 && hash_len < buffer.length());
      Handle<String> prefix =
          isolate->factory()->NewStringFromStaticChars("wasm://wasm/");
      Handle<String> module_name =
          WasmModuleObject::ExtractUtf8StringFromModuleBytes(
              isolate, wire_bytes, module->name, kNoInternalize);
      Handle<String> hash_str =
          isolate->factory()
              ->NewStringFromUtf8(buffer.SubVector(0, hash_len))
              .ToHandleChecked();
      // Concatenate the three parts.
      url_str = isolate->factory()
                    ->NewConsString(prefix, module_name)
                    .ToHandleChecked();
      url_str = isolate->factory()
                    ->NewConsString(url_str, hash_str)
                    .ToHandleChecked();
    }
  }
  script->set_name(*url_str);

  // If the module carries an external source map URL, expose it on the
  // script so debuggers can resolve original sources.
  const WasmDebugSymbols& debug_symbols = module->debug_symbols;
  if (debug_symbols.type == WasmDebugSymbols::Type::SourceMap &&
      !debug_symbols.external_url.is_empty()) {
    Vector<const char> external_url =
        ModuleWireBytes(wire_bytes).GetNameOrNull(debug_symbols.external_url);
    MaybeHandle<String> src_map_str = isolate->factory()->NewStringFromUtf8(
        external_url, AllocationType::kOld);
    script->set_source_mapping_url(*src_map_str.ToHandleChecked());
  }

  // Use the given shared {NativeModule}, but increase its reference count by
  // allocating a new {Managed<T>} that the {Script} references.
  size_t code_size_estimate = native_module->committed_code_space();
  size_t memory_estimate =
      code_size_estimate +
      wasm::WasmCodeManager::EstimateNativeModuleMetaDataSize(module);
  Handle<Managed<wasm::NativeModule>> managed_native_module =
      Managed<wasm::NativeModule>::FromSharedPtr(isolate, memory_estimate,
                                                 std::move(native_module));
  script->set_wasm_managed_native_module(*managed_native_module);
  script->set_wasm_breakpoint_infos(ReadOnlyRoots(isolate).empty_fixed_array());
  script->set_wasm_weak_instance_list(
      ReadOnlyRoots(isolate).empty_weak_array_list());
  return script;
}
}  // namespace

847
// Create a {WasmModuleObject} in {isolate} for an existing shared
// {NativeModule} (e.g. one transferred from another isolate), registering the
// module/isolate association in the engine's bookkeeping maps.
Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
    Isolate* isolate, std::shared_ptr<NativeModule> shared_native_module,
    Vector<const char> source_url) {
  DCHECK_EQ(this, shared_native_module->engine());
  NativeModule* native_module = shared_native_module.get();
  Handle<Script> script =
      GetOrCreateScript(isolate, shared_native_module, source_url);
  Handle<FixedArray> export_wrappers;
  CompileJsToWasmWrappers(isolate, native_module->module(), &export_wrappers);
  Handle<WasmModuleObject> module_object = WasmModuleObject::New(
      isolate, std::move(shared_native_module), script, export_wrappers);
  {
    // Register the bidirectional isolate <-> module association.
    base::MutexGuard lock(&mutex_);
    DCHECK_EQ(1, isolates_.count(isolate));
    isolates_[isolate]->native_modules.insert(native_module);
    DCHECK_EQ(1, native_modules_.count(native_module));
    native_modules_[native_module]->isolates.insert(isolate);
  }

  // Finish the Wasm script now and make it public to the debugger.
  isolate->debug()->OnAfterCompile(script);
  return module_object;
}

872
// Return the engine-wide TurboFan compilation statistics, creating them
// lazily on first use. The returned pointer stays valid until
// {DumpAndResetTurboStatistics} resets it.
CompilationStatistics* WasmEngine::GetOrCreateTurboStatistics() {
  base::MutexGuard guard(&mutex_);
  if (compilation_stats_ == nullptr) {
    compilation_stats_ = std::make_unique<CompilationStatistics>();
  }
  return compilation_stats_.get();
}

void WasmEngine::DumpAndResetTurboStatistics() {
881
  base::MutexGuard guard(&mutex_);
882 883 884 885 886 887 888
  if (compilation_stats_ != nullptr) {
    StdoutStream os;
    os << AsPrintableStatistics{*compilation_stats_.get(), false} << std::endl;
  }
  compilation_stats_.reset();
}

889
// Return the engine-wide code tracer, creating it lazily on first use.
CodeTracer* WasmEngine::GetCodeTracer() {
  base::MutexGuard guard(&mutex_);
  if (code_tracer_ == nullptr) code_tracer_ = std::make_unique<CodeTracer>(-1);
  return code_tracer_.get();
}

895
// Create a new {AsyncCompileJob} and register it in {async_compile_jobs_},
// which owns it. The returned raw pointer remains valid until
// {RemoveCompileJob} is called for it.
AsyncCompileJob* WasmEngine::CreateAsyncCompileJob(
    Isolate* isolate, const WasmFeatures& enabled,
    std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context,
    const char* api_method_name,
    std::shared_ptr<CompilationResultResolver> resolver, int compilation_id) {
  Handle<Context> incumbent_context = isolate->GetIncumbentContext();
  AsyncCompileJob* job = new AsyncCompileJob(
      isolate, enabled, std::move(bytes_copy), length, context,
      incumbent_context, api_method_name, std::move(resolver), compilation_id);
  // Pass ownership to the unique_ptr in {async_compile_jobs_}.
  base::MutexGuard guard(&mutex_);
  async_compile_jobs_[job] = std::unique_ptr<AsyncCompileJob>(job);
  return job;
}

910 911
// Remove {job} from {async_compile_jobs_} and transfer ownership back to the
// caller. The job must currently be registered.
std::unique_ptr<AsyncCompileJob> WasmEngine::RemoveCompileJob(
    AsyncCompileJob* job) {
  base::MutexGuard guard(&mutex_);
  auto item = async_compile_jobs_.find(job);
  DCHECK(item != async_compile_jobs_.end());
  std::unique_ptr<AsyncCompileJob> result = std::move(item->second);
  async_compile_jobs_.erase(item);
  return result;
}

920
// Return whether any async compile job registered with this engine belongs to
// {isolate}.
bool WasmEngine::HasRunningCompileJob(Isolate* isolate) {
  base::MutexGuard guard(&mutex_);
  DCHECK_EQ(1, isolates_.count(isolate));
  for (auto& entry : async_compile_jobs_) {
    if (entry.first->isolate() == isolate) return true;
  }
  return false;
}

929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946
// Delete all async compile jobs bound to {context}. The matching jobs are
// collected while holding the mutex, then destroyed when {dying_jobs} goes
// out of scope — i.e. without holding the mutex, because job destruction can
// reenter the WasmEngine.
void WasmEngine::DeleteCompileJobsOnContext(Handle<Context> context) {
  std::vector<std::unique_ptr<AsyncCompileJob>> dying_jobs;
  {
    base::MutexGuard guard(&mutex_);
    auto job_it = async_compile_jobs_.begin();
    while (job_it != async_compile_jobs_.end()) {
      if (job_it->first->context().is_identical_to(context)) {
        dying_jobs.push_back(std::move(job_it->second));
        job_it = async_compile_jobs_.erase(job_it);
      } else {
        ++job_it;
      }
    }
  }
}

947
// Delete all async compile jobs of {isolate}, cancel initial compilation of
// its modules, and wait for pending wrapper compilation to finish.
void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
  // Under the mutex get all jobs to delete. Then delete them without holding
  // the mutex, such that deletion can reenter the WasmEngine.
  std::vector<std::unique_ptr<AsyncCompileJob>> jobs_to_delete;
  std::vector<std::weak_ptr<NativeModule>> modules_in_isolate;
  std::shared_ptr<OperationsBarrier> wrapper_compilation_barrier;
  {
    base::MutexGuard guard(&mutex_);
    for (auto it = async_compile_jobs_.begin();
         it != async_compile_jobs_.end();) {
      if (it->first->isolate() != isolate) {
        ++it;
        continue;
      }
      jobs_to_delete.push_back(std::move(it->second));
      it = async_compile_jobs_.erase(it);
    }
    DCHECK_EQ(1, isolates_.count(isolate));
    auto* isolate_info = isolates_[isolate].get();
    wrapper_compilation_barrier = isolate_info->wrapper_compilation_barrier_;
    for (auto* native_module : isolate_info->native_modules) {
      DCHECK_EQ(1, native_modules_.count(native_module));
      modules_in_isolate.emplace_back(native_modules_[native_module]->weak_ptr);
    }
  }

  // All modules that have not finished initial compilation yet cannot be
  // shared with other isolates. Hence we cancel their compilation. In
  // particular, this will cancel wrapper compilation which is bound to this
  // isolate (this would be a UAF otherwise).
  for (auto& weak_module : modules_in_isolate) {
    if (auto shared_module = weak_module.lock()) {
      shared_module->compilation_state()->CancelInitialCompilation();
    }
  }

  // After cancelling, wait for all current wrapper compilation to actually
  // finish.
  wrapper_compilation_barrier->CancelAndWait();
}

// Try to acquire a token on {isolate}'s wrapper-compilation barrier. The
// token keeps the barrier open while wrapper compilation runs; an empty token
// means the barrier was already cancelled (isolate shutting down).
OperationsBarrier::Token WasmEngine::StartWrapperCompilation(Isolate* isolate) {
  base::MutexGuard guard(&mutex_);
  DCHECK_EQ(1, isolates_.count(isolate));
  return isolates_[isolate]->wrapper_compilation_barrier_->TryLock();
}

994
// Register a new {isolate} with this engine: create its {IsolateInfo},
// install a GC-epilogue callback that samples code sizes, and (if enabled)
// attach it to the GDB remote-debugging server.
void WasmEngine::AddIsolate(Isolate* isolate) {
  base::MutexGuard guard(&mutex_);
  DCHECK_EQ(0, isolates_.count(isolate));
  isolates_.emplace(isolate, std::make_unique<IsolateInfo>(isolate));

  // Install sampling GC callback.
  // TODO(v8:7424): For now we sample module sizes in a GC callback. This will
  // bias samples towards apps with high memory pressure. We should switch to
  // using sampling based on regular intervals independent of the GC.
  auto callback = [](v8::Isolate* v8_isolate, v8::GCType type,
                     v8::GCCallbackFlags flags, void* data) {
    Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
    Counters* counters = isolate->counters();
    WasmEngine* engine = isolate->wasm_engine();
    base::MutexGuard lock(&engine->mutex_);
    DCHECK_EQ(1, engine->isolates_.count(isolate));
    for (auto* native_module : engine->isolates_[isolate]->native_modules) {
      native_module->SampleCodeSize(counters, NativeModule::kSampling);
    }
  };
  isolate->heap()->AddGCEpilogueCallback(callback, v8::kGCTypeMarkSweepCompact,
                                         nullptr);
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
  if (gdb_server_) {
    gdb_server_->AddIsolate(isolate);
  }
#endif  // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
}

// Unregister {isolate} from this engine: detach it from the GDB server, drop
// it from all per-module bookkeeping, clean up any in-flight code GC state,
// and cancel its pending code-logging task (releasing the code refs that the
// task would otherwise have released).
void WasmEngine::RemoveIsolate(Isolate* isolate) {
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
  if (gdb_server_) {
    gdb_server_->RemoveIsolate(isolate);
  }
#endif  // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING

  base::MutexGuard guard(&mutex_);
  auto it = isolates_.find(isolate);
  DCHECK_NE(isolates_.end(), it);
  std::unique_ptr<IsolateInfo> info = std::move(it->second);
  isolates_.erase(it);
  for (auto* native_module : info->native_modules) {
    DCHECK_EQ(1, native_modules_.count(native_module));
    DCHECK_EQ(1, native_modules_[native_module]->isolates.count(isolate));
    // Renamed from {info} to avoid shadowing the IsolateInfo above.
    auto* module_info = native_modules_[native_module].get();
    module_info->isolates.erase(isolate);
    if (current_gc_info_) {
      // Code this isolate reported as potentially dead can no longer be
      // confirmed dead by it; drop it from the current GC.
      for (WasmCode* code : module_info->potentially_dead_code) {
        current_gc_info_->dead_code.erase(code);
      }
    }
    if (native_module->HasDebugInfo()) {
      native_module->GetDebugInfo()->RemoveIsolate(isolate);
    }
  }
  if (current_gc_info_) {
    if (RemoveIsolateFromCurrentGC(isolate)) PotentiallyFinishCurrentGC();
  }
  if (auto* task = info->log_codes_task) {
    task->Cancel();
    // The cancelled task will never run, so release the refs it held.
    for (auto& log_entry : info->code_to_log) {
      WasmCode::DecrementRefCount(VectorOf(log_entry.second.code));
    }
    info->code_to_log.clear();
  }
  DCHECK(info->code_to_log.empty());
}

1062 1063
// Schedule the given code objects (all belonging to one {NativeModule}) for
// logging in every isolate that uses the module and has code logging enabled.
void WasmEngine::LogCode(Vector<WasmCode*> code_vec) {
  if (code_vec.empty()) return;
  base::MutexGuard guard(&mutex_);
  NativeModule* native_module = code_vec[0]->native_module();
  DCHECK_EQ(1, native_modules_.count(native_module));
  for (Isolate* isolate : native_modules_[native_module]->isolates) {
    DCHECK_EQ(1, isolates_.count(isolate));
    IsolateInfo* info = isolates_[isolate].get();
    if (!info->log_codes) continue;
    if (info->log_codes_task == nullptr) {
      // Lazily post a task that performs the actual logging on the
      // isolate's foreground task runner.
      auto new_task = std::make_unique<LogCodesTask>(
          &mutex_, &info->log_codes_task, isolate, this);
      info->log_codes_task = new_task.get();
      info->foreground_task_runner->PostTask(std::move(new_task));
    }
    if (info->code_to_log.empty()) {
      isolate->stack_guard()->RequestLogWasmCode();
    }
    // Keep the code objects alive until they are logged.
    // NOTE(review): the refs are taken before the script lookup below; if the
    // script is missing we {continue} without recording them in
    // {code_to_log} — verify these refs cannot leak in that case.
    for (WasmCode* code : code_vec) {
      DCHECK_EQ(native_module, code->native_module());
      code->IncRef();
    }

    auto script_it = info->scripts.find(native_module);
    // If the script does not yet exist, logging will happen later. If the weak
    // handle is cleared already, we also don't need to log any more.
    if (script_it == info->scripts.end()) continue;
    auto& log_entry = info->code_to_log[script_it->second.script_id()];
    if (!log_entry.source_url) {
      log_entry.source_url = script_it->second.source_url();
    }
    log_entry.code.insert(log_entry.code.end(), code_vec.begin(),
                          code_vec.end());
  }
}

1098 1099 1100 1101 1102 1103 1104
// Mark {isolate} so that wasm code added from now on gets scheduled for
// logging (see {LogCode}).
void WasmEngine::EnableCodeLogging(Isolate* isolate) {
  base::MutexGuard guard(&mutex_);
  auto isolate_it = isolates_.find(isolate);
  DCHECK_NE(isolates_.end(), isolate_it);
  isolate_it->second->log_codes = true;
}

1105
// Log all wasm code that accumulated for {isolate} since the last call, then
// release the refs taken in {LogCode}. Runs on the isolate's foreground task.
void WasmEngine::LogOutstandingCodesForIsolate(Isolate* isolate) {
  // Under the mutex, get the vector of wasm code to log. Then log and
  // decrement the ref count without holding the mutex.
  std::unordered_map<int, IsolateInfo::CodeToLogPerScript> code_to_log;
  {
    base::MutexGuard guard(&mutex_);
    DCHECK_EQ(1, isolates_.count(isolate));
    code_to_log.swap(isolates_[isolate]->code_to_log);
  }

  // Check again whether we still need to log code.
  bool should_log = WasmCode::ShouldBeLogged(isolate);

  TRACE_EVENT0("v8.wasm", "wasm.LogCode");
  for (auto& pair : code_to_log) {
    for (WasmCode* code : pair.second.code) {
      if (should_log) {
        code->LogCode(isolate, pair.second.source_url.get(), pair.first);
      }
    }
    // Release the refs taken in {LogCode} even if logging was skipped.
    WasmCode::DecrementRefCount(VectorOf(pair.second.code));
  }
}

1129 1130
// Create a new {NativeModule} for {module} and register it with the engine
// and with {isolate}. Also samples some per-isolate/per-engine metrics and
// honors the isolate's tier-down request for newly created modules.
std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
    Isolate* isolate, const WasmFeatures& enabled,
    std::shared_ptr<const WasmModule> module, size_t code_size_estimate) {
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
  if (FLAG_wasm_gdb_remote && !gdb_server_) {
    // Lazily start the GDB remote-debugging server on first module creation.
    gdb_server_ = gdb_server::GdbServer::Create();
    gdb_server_->AddIsolate(isolate);
  }
#endif  // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING

  std::shared_ptr<NativeModule> native_module = code_manager_.NewNativeModule(
      this, isolate, enabled, code_size_estimate, std::move(module));
  base::MutexGuard lock(&mutex_);
  auto pair = native_modules_.insert(std::make_pair(
      native_module.get(), std::make_unique<NativeModuleInfo>(native_module)));
  DCHECK(pair.second);  // inserted new entry.
  pair.first->second.get()->isolates.insert(isolate);
  auto* isolate_info = isolates_[isolate].get();
  isolate_info->native_modules.insert(native_module.get());
  if (isolate_info->keep_tiered_down) {
    native_module->SetTieringState(kTieredDown);
  }

  // Record memory protection key support.
  if (FLAG_wasm_memory_protection_keys && !isolate_info->pku_support_sampled) {
    isolate_info->pku_support_sampled = true;
    auto* histogram =
        isolate->counters()->wasm_memory_protection_keys_support();
    bool has_mpk =
        code_manager_.memory_protection_key_ != kNoMemoryProtectionKey;
    histogram->AddSample(has_mpk ? 1 : 0);
  }

  isolate->counters()->wasm_modules_per_isolate()->AddSample(
      static_cast<int>(isolate_info->native_modules.size()));
  isolate->counters()->wasm_modules_per_engine()->AddSample(
      static_cast<int>(native_modules_.size()));
  return native_module;
}

1169
// Look up a cached {NativeModule} for the given wire bytes. On a cache hit,
// register {isolate} as a user of the module and, if the isolate requested
// tier-down, switch the module to tiered-down code (recompiling outside the
// mutex to avoid lock inversion). Returns nullptr on a cache miss.
std::shared_ptr<NativeModule> WasmEngine::MaybeGetNativeModule(
    ModuleOrigin origin, Vector<const uint8_t> wire_bytes, Isolate* isolate) {
  std::shared_ptr<NativeModule> native_module =
      native_module_cache_.MaybeGetNativeModule(origin, wire_bytes);
  bool recompile_module = false;
  if (native_module) {
    base::MutexGuard guard(&mutex_);
    auto& native_module_info = native_modules_[native_module.get()];
    if (!native_module_info) {
      // First time this engine sees the cached module: create its info entry.
      native_module_info = std::make_unique<NativeModuleInfo>(native_module);
    }
    native_module_info->isolates.insert(isolate);
    isolates_[isolate]->native_modules.insert(native_module.get());
    if (isolates_[isolate]->keep_tiered_down) {
      native_module->SetTieringState(kTieredDown);
      recompile_module = true;
    }
  }
  // Potentially recompile the module for tier down, after releasing the mutex.
  if (recompile_module) native_module->RecompileForTiering();
  return native_module;
}

1192
bool WasmEngine::UpdateNativeModuleCache(
1193 1194
    bool error, std::shared_ptr<NativeModule>* native_module,
    Isolate* isolate) {
1195
  DCHECK_EQ(this, native_module->get()->engine());
1196 1197 1198 1199 1200
  // Pass {native_module} by value here to keep it alive until at least after
  // we returned from {Update}. Otherwise, we might {Erase} it inside {Update}
  // which would lock the mutex twice.
  auto prev = native_module->get();
  *native_module = native_module_cache_.Update(*native_module, error);
1201 1202 1203

  if (prev == native_module->get()) return true;

1204 1205 1206 1207 1208 1209
  bool recompile_module = false;
  {
    base::MutexGuard guard(&mutex_);
    DCHECK_EQ(1, native_modules_.count(native_module->get()));
    native_modules_[native_module->get()]->isolates.insert(isolate);
    DCHECK_EQ(1, isolates_.count(isolate));
1210
    isolates_[isolate]->native_modules.insert(native_module->get());
1211 1212 1213 1214 1215 1216
    if (isolates_[isolate]->keep_tiered_down) {
      native_module->get()->SetTieringState(kTieredDown);
      recompile_module = true;
    }
  }
  // Potentially recompile the module for tier down, after releasing the mutex.
1217
  if (recompile_module) native_module->get()->RecompileForTiering();
1218
  return false;
1219 1220 1221 1222 1223 1224 1225 1226
}

// Ask the native-module cache whether this caller may take ownership of the
// streaming compilation identified by {prefix_hash} (forwarded verbatim).
bool WasmEngine::GetStreamingCompilationOwnership(size_t prefix_hash) {
  return native_module_cache_.GetStreamingCompilationOwnership(prefix_hash);
}

// Notify the native-module cache that the streaming compilation identified by
// {prefix_hash} failed, so a waiting compilation can take over.
void WasmEngine::StreamingCompilationFailed(size_t prefix_hash) {
  native_module_cache_.StreamingCompilationFailed(prefix_hash);
}

1229
// Remove all engine-side state for a dying {native_module}: per-isolate
// script and code-logging entries, references held by an in-flight code GC,
// the cache entry, and finally the engine's own bookkeeping entry.
void WasmEngine::FreeNativeModule(NativeModule* native_module) {
  base::MutexGuard guard(&mutex_);
  auto module_it = native_modules_.find(native_module);
  DCHECK_NE(native_modules_.end(), module_it);
  for (Isolate* isolate : module_it->second->isolates) {
    DCHECK_EQ(1, isolates_.count(isolate));
    IsolateInfo* info = isolates_[isolate].get();
    DCHECK_EQ(1, info->native_modules.count(native_module));
    info->native_modules.erase(native_module);
    info->scripts.erase(native_module);
    // If there are {WasmCode} objects of the deleted {NativeModule}
    // outstanding to be logged in this isolate, remove them. Decrementing the
    // ref count is not needed, since the {NativeModule} dies anyway.
    for (auto& log_entry : info->code_to_log) {
      auto part_of_native_module = [native_module](WasmCode* code) {
        return code->native_module() == native_module;
      };
      std::vector<WasmCode*>& code = log_entry.second.code;
      auto new_end =
          std::remove_if(code.begin(), code.end(), part_of_native_module);
      code.erase(new_end, code.end());
    }
    // Now remove empty entries in {code_to_log}.
    for (auto log_it = info->code_to_log.begin(),
              log_end = info->code_to_log.end();
         log_it != log_end;) {
      if (log_it->second.code.empty()) {
        log_it = info->code_to_log.erase(log_it);
      } else {
        ++log_it;
      }
    }
  }
  // If there is a GC running which has references to code contained in the
  // deleted {NativeModule}, remove those references.
  if (current_gc_info_) {
    for (auto dead_it = current_gc_info_->dead_code.begin(),
              dead_end = current_gc_info_->dead_code.end();
         dead_it != dead_end;) {
      if ((*dead_it)->native_module() == native_module) {
        dead_it = current_gc_info_->dead_code.erase(dead_it);
      } else {
        ++dead_it;
      }
    }
    TRACE_CODE_GC("Native module %p died, reducing dead code objects to %zu.\n",
                  native_module, current_gc_info_->dead_code.size());
  }
  native_module_cache_.Erase(native_module);
  native_modules_.erase(module_it);
}

1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304
namespace {
// Cancelable task that samples the code size of a {NativeModule} after
// top-tier compilation finished. It only holds a {weak_ptr} to the module,
// so it does not keep the module alive; if the module died in the meantime
// the task is a no-op.
class SampleTopTierCodeSizeTask : public CancelableTask {
 public:
  SampleTopTierCodeSizeTask(Isolate* isolate,
                            std::weak_ptr<NativeModule> native_module)
      : CancelableTask(isolate),
        isolate_(isolate),
        native_module_(std::move(native_module)) {}

  void RunInternal() override {
    // Only sample if the module is still alive.
    if (std::shared_ptr<NativeModule> native_module = native_module_.lock()) {
      native_module->SampleCodeSize(isolate_->counters(),
                                    NativeModule::kAfterTopTier);
    }
  }

 private:
  Isolate* const isolate_;
  const std::weak_ptr<NativeModule> native_module_;
};
}  // namespace

void WasmEngine::SampleTopTierCodeSizeInAllIsolates(
    const std::shared_ptr<NativeModule>& native_module) {
  base::MutexGuard lock(&mutex_);
1305 1306
  DCHECK_EQ(1, native_modules_.count(native_module.get()));
  for (Isolate* isolate : native_modules_[native_module.get()]->isolates) {
1307 1308 1309
    DCHECK_EQ(1, isolates_.count(isolate));
    IsolateInfo* info = isolates_[isolate].get();
    info->foreground_task_runner->PostTask(
1310
        std::make_unique<SampleTopTierCodeSizeTask>(isolate, native_module));
1311 1312 1313
  }
}

1314 1315
// Process {isolate}'s report of live code for the current wasm code GC:
// remove the live objects from the set of potentially dead code and finish
// the GC if this was the last outstanding isolate.
void WasmEngine::ReportLiveCodeForGC(Isolate* isolate,
                                     Vector<WasmCode*> live_code) {
  TRACE_EVENT0("v8.wasm", "wasm.ReportLiveCodeForGC");
  TRACE_CODE_GC("Isolate %d reporting %zu live code objects.\n", isolate->id(),
                live_code.size());
  base::MutexGuard guard(&mutex_);
  // This report might come in late (note that we trigger both a stack guard
  // and a foreground task). In that case, ignore it.
  if (current_gc_info_ == nullptr) return;
  if (!RemoveIsolateFromCurrentGC(isolate)) return;
  isolate->counters()->wasm_module_num_triggered_code_gcs()->AddSample(
      current_gc_info_->gc_sequence_index);
  for (WasmCode* code : live_code) current_gc_info_->dead_code.erase(code);
  PotentiallyFinishCurrentGC();
}

1330 1331 1332 1333 1334
// Walk {isolate}'s stack, collect all wasm code currently executing (plus, on
// x64, OSR targets found in debug frames), and report it as live for the
// current code GC.
void WasmEngine::ReportLiveCodeFromStackForGC(Isolate* isolate) {
  wasm::WasmCodeRefScope code_ref_scope;
  std::unordered_set<wasm::WasmCode*> live_wasm_code;
  for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
    StackFrame* const frame = it.frame();
    if (frame->type() != StackFrame::WASM) continue;
    WasmFrame* wasm_frame = WasmFrame::cast(frame);
    live_wasm_code.insert(wasm_frame->wasm_code());
#if V8_TARGET_ARCH_X64
    if (wasm_frame->wasm_code()->for_debugging()) {
      // A debugging frame may be about to jump to an OSR target; keep that
      // target alive as well.
      Address osr_target =
          base::Memory<Address>(wasm_frame->fp() - kOSRTargetOffset);
      if (osr_target) {
        WasmCode* osr_code =
            isolate->wasm_engine()->code_manager()->LookupCode(osr_target);
        DCHECK_NOT_NULL(osr_code);
        live_wasm_code.insert(osr_code);
      }
    }
#endif
  }

  CheckNoArchivedThreads(isolate);

  ReportLiveCodeForGC(isolate,
                      OwnedVector<WasmCode*>::Of(live_wasm_code).as_vector());
}

1357 1358 1359 1360
// Register {code} as potentially dead. Returns false if it was already dead
// or already registered. May trigger (or schedule a follow-up) wasm code GC
// once enough potentially dead code accumulated.
bool WasmEngine::AddPotentiallyDeadCode(WasmCode* code) {
  base::MutexGuard guard(&mutex_);
  auto it = native_modules_.find(code->native_module());
  DCHECK_NE(native_modules_.end(), it);
  NativeModuleInfo* info = it->second.get();
  if (info->dead_code.count(code)) return false;  // Code is already dead.
  auto added = info->potentially_dead_code.insert(code);
  if (!added.second) return false;  // An entry already existed.
  new_potentially_dead_code_size_ += code->instructions().size();
  if (FLAG_wasm_code_gc) {
    // Trigger a GC if 64kB plus 10% of committed code are potentially dead.
    size_t dead_code_limit =
        FLAG_stress_wasm_code_gc
            ? 0
            : 64 * KB + code_manager_.committed_code_space() / 10;
    if (new_potentially_dead_code_size_ > dead_code_limit) {
      // Saturate the per-module GC counter at the int8_t maximum.
      bool inc_gc_count =
          info->num_code_gcs_triggered < std::numeric_limits<int8_t>::max();
      if (current_gc_info_ == nullptr) {
        if (inc_gc_count) ++info->num_code_gcs_triggered;
        TRACE_CODE_GC(
            "Triggering GC (potentially dead: %zu bytes; limit: %zu bytes).\n",
            new_potentially_dead_code_size_, dead_code_limit);
        TriggerGC(info->num_code_gcs_triggered);
      } else if (current_gc_info_->next_gc_sequence_index == 0) {
        if (inc_gc_count) ++info->num_code_gcs_triggered;
        TRACE_CODE_GC(
            "Scheduling another GC after the current one (potentially dead: "
            "%zu bytes; limit: %zu bytes).\n",
            new_potentially_dead_code_size_, dead_code_limit);
        current_gc_info_->next_gc_sequence_index = info->num_code_gcs_triggered;
        DCHECK_NE(0, current_gc_info_->next_gc_sequence_index);
      }
    }
  }
  return true;
}

1395 1396 1397 1398 1399 1400
// Free dead code objects: convenience wrapper that takes {mutex_} and
// forwards to {FreeDeadCodeLocked}.
void WasmEngine::FreeDeadCode(const DeadCodeMap& dead_code) {
  base::MutexGuard guard(&mutex_);
  FreeDeadCodeLocked(dead_code);
}

void WasmEngine::FreeDeadCodeLocked(const DeadCodeMap& dead_code) {
1401
  TRACE_EVENT0("v8.wasm", "wasm.FreeDeadCode");
1402 1403 1404 1405
  DCHECK(!mutex_.TryLock());
  for (auto& dead_code_entry : dead_code) {
    NativeModule* native_module = dead_code_entry.first;
    const std::vector<WasmCode*>& code_vec = dead_code_entry.second;
1406 1407
    DCHECK_EQ(1, native_modules_.count(native_module));
    auto* info = native_modules_[native_module].get();
1408 1409 1410
    TRACE_CODE_GC("Freeing %zu code object%s of module %p.\n", code_vec.size(),
                  code_vec.size() == 1 ? "" : "s", native_module);
    for (WasmCode* code : code_vec) {
1411 1412 1413
      DCHECK_EQ(1, info->dead_code.count(code));
      info->dead_code.erase(code);
    }
1414
    native_module->FreeCode(VectorOf(code_vec));
1415 1416 1417
  }
}

1418 1419 1420
// Return the {Script} for {native_module} in {isolate}, creating and caching
// it if it does not exist yet (or if its weak handle was cleared).
Handle<Script> WasmEngine::GetOrCreateScript(
    Isolate* isolate, const std::shared_ptr<NativeModule>& native_module,
    Vector<const char> source_url) {
  {
    base::MutexGuard guard(&mutex_);
    DCHECK_EQ(1, isolates_.count(isolate));
    auto& scripts = isolates_[isolate]->scripts;
    auto it = scripts.find(native_module.get());
    if (it != scripts.end()) {
      Handle<Script> weak_global_handle = it->second.handle();
      if (weak_global_handle.is_null()) {
        // The cached script died; drop the stale entry and create a new one.
        scripts.erase(it);
      } else {
        return Handle<Script>::New(*weak_global_handle, isolate);
      }
    }
  }
  // Temporarily release the mutex to let the GC collect native modules.
  auto script = CreateWasmScript(isolate, native_module, source_url);
  {
    base::MutexGuard guard(&mutex_);
    DCHECK_EQ(1, isolates_.count(isolate));
    auto& scripts = isolates_[isolate]->scripts;
    DCHECK_EQ(0, scripts.count(native_module.get()));
    scripts.emplace(native_module.get(), WeakScriptHandle(script));
    return script;
  }
}

1447 1448 1449
// Return the engine-wide barrier used to track (and on shutdown, wait for)
// background compile tasks.
std::shared_ptr<OperationsBarrier>
WasmEngine::GetBarrierForBackgroundCompile() {
  return operations_barrier_;
}

1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464
namespace {
// Records the elapsed time since the previous event into {counter}. On the
// very first event there is no previous timestamp, so only start the timer.
void SampleExceptionEvent(base::ElapsedTimer* timer, TimedHistogram* counter) {
  if (timer->IsStarted()) {
    counter->AddSample(static_cast<int>(timer->Elapsed().InMilliseconds()));
    timer->Restart();
    return;
  }
  timer->Start();
}
}  // namespace

void WasmEngine::SampleThrowEvent(Isolate* isolate) {
  base::MutexGuard guard(&mutex_);
  IsolateInfo* info = isolates_[isolate].get();
  auto* throw_counter = isolate->counters()->wasm_throw_count();
  // Clip the running count at the histogram's maximum to avoid int overflow.
  info->throw_count = std::min(info->throw_count + 1, throw_counter->max());
  throw_counter->AddSample(info->throw_count);
  SampleExceptionEvent(&info->throw_timer,
                       isolate->counters()->wasm_time_between_throws());
}

void WasmEngine::SampleRethrowEvent(Isolate* isolate) {
  base::MutexGuard guard(&mutex_);
  IsolateInfo* info = isolates_[isolate].get();
  auto* rethrow_counter = isolate->counters()->wasm_rethrow_count();
  // Clip the running count at the histogram's maximum to avoid int overflow.
  info->rethrow_count =
      std::min(info->rethrow_count + 1, rethrow_counter->max());
  rethrow_counter->AddSample(info->rethrow_count);
  SampleExceptionEvent(&info->rethrow_timer,
                       isolate->counters()->wasm_time_between_rethrows());
}

void WasmEngine::SampleCatchEvent(Isolate* isolate) {
  base::MutexGuard guard(&mutex_);
  IsolateInfo* info = isolates_[isolate].get();
  auto* catch_counter = isolate->counters()->wasm_catch_count();
  // Clip the running count at the histogram's maximum to avoid int overflow.
  info->catch_count = std::min(info->catch_count + 1, catch_counter->max());
  catch_counter->AddSample(info->catch_count);
  SampleExceptionEvent(&info->catch_timer,
                       isolate->counters()->wasm_time_between_catch());
}

void WasmEngine::TriggerGC(int8_t gc_sequence_index) {
1500
  DCHECK(!mutex_.TryLock());
1501 1502
  DCHECK_NULL(current_gc_info_);
  DCHECK(FLAG_wasm_code_gc);
1503
  new_potentially_dead_code_size_ = 0;
1504
  current_gc_info_.reset(new CurrentGCInfo(gc_sequence_index));
1505 1506 1507 1508 1509 1510 1511 1512
  // Add all potentially dead code to this GC, and trigger a GC task in each
  // isolate.
  for (auto& entry : native_modules_) {
    NativeModuleInfo* info = entry.second.get();
    if (info->potentially_dead_code.empty()) continue;
    for (auto* isolate : native_modules_[entry.first]->isolates) {
      auto& gc_task = current_gc_info_->outstanding_isolates[isolate];
      if (!gc_task) {
1513
        auto new_task = std::make_unique<WasmGCForegroundTask>(isolate);
1514 1515 1516 1517 1518
        gc_task = new_task.get();
        DCHECK_EQ(1, isolates_.count(isolate));
        isolates_[isolate]->foreground_task_runner->PostTask(
            std::move(new_task));
      }
1519
      isolate->stack_guard()->RequestWasmCodeGC();
1520 1521 1522 1523 1524
    }
    for (WasmCode* code : info->potentially_dead_code) {
      current_gc_info_->dead_code.insert(code);
    }
  }
1525
  TRACE_CODE_GC(
1526 1527
      "Starting GC (nr %d). Number of potentially dead code objects: %zu\n",
      current_gc_info_->gc_sequence_index, current_gc_info_->dead_code.size());
1528 1529 1530 1531 1532
  // Ensure that there are outstanding isolates that will eventually finish this
  // GC. If there are no outstanding isolates, we finish the GC immediately.
  PotentiallyFinishCurrentGC();
  DCHECK(current_gc_info_ == nullptr ||
         !current_gc_info_->outstanding_isolates.empty());
1533 1534
}

// Removes {isolate} from the set of isolates the running GC still waits for.
// Returns whether the isolate was still outstanding.
// Precondition: {mutex_} is held and a GC is in progress.
bool WasmEngine::RemoveIsolateFromCurrentGC(Isolate* isolate) {
  DCHECK(!mutex_.TryLock());
  DCHECK_NOT_NULL(current_gc_info_);
  size_t num_erased = current_gc_info_->outstanding_isolates.erase(isolate);
  return num_erased != 0;
}

void WasmEngine::PotentiallyFinishCurrentGC() {
  DCHECK(!mutex_.TryLock());
  TRACE_CODE_GC(
      "Remaining dead code objects: %zu; outstanding isolates: %zu.\n",
      current_gc_info_->dead_code.size(),
      current_gc_info_->outstanding_isolates.size());

  // If there are more outstanding isolates, return immediately.
  if (!current_gc_info_->outstanding_isolates.empty()) return;

  // All remaining code in {current_gc_info->dead_code} is really dead.
  // Move it from the set of potentially dead code to the set of dead code,
  // and decrement its ref count.
1554
  size_t num_freed = 0;
1555 1556 1557 1558 1559 1560 1561 1562 1563 1564
  DeadCodeMap dead_code;
  for (WasmCode* code : current_gc_info_->dead_code) {
    DCHECK_EQ(1, native_modules_.count(code->native_module()));
    auto* native_module_info = native_modules_[code->native_module()].get();
    DCHECK_EQ(1, native_module_info->potentially_dead_code.count(code));
    native_module_info->potentially_dead_code.erase(code);
    DCHECK_EQ(0, native_module_info->dead_code.count(code));
    native_module_info->dead_code.insert(code);
    if (code->DecRefOnDeadCode()) {
      dead_code[code->native_module()].push_back(code);
1565
      ++num_freed;
1566 1567
    }
  }
1568 1569 1570

  FreeDeadCodeLocked(dead_code);

1571 1572
  TRACE_CODE_GC("Found %zu dead code objects, freed %zu.\n",
                current_gc_info_->dead_code.size(), num_freed);
1573 1574
  USE(num_freed);

1575
  int8_t next_gc_sequence_index = current_gc_info_->next_gc_sequence_index;
1576
  current_gc_info_.reset();
1577
  if (next_gc_sequence_index != 0) TriggerGC(next_gc_sequence_index);
1578 1579
}

namespace {

1582
WasmEngine* global_wasm_engine = nullptr;
1583 1584 1585

}  // namespace

// static
1587
void WasmEngine::InitializeOncePerProcess() {
1588 1589
  DCHECK_NULL(global_wasm_engine);
  global_wasm_engine = new WasmEngine();
1590 1591
}

// static
1593
void WasmEngine::GlobalTearDown() {
1594 1595 1596 1597 1598
  // Note: This can be called multiple times in a row (see
  // test-api/InitializeAndDisposeMultiple). This is fine, as
  // {global_wasm_engine} will be nullptr then.
  delete global_wasm_engine;
  global_wasm_engine = nullptr;
1599 1600
}

// static
1602 1603 1604
WasmEngine* WasmEngine::GetWasmEngine() {
  DCHECK_NOT_NULL(global_wasm_engine);
  return global_wasm_engine;
1605 1606
}

// {max_mem_pages} is declared in wasm-limits.h.
uint32_t max_mem_pages() {
1609 1610 1611 1612
  STATIC_ASSERT(kV8MaxWasmMemoryPages <= kMaxUInt32);
  return std::min(uint32_t{kV8MaxWasmMemoryPages}, FLAG_wasm_max_mem_pages);
}

// {max_table_init_entries} is declared in wasm-limits.h.
uint32_t max_table_init_entries() {
  // The compile-time limit may be lowered via the --wasm-max-table-size flag.
  uint32_t engine_limit = uint32_t{kV8MaxWasmTableInitEntries};
  return std::min(engine_limit, FLAG_wasm_max_table_size);
}

// {max_module_size} is declared in wasm-limits.h.
size_t max_module_size() {
  // With --experimental-wasm-allow-huge-modules, allow nearly the full
  // positive-int range, rounded down to pointer-size alignment; otherwise use
  // the regular engine limit.
  if (FLAG_experimental_wasm_allow_huge_modules) {
    return RoundDown<kSystemPointerSize>(size_t{kMaxInt});
  }
  return kV8MaxWasmModuleSize;
}

#undef TRACE_CODE_GC

}  // namespace wasm
}  // namespace internal
}  // namespace v8