// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/wasm-engine.h"

#include "src/base/functional.h"
#include "src/base/platform/time.h"
#include "src/common/globals.h"
#include "src/diagnostics/code-tracer.h"
#include "src/diagnostics/compilation-statistics.h"
#include "src/execution/frames.h"
#include "src/execution/v8threads.h"
#include "src/logging/counters.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-promise.h"
#include "src/objects/objects-inl.h"
#include "src/strings/string-hasher-inl.h"
#include "src/utils/ostreams.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/module-instantiate.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"

#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
#include "src/debug/wasm/gdb-server/gdb-server.h"
#endif  // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING

namespace v8 {
namespace internal {
namespace wasm {

#define TRACE_CODE_GC(...)                                         \
  do {                                                             \
    if (FLAG_trace_wasm_code_gc) PrintF("[wasm-gc] " __VA_ARGS__); \
  } while (false)

namespace {
// A task to log a set of {WasmCode} objects in an isolate. It does not own any
// data itself, since it is owned by the platform, so lifetime is not really
// bound to the wasm engine.
class LogCodesTask : public Task {
 public:
  LogCodesTask(base::Mutex* mutex, LogCodesTask** task_slot, Isolate* isolate,
               WasmEngine* engine)
      : mutex_(mutex),
        task_slot_(task_slot),
        isolate_(isolate),
        engine_(engine) {
    DCHECK_NOT_NULL(task_slot);
    DCHECK_NOT_NULL(isolate);
  }

  ~LogCodesTask() {
    // If the platform deletes this task before executing it, we also deregister
    // it to avoid use-after-free from still-running background threads.
    if (!cancelled()) DeregisterTask();
  }

  void Run() override {
    if (cancelled()) return;
    DeregisterTask();
    engine_->LogOutstandingCodesForIsolate(isolate_);
  }

  void Cancel() {
    // Cancel will only be called on Isolate shutdown, which happens on the
    // Isolate's foreground thread. Thus no synchronization needed.
    isolate_ = nullptr;
  }

  bool cancelled() const { return isolate_ == nullptr; }

  void DeregisterTask() {
    // The task will only be deregistered from the foreground thread (executing
    // this task or calling its destructor), thus we do not need synchronization
    // on this field access.
    if (task_slot_ == nullptr) return;  // already deregistered.
    // Remove this task from the {IsolateInfo} in the engine. The next
    // logging request will allocate and schedule a new task.
    base::MutexGuard guard(mutex_);
    DCHECK_EQ(this, *task_slot_);
    *task_slot_ = nullptr;
    task_slot_ = nullptr;
  }

 private:
  // The mutex of the WasmEngine.
  base::Mutex* const mutex_;
  // The slot in the WasmEngine where this LogCodesTask is stored. This is
  // cleared by this task before execution or on task destruction.
  LogCodesTask** task_slot_;
  Isolate* isolate_;
  WasmEngine* const engine_;
};

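// Checks that the isolate has no archived threads; wasm code GC does not scan
// archived threads, so they are not supported in combination with wasm code.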
void CheckNoArchivedThreads(Isolate* isolate) {
  class ArchivedThreadsVisitor : public ThreadVisitor {
    void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
      // Archived threads are rarely used, and not combined with Wasm at the
      // moment. Implement this and test it properly once we have a use case for
      // that.
      FATAL("archived threads in combination with wasm not supported");
    }
  } archived_threads_visitor;
  isolate->thread_manager()->IterateArchivedThreads(&archived_threads_visitor);
}

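// A foreground task posted to an isolate during a wasm code GC. When it runs,
// no wasm frames are live on that isolate's stack, so it reports an empty set
// of live code.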
class WasmGCForegroundTask : public CancelableTask {
 public:
  explicit WasmGCForegroundTask(Isolate* isolate)
      : CancelableTask(isolate->cancelable_task_manager()), isolate_(isolate) {}

  void RunInternal() final {
    WasmEngine* engine = isolate_->wasm_engine();
    // If the foreground task is executing, there is no wasm code active. Just
    // report an empty set of live wasm code.
#ifdef ENABLE_SLOW_DCHECKS
    for (StackFrameIterator it(isolate_); !it.done(); it.Advance()) {
      DCHECK_NE(StackFrame::WASM_COMPILED, it.frame()->type());
    }
#endif
    CheckNoArchivedThreads(isolate_);
    engine->ReportLiveCodeForGC(isolate_, Vector<WasmCode*>{});
  }

 private:
  Isolate* isolate_;
};

class WeakScriptHandle {
 public:
  explicit WeakScriptHandle(Handle<Script> handle) {
    auto global_handle =
        handle->GetIsolate()->global_handles()->Create(*handle);
    location_ = std::make_unique<Address*>(global_handle.location());
    GlobalHandles::MakeWeak(location_.get());
  }

  // Usually the destructor of this class is called after the weak callback,
  // because the Script keeps the NativeModule alive. So we expect the handle
  // to be destroyed and the location to be reset already. We cannot check
  // this, though, because of one exception: when the native module is freed
  // during isolate shutdown, the destructor is called first and the callback
  // never runs.
  ~WeakScriptHandle() = default;

  WeakScriptHandle(WeakScriptHandle&&) V8_NOEXCEPT = default;

  Handle<Script> handle() { return Handle<Script>(*location_); }

 private:
  // Store the location in a unique_ptr so that its address stays the same even
  // when this object is moved/copied.
  std::unique_ptr<Address*> location_;
};

}  // namespace

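// Returns a cached {NativeModule} whose wire bytes match {wire_bytes}, if one
// exists. If another thread is already compiling a module with the same bytes,
// this blocks until that compilation finishes. Otherwise a placeholder entry
// is inserted and nullptr is returned, asking the caller to compile the module
// itself. Only modules with {kWasmOrigin} are cached.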
std::shared_ptr<NativeModule> NativeModuleCache::MaybeGetNativeModule(
    ModuleOrigin origin, Vector<const uint8_t> wire_bytes) {
  if (origin != kWasmOrigin) return nullptr;
  base::MutexGuard lock(&mutex_);
  size_t prefix_hash = PrefixHash(wire_bytes);
  NativeModuleCache::Key key{prefix_hash, wire_bytes};
  while (true) {
    auto it = map_.find(key);
    if (it == map_.end()) {
      // Even though this exact key is not in the cache, there might be a
      // matching prefix hash indicating that a streaming compilation is
      // currently compiling a module with the same prefix. {OnFinishedStream}
      // happens on the main thread too, so waiting for streaming compilation to
      // finish would create a deadlock. Instead, compile the module twice and
      // handle the conflict in {UpdateNativeModuleCache}.

      // Insert a {nullopt} entry to let other threads know that this
      // {NativeModule} is already being created on another thread.
      auto p = map_.emplace(key, base::nullopt);
      USE(p);
      DCHECK(p.second);
      return nullptr;
    }
    if (it->second.has_value()) {
      if (auto shared_native_module = it->second.value().lock()) {
        DCHECK_EQ(shared_native_module->wire_bytes(), wire_bytes);
        return shared_native_module;
      }
    }
    cache_cv_.Wait(&mutex_);
  }
}

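// Claims ownership of a streaming compilation for {prefix_hash}: returns true
// and inserts a placeholder entry if no other compilation with the same prefix
// is in flight, and false otherwise.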
bool NativeModuleCache::GetStreamingCompilationOwnership(size_t prefix_hash) {
  base::MutexGuard lock(&mutex_);
  auto it = map_.lower_bound(Key{prefix_hash, {}});
  if (it != map_.end() && it->first.prefix_hash == prefix_hash) {
    DCHECK_IMPLIES(!it->first.bytes.empty(),
                   PrefixHash(it->first.bytes) == prefix_hash);
    return false;
  }
  Key key{prefix_hash, {}};
  DCHECK_EQ(0, map_.count(key));
  map_.emplace(key, base::nullopt);
  return true;
}

void NativeModuleCache::StreamingCompilationFailed(size_t prefix_hash) {
  base::MutexGuard lock(&mutex_);
  Key key{prefix_hash, {}};
  DCHECK_EQ(1, map_.count(key));
  map_.erase(key);
  cache_cv_.NotifyAll();
}

std::shared_ptr<NativeModule> NativeModuleCache::Update(
    std::shared_ptr<NativeModule> native_module, bool error) {
  DCHECK_NOT_NULL(native_module);
  if (native_module->module()->origin != kWasmOrigin) return native_module;
  Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
  DCHECK(!wire_bytes.empty());
  size_t prefix_hash = PrefixHash(native_module->wire_bytes());
  base::MutexGuard lock(&mutex_);
  map_.erase(Key{prefix_hash, {}});
  const Key key{prefix_hash, wire_bytes};
  auto it = map_.find(key);
  if (it != map_.end()) {
    if (it->second.has_value()) {
      auto conflicting_module = it->second.value().lock();
      if (conflicting_module != nullptr) {
        DCHECK_EQ(conflicting_module->wire_bytes(), wire_bytes);
        return conflicting_module;
      }
    }
    map_.erase(it);
  }
  if (!error) {
    // The key now points to the new native module's owned copy of the bytes,
    // so that it stays valid until the native module is freed and erased from
    // the map.
    auto p = map_.emplace(
        key, base::Optional<std::weak_ptr<NativeModule>>(native_module));
    USE(p);
    DCHECK(p.second);
  }
  cache_cv_.NotifyAll();
  return native_module;
}

void NativeModuleCache::Erase(NativeModule* native_module) {
  if (native_module->module()->origin != kWasmOrigin) return;
  // Happens in some tests where bytes are set directly.
  if (native_module->wire_bytes().empty()) return;
  base::MutexGuard lock(&mutex_);
  size_t prefix_hash = PrefixHash(native_module->wire_bytes());
  map_.erase(Key{prefix_hash, native_module->wire_bytes()});
  cache_cv_.NotifyAll();
}

// static
size_t NativeModuleCache::WireBytesHash(Vector<const uint8_t> bytes) {
  return StringHasher::HashSequentialString(
      reinterpret_cast<const char*>(bytes.begin()), bytes.length(),
      kZeroHashSeed);
}

// static
size_t NativeModuleCache::PrefixHash(Vector<const uint8_t> wire_bytes) {
  // Compute the hash as a combined hash of the sections up to the code section
  // header, to mirror the way streaming compilation does it.
  Decoder decoder(wire_bytes.begin(), wire_bytes.end());
  decoder.consume_bytes(8, "module header");
  size_t hash = NativeModuleCache::WireBytesHash(wire_bytes.SubVector(0, 8));
  SectionCode section_id = SectionCode::kUnknownSectionCode;
  while (decoder.ok() && decoder.more()) {
    section_id = static_cast<SectionCode>(decoder.consume_u8());
    uint32_t section_size = decoder.consume_u32v("section size");
    if (section_id == SectionCode::kCodeSectionCode) {
      uint32_t num_functions = decoder.consume_u32v("num functions");
      // If {num_functions} is 0, the streaming decoder skips the section. Do
      // the same here to ensure hashes are consistent.
      if (num_functions != 0) {
        hash = base::hash_combine(hash, section_size);
      }
      break;
    }
    const uint8_t* payload_start = decoder.pc();
    decoder.consume_bytes(section_size, "section payload");
    size_t section_hash = NativeModuleCache::WireBytesHash(
        Vector<const uint8_t>(payload_start, section_size));
    hash = base::hash_combine(hash, section_hash);
  }
  return hash;
}

struct WasmEngine::CurrentGCInfo {
  explicit CurrentGCInfo(int8_t gc_sequence_index)
      : gc_sequence_index(gc_sequence_index) {
    DCHECK_NE(0, gc_sequence_index);
  }

  // Set of isolates that did not scan their stack yet for used WasmCode, and
  // their scheduled foreground task.
  std::unordered_map<Isolate*, WasmGCForegroundTask*> outstanding_isolates;

  // Set of dead code. Filled with all potentially dead code on initialization.
  // Code that is still in-use is removed by the individual isolates.
  std::unordered_set<WasmCode*> dead_code;

  // The number of GCs triggered in the native module that triggered this GC.
  // This is stored in the histogram for each participating isolate during
  // execution of that isolate's foreground task.
  const int8_t gc_sequence_index;

  // If during this GC, another GC was requested, we skipped that other GC (we
  // only run one GC at a time). Remember though to trigger another one once
  // this one finishes. {next_gc_sequence_index} is 0 if no next GC is needed,
  // and >0 otherwise. It stores the {num_code_gcs_triggered} of the native
  // module which triggered the next GC.
  int8_t next_gc_sequence_index = 0;

  // The start time of this GC; used for tracing and sampled via {Counters}.
  // Can be null ({TimeTicks::IsNull()}) if timer is not high resolution.
  base::TimeTicks start_time;
};

struct WasmEngine::IsolateInfo {
  explicit IsolateInfo(Isolate* isolate)
      : log_codes(WasmCode::ShouldBeLogged(isolate)),
        async_counters(isolate->async_counters()) {
    v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
    v8::Platform* platform = V8::GetCurrentPlatform();
    foreground_task_runner = platform->GetForegroundTaskRunner(v8_isolate);
  }

#ifdef DEBUG
  ~IsolateInfo() {
    // Before destructing, the {WasmEngine} must have cleared outstanding code
    // to log.
    DCHECK_EQ(0, code_to_log.size());
  }
#endif

  // All native modules that are being used by this Isolate (this set currently
  // only grows, never shrinks).
  std::set<NativeModule*> native_modules;

  // Scripts created for each native module in this isolate.
  std::unordered_map<NativeModule*, WeakScriptHandle> scripts;

  // Caches whether code needs to be logged on this isolate.
  bool log_codes;

  // The currently scheduled LogCodesTask.
  LogCodesTask* log_codes_task = nullptr;

  // The vector of code objects that still need to be logged in this isolate.
  std::vector<WasmCode*> code_to_log;

  // The foreground task runner of the isolate (can be called from background).
  std::shared_ptr<v8::TaskRunner> foreground_task_runner;

  const std::shared_ptr<Counters> async_counters;

  // Keep new modules in tiered down state.
  bool keep_tiered_down = false;
};

struct WasmEngine::NativeModuleInfo {
  // Set of isolates using this NativeModule.
  std::unordered_set<Isolate*> isolates;

  // Set of potentially dead code. This set holds one ref for each code object,
  // until code is detected to be really dead. At that point, the ref count is
  // decremented and the code is moved to the {dead_code} set. If the code is
  // finally deleted, it is also removed from {dead_code}.
  std::unordered_set<WasmCode*> potentially_dead_code;

  // Code that is not being executed in any isolate any more, but the ref count
  // did not drop to zero yet.
  std::unordered_set<WasmCode*> dead_code;

  // Number of code GCs triggered because code in this native module became
  // potentially dead.
  int8_t num_code_gcs_triggered = 0;
};

WasmEngine::WasmEngine() : code_manager_(FLAG_wasm_max_code_space * MB) {}

WasmEngine::~WasmEngine() {
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
  // Synchronize on the GDB-remote thread, if running.
  gdb_server_.reset();
#endif  // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING

  // Synchronize on all background compile tasks.
  background_compile_task_manager_.CancelAndWait();
  // All AsyncCompileJobs have been canceled.
  DCHECK(async_compile_jobs_.empty());
  // All Isolates have been deregistered.
  DCHECK(isolates_.empty());
  // All NativeModules have been deleted.
  DCHECK(native_modules_.empty());
  // Native module cache does not leak.
  DCHECK(native_module_cache_.empty());
}

bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
                              const ModuleWireBytes& bytes) {
  // TODO(titzer): remove dependency on the isolate.
  if (bytes.start() == nullptr || bytes.length() == 0) return false;
  ModuleResult result =
      DecodeWasmModule(enabled, bytes.start(), bytes.end(), true, kWasmOrigin,
                       isolate->counters(), allocator());
  return result.ok();
}

MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
    Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
    Vector<const byte> asm_js_offset_table_bytes,
    Handle<HeapNumber> uses_bitset, LanguageMode language_mode) {
  ModuleOrigin origin = language_mode == LanguageMode::kSloppy
                            ? kAsmJsSloppyOrigin
                            : kAsmJsStrictOrigin;
  ModuleResult result =
      DecodeWasmModule(WasmFeatures::ForAsmjs(), bytes.start(), bytes.end(),
                       false, origin, isolate->counters(), allocator());
  if (result.failed()) {
    // This happens once in a while when we have missed some limit check
    // in the asm parser. Output an error message to help diagnose, then crash.
    std::cout << result.error().message();
    UNREACHABLE();
  }

  result.value()->asm_js_offset_information =
      std::make_unique<AsmJsOffsetInformation>(asm_js_offset_table_bytes);

  // Transfer ownership of the WasmModule to the {Managed<WasmModule>}
  // generated in {CompileToNativeModule}.
  Handle<FixedArray> export_wrappers;
  std::shared_ptr<NativeModule> native_module =
      CompileToNativeModule(isolate, WasmFeatures::ForAsmjs(), thrower,
                            std::move(result).value(), bytes, &export_wrappers);
  if (!native_module) return {};

  return AsmWasmData::New(isolate, std::move(native_module), export_wrappers,
                          uses_bitset);
}

Handle<WasmModuleObject> WasmEngine::FinalizeTranslatedAsmJs(
    Isolate* isolate, Handle<AsmWasmData> asm_wasm_data,
    Handle<Script> script) {
  std::shared_ptr<NativeModule> native_module =
      asm_wasm_data->managed_native_module().get();
  Handle<FixedArray> export_wrappers =
      handle(asm_wasm_data->export_wrappers(), isolate);
  Handle<WasmModuleObject> module_object = WasmModuleObject::New(
      isolate, std::move(native_module), script, export_wrappers);
  return module_object;
}

MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
    Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
    const ModuleWireBytes& bytes) {
  ModuleResult result =
      DecodeWasmModule(enabled, bytes.start(), bytes.end(), false, kWasmOrigin,
                       isolate->counters(), allocator());
  if (result.failed()) {
    thrower->CompileFailed(result.error());
    return {};
  }

  // Transfer ownership of the WasmModule to the {Managed<WasmModule>}
  // generated in {CompileToNativeModule}.
  Handle<FixedArray> export_wrappers;
  std::shared_ptr<NativeModule> native_module =
      CompileToNativeModule(isolate, enabled, thrower,
                            std::move(result).value(), bytes, &export_wrappers);
  if (!native_module) return {};

#ifdef DEBUG
  // Ensure that code GC will check this isolate for live code.
  {
    base::MutexGuard lock(&mutex_);
    DCHECK_EQ(1, isolates_.count(isolate));
    DCHECK_EQ(1, isolates_[isolate]->native_modules.count(native_module.get()));
    DCHECK_EQ(1, native_modules_.count(native_module.get()));
    DCHECK_EQ(1, native_modules_[native_module.get()]->isolates.count(isolate));
  }
#endif

  Handle<Script> script = GetOrCreateScript(isolate, native_module);

  // Create the compiled module object and populate with compiled functions
  // and information needed at instantiation time. This object needs to be
  // serializable. Instantiation may occur off a deserialized version of this
  // object.
  Handle<WasmModuleObject> module_object = WasmModuleObject::New(
      isolate, std::move(native_module), script, export_wrappers);

  // Finish the Wasm script now and make it public to the debugger.
  isolate->debug()->OnAfterCompile(script);
  return module_object;
}

MaybeHandle<WasmInstanceObject> WasmEngine::SyncInstantiate(
    Isolate* isolate, ErrorThrower* thrower,
    Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
    MaybeHandle<JSArrayBuffer> memory) {
  return InstantiateToInstanceObject(isolate, thrower, module_object, imports,
                                     memory);
}

void WasmEngine::AsyncInstantiate(
    Isolate* isolate, std::unique_ptr<InstantiationResultResolver> resolver,
    Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports) {
  ErrorThrower thrower(isolate, "WebAssembly.instantiate()");
  // Instantiate a TryCatch so that caught exceptions won't propagate out.
  // They will still be set as pending exceptions on the isolate.
  // TODO(clemensb): Avoid TryCatch, use Execution::TryCall internally to invoke
  // start function and report thrown exception explicitly via out argument.
  v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
  catcher.SetVerbose(false);
  catcher.SetCaptureMessage(false);

  MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
      isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());

  if (!instance_object.is_null()) {
    resolver->OnInstantiationSucceeded(instance_object.ToHandleChecked());
    return;
  }

  if (isolate->has_pending_exception()) {
    // The JS code executed during instantiation has thrown an exception.
    // We have to move the exception to the promise chain.
    Handle<Object> exception(isolate->pending_exception(), isolate);
    isolate->clear_pending_exception();
    *isolate->external_caught_exception_address() = false;
    resolver->OnInstantiationFailed(exception);
    thrower.Reset();
  } else {
    DCHECK(thrower.error());
    resolver->OnInstantiationFailed(thrower.Reify());
  }
}

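// Compiles {bytes} asynchronously and reports the result via {resolver}. Falls
// back to synchronous compilation if --wasm-async-compilation is disabled, and
// uses the streaming path if --wasm-test-streaming is set.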
void WasmEngine::AsyncCompile(
    Isolate* isolate, const WasmFeatures& enabled,
    std::shared_ptr<CompilationResultResolver> resolver,
    const ModuleWireBytes& bytes, bool is_shared,
    const char* api_method_name_for_errors) {
  if (!FLAG_wasm_async_compilation) {
    // Asynchronous compilation disabled; fall back on synchronous compilation.
    ErrorThrower thrower(isolate, api_method_name_for_errors);
    MaybeHandle<WasmModuleObject> module_object;
    if (is_shared) {
      // Make a copy of the wire bytes to avoid concurrent modification.
      std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
      memcpy(copy.get(), bytes.start(), bytes.length());
      ModuleWireBytes bytes_copy(copy.get(), copy.get() + bytes.length());
      module_object = SyncCompile(isolate, enabled, &thrower, bytes_copy);
    } else {
      // The wire bytes are not shared, OK to use them directly.
      module_object = SyncCompile(isolate, enabled, &thrower, bytes);
    }
    if (thrower.error()) {
      resolver->OnCompilationFailed(thrower.Reify());
      return;
    }
    Handle<WasmModuleObject> module = module_object.ToHandleChecked();
    resolver->OnCompilationSucceeded(module);
    return;
  }

  if (FLAG_wasm_test_streaming) {
    std::shared_ptr<StreamingDecoder> streaming_decoder =
        StartStreamingCompilation(
            isolate, enabled, handle(isolate->context(), isolate),
            api_method_name_for_errors, std::move(resolver));
    streaming_decoder->OnBytesReceived(bytes.module_bytes());
    streaming_decoder->Finish();
    return;
  }
  // Make a copy of the wire bytes in case the user program changes them
  // during asynchronous compilation.
  std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
  memcpy(copy.get(), bytes.start(), bytes.length());

  AsyncCompileJob* job =
      CreateAsyncCompileJob(isolate, enabled, std::move(copy), bytes.length(),
                            handle(isolate->context(), isolate),
                            api_method_name_for_errors, std::move(resolver));
  job->Start();
}

std::shared_ptr<StreamingDecoder> WasmEngine::StartStreamingCompilation(
    Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context,
    const char* api_method_name,
    std::shared_ptr<CompilationResultResolver> resolver) {
  AsyncCompileJob* job =
      CreateAsyncCompileJob(isolate, enabled, std::unique_ptr<byte[]>(nullptr),
                            0, context, api_method_name, std::move(resolver));
  return job->CreateStreamingDecoder();
}

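// Synchronously compiles the single function at {function_index} with the
// requested execution tier.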
void WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module,
                                 uint32_t function_index, ExecutionTier tier) {
  // Note we assume that "one-off" compilations can discard detected features.
  WasmFeatures detected = WasmFeatures::None();
  WasmCompilationUnit::CompileWasmFunction(
      isolate, native_module, &detected,
      &native_module->module()->functions[function_index], tier);
}

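// Tiers down all native modules currently registered for {isolate} and, via
// {keep_tiered_down}, makes new modules start in the tiered-down state too.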
void WasmEngine::TierDownAllModulesPerIsolate(Isolate* isolate) {
  std::vector<NativeModule*> native_modules;
  {
    base::MutexGuard lock(&mutex_);
    if (isolates_[isolate]->keep_tiered_down) return;
    isolates_[isolate]->keep_tiered_down = true;
    for (auto* native_module : isolates_[isolate]->native_modules) {
      native_modules.push_back(native_module);
    }
  }
  for (auto* native_module : native_modules) {
    native_module->TierDown(isolate);
  }
}

void WasmEngine::TierUpAllModulesPerIsolate(Isolate* isolate) {
  std::vector<NativeModule*> native_modules;
  {
    base::MutexGuard lock(&mutex_);
    isolates_[isolate]->keep_tiered_down = false;
    for (auto* native_module : isolates_[isolate]->native_modules) {
      native_modules.push_back(native_module);
    }
  }
  for (auto* native_module : native_modules) {
    native_module->TierUp(isolate);
  }
}

std::shared_ptr<NativeModule> WasmEngine::ExportNativeModule(
    Handle<WasmModuleObject> module_object) {
  return module_object->shared_native_module();
}

namespace {
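// Creates a {Script} object for the given native module. The script name is
// derived from a hash of the wire bytes (prefixed with the module name, if
// present), and the source URL defaults to "wasm://wasm/<name>" unless an
// explicit {source_url} is given.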
Handle<Script> CreateWasmScript(Isolate* isolate,
                                std::shared_ptr<NativeModule> native_module,
                                Vector<const char> source_url = {}) {
  Handle<Script> script =
      isolate->factory()->NewScript(isolate->factory()->empty_string());
  script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
  script->set_context_data(isolate->native_context()->debug_context_id());
  script->set_type(Script::TYPE_WASM);

  Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
  int hash = StringHasher::HashSequentialString(
      reinterpret_cast<const char*>(wire_bytes.begin()), wire_bytes.length(),
      kZeroHashSeed);

  const int kBufferSize = 32;
  char buffer[kBufferSize];

  // Script name is "<module_name>-hash" if name is available and "hash"
  // otherwise.
  const WasmModule* module = native_module->module();
  Handle<String> name_str;
  if (module->name.is_set()) {
    int name_chars = SNPrintF(ArrayVector(buffer), "-%08x", hash);
    DCHECK(name_chars >= 0 && name_chars < kBufferSize);
    Handle<String> name_hash =
        isolate->factory()
            ->NewStringFromOneByte(
                VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars),
                AllocationType::kOld)
            .ToHandleChecked();
    Handle<String> module_name =
        WasmModuleObject::ExtractUtf8StringFromModuleBytes(
            isolate, wire_bytes, module->name, kNoInternalize);
    name_str = isolate->factory()
                   ->NewConsString(module_name, name_hash)
                   .ToHandleChecked();
  } else {
    int name_chars = SNPrintF(ArrayVector(buffer), "%08x", hash);
    DCHECK(name_chars >= 0 && name_chars < kBufferSize);
    name_str = isolate->factory()
                   ->NewStringFromOneByte(
                       VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars),
                       AllocationType::kOld)
                   .ToHandleChecked();
  }
  script->set_name(*name_str);
  MaybeHandle<String> url_str;
  if (!source_url.empty()) {
    url_str =
        isolate->factory()->NewStringFromUtf8(source_url, AllocationType::kOld);
  } else {
    Handle<String> url_prefix =
        isolate->factory()->InternalizeString(StaticCharVector("wasm://wasm/"));
    url_str = isolate->factory()->NewConsString(url_prefix, name_str);
  }
  script->set_source_url(*url_str.ToHandleChecked());

  auto source_map_url = VectorOf(module->source_map_url);
  if (!source_map_url.empty()) {
    MaybeHandle<String> src_map_str = isolate->factory()->NewStringFromUtf8(
        source_map_url, AllocationType::kOld);
    script->set_source_mapping_url(*src_map_str.ToHandleChecked());
  }

  // Use the given shared {NativeModule}, but increase its reference count by
  // allocating a new {Managed<T>} that the {Script} references.
  size_t code_size_estimate = native_module->committed_code_space();
  size_t memory_estimate =
      code_size_estimate +
      wasm::WasmCodeManager::EstimateNativeModuleMetaDataSize(module);
  Handle<Managed<wasm::NativeModule>> managed_native_module =
      Managed<wasm::NativeModule>::FromSharedPtr(isolate, memory_estimate,
                                                 std::move(native_module));
  script->set_wasm_managed_native_module(*managed_native_module);
  script->set_wasm_breakpoint_infos(ReadOnlyRoots(isolate).empty_fixed_array());
  script->set_wasm_weak_instance_list(
      ReadOnlyRoots(isolate).empty_weak_array_list());
  return script;
}
}  // namespace

Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
    Isolate* isolate, std::shared_ptr<NativeModule> shared_native_module) {
  NativeModule* native_module = shared_native_module.get();
  ModuleWireBytes wire_bytes(native_module->wire_bytes());
  Handle<Script> script = GetOrCreateScript(isolate, shared_native_module);
  Handle<FixedArray> export_wrappers;
  CompileJsToWasmWrappers(isolate, native_module->module(), &export_wrappers);
  Handle<WasmModuleObject> module_object = WasmModuleObject::New(
      isolate, std::move(shared_native_module), script, export_wrappers);
  {
    base::MutexGuard lock(&mutex_);
    DCHECK_EQ(1, isolates_.count(isolate));
    isolates_[isolate]->native_modules.insert(native_module);
    DCHECK_EQ(1, native_modules_.count(native_module));
    native_modules_[native_module]->isolates.insert(isolate);
  }

  // Finish the Wasm script now and make it public to the debugger.
  isolate->debug()->OnAfterCompile(script);
  return module_object;
}

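// Lazily creates and returns the engine-wide {CompilationStatistics} object,
// guarded by {mutex_}.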
CompilationStatistics* WasmEngine::GetOrCreateTurboStatistics() {
  base::MutexGuard guard(&mutex_);
  if (compilation_stats_ == nullptr) {
    compilation_stats_.reset(new CompilationStatistics());
  }
  return compilation_stats_.get();
}

void WasmEngine::DumpAndResetTurboStatistics() {
  base::MutexGuard guard(&mutex_);
  if (compilation_stats_ != nullptr) {
    StdoutStream os;
    os << AsPrintableStatistics{*compilation_stats_.get(), false} << std::endl;
  }
  compilation_stats_.reset();
}

CodeTracer* WasmEngine::GetCodeTracer() {
  base::MutexGuard guard(&mutex_);
  if (code_tracer_ == nullptr) code_tracer_.reset(new CodeTracer(-1));
  return code_tracer_.get();
}

AsyncCompileJob* WasmEngine::CreateAsyncCompileJob(
    Isolate* isolate, const WasmFeatures& enabled,
    std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context,
    const char* api_method_name,
    std::shared_ptr<CompilationResultResolver> resolver) {
  AsyncCompileJob* job =
      new AsyncCompileJob(isolate, enabled, std::move(bytes_copy), length,
                          context, api_method_name, std::move(resolver));
  // Pass ownership to the unique_ptr in {async_compile_jobs_}.
  base::MutexGuard guard(&mutex_);
  async_compile_jobs_[job] = std::unique_ptr<AsyncCompileJob>(job);
  return job;
}

std::unique_ptr<AsyncCompileJob> WasmEngine::RemoveCompileJob(
    AsyncCompileJob* job) {
  base::MutexGuard guard(&mutex_);
  auto item = async_compile_jobs_.find(job);
  DCHECK(item != async_compile_jobs_.end());
  std::unique_ptr<AsyncCompileJob> result = std::move(item->second);
  async_compile_jobs_.erase(item);
  return result;
}

bool WasmEngine::HasRunningCompileJob(Isolate* isolate) {
  base::MutexGuard guard(&mutex_);
  DCHECK_EQ(1, isolates_.count(isolate));
  for (auto& entry : async_compile_jobs_) {
    if (entry.first->isolate() == isolate) return true;
  }
  return false;
}

void WasmEngine::DeleteCompileJobsOnContext(Handle<Context> context) {
  // Under the mutex get all jobs to delete. Then delete them without holding
  // the mutex, such that deletion can reenter the WasmEngine.
  std::vector<std::unique_ptr<AsyncCompileJob>> jobs_to_delete;
  {
    base::MutexGuard guard(&mutex_);
    for (auto it = async_compile_jobs_.begin();
         it != async_compile_jobs_.end();) {
      if (!it->first->context().is_identical_to(context)) {
        ++it;
        continue;
      }
      jobs_to_delete.push_back(std::move(it->second));
      it = async_compile_jobs_.erase(it);
    }
  }
}

void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
  // Under the mutex get all jobs to delete. Then delete them without holding
  // the mutex, such that deletion can reenter the WasmEngine.
  std::vector<std::unique_ptr<AsyncCompileJob>> jobs_to_delete;
  {
    base::MutexGuard guard(&mutex_);
    DCHECK_EQ(1, isolates_.count(isolate));
    for (auto it = async_compile_jobs_.begin();
         it != async_compile_jobs_.end();) {
      if (it->first->isolate() != isolate) {
        ++it;
        continue;
      }
      jobs_to_delete.push_back(std::move(it->second));
      it = async_compile_jobs_.erase(it);
    }
  }
}

void WasmEngine::AddIsolate(Isolate* isolate) {
  base::MutexGuard guard(&mutex_);
  DCHECK_EQ(0, isolates_.count(isolate));
  isolates_.emplace(isolate, std::make_unique<IsolateInfo>(isolate));

  // Install sampling GC callback.
  // TODO(v8:7424): For now we sample module sizes in a GC callback. This will
  // bias samples towards apps with high memory pressure. We should switch to
  // using sampling based on regular intervals independent of the GC.
  auto callback = [](v8::Isolate* v8_isolate, v8::GCType type,
                     v8::GCCallbackFlags flags, void* data) {
    Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
    Counters* counters = isolate->counters();
    WasmEngine* engine = isolate->wasm_engine();
    base::MutexGuard lock(&engine->mutex_);
    DCHECK_EQ(1, engine->isolates_.count(isolate));
    for (auto* native_module : engine->isolates_[isolate]->native_modules) {
      native_module->SampleCodeSize(counters, NativeModule::kSampling);
    }
  };
  isolate->heap()->AddGCEpilogueCallback(callback, v8::kGCTypeMarkSweepCompact,
                                         nullptr);
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
  if (gdb_server_) {
    gdb_server_->AddIsolate(isolate);
  }
#endif  // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
}

void WasmEngine::RemoveIsolate(Isolate* isolate) {
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
  if (gdb_server_) {
    gdb_server_->RemoveIsolate(isolate);
  }
#endif  // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING

  base::MutexGuard guard(&mutex_);
  auto it = isolates_.find(isolate);
  DCHECK_NE(isolates_.end(), it);
  std::unique_ptr<IsolateInfo> info = std::move(it->second);
  isolates_.erase(it);
  for (NativeModule* native_module : info->native_modules) {
    DCHECK_EQ(1, native_modules_.count(native_module));
    DCHECK_EQ(1, native_modules_[native_module]->isolates.count(isolate));
    auto* module_info = native_modules_[native_module].get();
    module_info->isolates.erase(isolate);
    if (current_gc_info_) {
      for (WasmCode* code : module_info->potentially_dead_code) {
        current_gc_info_->dead_code.erase(code);
      }
    }
  }
  if (current_gc_info_) {
    if (RemoveIsolateFromCurrentGC(isolate)) PotentiallyFinishCurrentGC();
  }
  if (auto* task = info->log_codes_task) task->Cancel();
  if (!info->code_to_log.empty()) {
    WasmCode::DecrementRefCount(VectorOf(info->code_to_log));
    info->code_to_log.clear();
  }
}

void WasmEngine::LogCode(Vector<WasmCode*> code_vec) {
  if (code_vec.empty()) return;
  base::MutexGuard guard(&mutex_);
  NativeModule* native_module = code_vec[0]->native_module();
  DCHECK_EQ(1, native_modules_.count(native_module));
  for (Isolate* isolate : native_modules_[native_module]->isolates) {
    DCHECK_EQ(1, isolates_.count(isolate));
    IsolateInfo* info = isolates_[isolate].get();
    if (!info->log_codes) continue;
    if (info->log_codes_task == nullptr) {
      auto new_task = std::make_unique<LogCodesTask>(
          &mutex_, &info->log_codes_task, isolate, this);
      info->log_codes_task = new_task.get();
      info->foreground_task_runner->PostTask(std::move(new_task));
    }
    if (info->code_to_log.empty()) {
      isolate->stack_guard()->RequestLogWasmCode();
    }
    info->code_to_log.insert(info->code_to_log.end(), code_vec.begin(),
                             code_vec.end());
    for (WasmCode* code : code_vec) {
      DCHECK_EQ(native_module, code->native_module());
      code->IncRef();
    }
  }
}

void WasmEngine::EnableCodeLogging(Isolate* isolate) {
  base::MutexGuard guard(&mutex_);
  auto it = isolates_.find(isolate);
  DCHECK_NE(isolates_.end(), it);
  it->second->log_codes = true;
}

void WasmEngine::LogOutstandingCodesForIsolate(Isolate* isolate) {
  // If by now we should not log code any more, do not log it.
  if (!WasmCode::ShouldBeLogged(isolate)) return;

  // Under the mutex, get the vector of wasm code to log. Then log and decrement
  // the ref count without holding the mutex.
  std::vector<WasmCode*> code_to_log;
  {
    base::MutexGuard guard(&mutex_);
    DCHECK_EQ(1, isolates_.count(isolate));
    code_to_log.swap(isolates_[isolate]->code_to_log);
  }
  if (code_to_log.empty()) return;
  for (WasmCode* code : code_to_log) {
    code->LogCode(isolate);
  }
  WasmCode::DecrementRefCount(VectorOf(code_to_log));
}

std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
    Isolate* isolate, const WasmFeatures& enabled,
    std::shared_ptr<const WasmModule> module, size_t code_size_estimate) {
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
  if (FLAG_wasm_gdb_remote && !gdb_server_) {
    gdb_server_ = gdb_server::GdbServer::Create();
    gdb_server_->AddIsolate(isolate);
  }
#endif  // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING

  std::shared_ptr<NativeModule> native_module = code_manager_.NewNativeModule(
      this, isolate, enabled, code_size_estimate, std::move(module));
  base::MutexGuard lock(&mutex_);
  auto pair = native_modules_.insert(std::make_pair(
      native_module.get(), std::make_unique<NativeModuleInfo>()));
  DCHECK(pair.second);  // inserted new entry.
  pair.first->second.get()->isolates.insert(isolate);
  isolates_[isolate]->native_modules.insert(native_module.get());
  if (isolates_[isolate]->keep_tiered_down) {
    native_module->SetTieredDown();
  }
  return native_module;
}

std::shared_ptr<NativeModule> WasmEngine::MaybeGetNativeModule(
    ModuleOrigin origin, Vector<const uint8_t> wire_bytes, Isolate* isolate) {
  std::shared_ptr<NativeModule> native_module =
      native_module_cache_.MaybeGetNativeModule(origin, wire_bytes);
  if (native_module) {
    base::MutexGuard guard(&mutex_);
    auto& native_module_info = native_modules_[native_module.get()];
    if (!native_module_info) {
      native_module_info = std::make_unique<NativeModuleInfo>();
    }
    native_module_info->isolates.insert(isolate);
    isolates_[isolate]->native_modules.insert(native_module.get());
  }
  return native_module;
}

bool WasmEngine::UpdateNativeModuleCache(
    bool error, std::shared_ptr<NativeModule>* native_module,
    Isolate* isolate) {
  // Pass {native_module} by value here to keep it alive until at least after
  // we returned from {Update}. Otherwise, we might {Erase} it inside {Update}
  // which would lock the mutex twice.
  auto prev = native_module->get();
  *native_module = native_module_cache_.Update(*native_module, error);

  if (prev == native_module->get()) return true;

  base::MutexGuard guard(&mutex_);
  auto& native_module_info = native_modules_[native_module->get()];
  if (!native_module_info) {
    native_module_info = std::make_unique<NativeModuleInfo>();
  }
  native_module_info->isolates.insert(isolate);
  isolates_[isolate]->native_modules.insert((*native_module).get());
  return false;
}

bool WasmEngine::GetStreamingCompilationOwnership(size_t prefix_hash) {
  return native_module_cache_.GetStreamingCompilationOwnership(prefix_hash);
}

void WasmEngine::StreamingCompilationFailed(size_t prefix_hash) {
  native_module_cache_.StreamingCompilationFailed(prefix_hash);
}

void WasmEngine::FreeNativeModule(NativeModule* native_module) {
  base::MutexGuard guard(&mutex_);
  auto it = native_modules_.find(native_module);
  DCHECK_NE(native_modules_.end(), it);
  for (Isolate* isolate : it->second->isolates) {
    DCHECK_EQ(1, isolates_.count(isolate));
    IsolateInfo* info = isolates_[isolate].get();
    DCHECK_EQ(1, info->native_modules.count(native_module));
    info->native_modules.erase(native_module);
    info->scripts.erase(native_module);
    // If there are {WasmCode} objects of the deleted {NativeModule}
    // outstanding to be logged in this isolate, remove them. Decrementing the
    // ref count is not needed, since the {NativeModule} dies anyway.
    size_t remaining = info->code_to_log.size();
    if (remaining > 0) {
      for (size_t i = 0; i < remaining; ++i) {
        while (i < remaining &&
               info->code_to_log[i]->native_module() == native_module) {
          // Move the last remaining item to this slot (this can be the same
          // as {i}, which is OK).
          info->code_to_log[i] = info->code_to_log[--remaining];
        }
      }
      info->code_to_log.resize(remaining);
    }
  }
  // If there is a GC running which has references to code contained in the
  // deleted {NativeModule}, remove those references.
  if (current_gc_info_) {
    for (auto it = current_gc_info_->dead_code.begin(),
              end = current_gc_info_->dead_code.end();
         it != end;) {
      if ((*it)->native_module() == native_module) {
        it = current_gc_info_->dead_code.erase(it);
      } else {
        ++it;
      }
    }
    TRACE_CODE_GC("Native module %p died, reducing dead code objects to %zu.\n",
                  native_module, current_gc_info_->dead_code.size());
  }
  native_module_cache_.Erase(native_module);
  native_modules_.erase(it);
}

namespace {
class SampleTopTierCodeSizeTask : public CancelableTask {
 public:
  SampleTopTierCodeSizeTask(Isolate* isolate,
                            std::weak_ptr<NativeModule> native_module)
      : CancelableTask(isolate),
        isolate_(isolate),
        native_module_(std::move(native_module)) {}

  void RunInternal() override {
    if (std::shared_ptr<NativeModule> native_module = native_module_.lock()) {
      native_module->SampleCodeSize(isolate_->counters(),
                                    NativeModule::kAfterTopTier);
    }
  }

 private:
  Isolate* const isolate_;
  const std::weak_ptr<NativeModule> native_module_;
};
}  // namespace

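// Posts a {SampleTopTierCodeSizeTask} to every isolate that currently uses
// {native_module}.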
void WasmEngine::SampleTopTierCodeSizeInAllIsolates(
    const std::shared_ptr<NativeModule>& native_module) {
  base::MutexGuard lock(&mutex_);
  DCHECK_EQ(1, native_modules_.count(native_module.get()));
  for (Isolate* isolate : native_modules_[native_module.get()]->isolates) {
    DCHECK_EQ(1, isolates_.count(isolate));
    IsolateInfo* info = isolates_[isolate].get();
    info->foreground_task_runner->PostTask(
        std::make_unique<SampleTopTierCodeSizeTask>(isolate, native_module));
  }
}

void WasmEngine::ReportLiveCodeForGC(Isolate* isolate,
                                     Vector<WasmCode*> live_code) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "ReportLiveCodeForGC");
  TRACE_CODE_GC("Isolate %d reporting %zu live code objects.\n", isolate->id(),
                live_code.size());
  base::MutexGuard guard(&mutex_);
  // This report might come in late (note that we trigger both a stack guard and
  // a foreground task). In that case, ignore it.
  if (current_gc_info_ == nullptr) return;
  if (!RemoveIsolateFromCurrentGC(isolate)) return;
  isolate->counters()->wasm_module_num_triggered_code_gcs()->AddSample(
      current_gc_info_->gc_sequence_index);
  for (WasmCode* code : live_code) current_gc_info_->dead_code.erase(code);
  PotentiallyFinishCurrentGC();
}

void WasmEngine::ReportLiveCodeFromStackForGC(Isolate* isolate) {
  wasm::WasmCodeRefScope code_ref_scope;
  std::unordered_set<wasm::WasmCode*> live_wasm_code;
  for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
    StackFrame* const frame = it.frame();
    if (frame->type() != StackFrame::WASM_COMPILED) continue;
    live_wasm_code.insert(WasmCompiledFrame::cast(frame)->wasm_code());
  }

  CheckNoArchivedThreads(isolate);

  ReportLiveCodeForGC(isolate,
                      OwnedVector<WasmCode*>::Of(live_wasm_code).as_vector());
}

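// Registers {code} as potentially dead. Returns false if the code was already
// dead or already registered; otherwise returns true and, if enough
// potentially dead code has accumulated, triggers or schedules a wasm code GC.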
bool WasmEngine::AddPotentiallyDeadCode(WasmCode* code) {
  base::MutexGuard guard(&mutex_);
  auto it = native_modules_.find(code->native_module());
  DCHECK_NE(native_modules_.end(), it);
  NativeModuleInfo* info = it->second.get();
  if (info->dead_code.count(code)) return false;  // Code is already dead.
  auto added = info->potentially_dead_code.insert(code);
  if (!added.second) return false;  // An entry already existed.
  new_potentially_dead_code_size_ += code->instructions().size();
  if (FLAG_wasm_code_gc) {
    // Trigger a GC if 64kB plus 10% of committed code are potentially dead.
    size_t dead_code_limit =
        FLAG_stress_wasm_code_gc
            ? 0
            : 64 * KB + code_manager_.committed_code_space() / 10;
    if (new_potentially_dead_code_size_ > dead_code_limit) {
      bool inc_gc_count =
          info->num_code_gcs_triggered < std::numeric_limits<int8_t>::max();
      if (current_gc_info_ == nullptr) {
        if (inc_gc_count) ++info->num_code_gcs_triggered;
        TRACE_CODE_GC(
            "Triggering GC (potentially dead: %zu bytes; limit: %zu bytes).\n",
            new_potentially_dead_code_size_, dead_code_limit);
        TriggerGC(info->num_code_gcs_triggered);
      } else if (current_gc_info_->next_gc_sequence_index == 0) {
        if (inc_gc_count) ++info->num_code_gcs_triggered;
        TRACE_CODE_GC(
            "Scheduling another GC after the current one (potentially dead: "
            "%zu bytes; limit: %zu bytes).\n",
            new_potentially_dead_code_size_, dead_code_limit);
        current_gc_info_->next_gc_sequence_index = info->num_code_gcs_triggered;
        DCHECK_NE(0, current_gc_info_->next_gc_sequence_index);
      }
    }
  }
  return true;
}

void WasmEngine::FreeDeadCode(const DeadCodeMap& dead_code) {
  base::MutexGuard guard(&mutex_);
  FreeDeadCodeLocked(dead_code);
}

void WasmEngine::FreeDeadCodeLocked(const DeadCodeMap& dead_code) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "FreeDeadCode");
  DCHECK(!mutex_.TryLock());
  for (auto& dead_code_entry : dead_code) {
    NativeModule* native_module = dead_code_entry.first;
    const std::vector<WasmCode*>& code_vec = dead_code_entry.second;
    DCHECK_EQ(1, native_modules_.count(native_module));
    auto* info = native_modules_[native_module].get();
    TRACE_CODE_GC("Freeing %zu code object%s of module %p.\n", code_vec.size(),
                  code_vec.size() == 1 ? "" : "s", native_module);
    for (WasmCode* code : code_vec) {
      DCHECK_EQ(1, info->dead_code.count(code));
      info->dead_code.erase(code);
    }
    native_module->FreeCode(VectorOf(code_vec));
  }
}

Handle<Script> WasmEngine::GetOrCreateScript(
    Isolate* isolate, const std::shared_ptr<NativeModule>& native_module,
    Vector<const char> source_url) {
  {
    base::MutexGuard guard(&mutex_);
    DCHECK_EQ(1, isolates_.count(isolate));
    auto& scripts = isolates_[isolate]->scripts;
    auto it = scripts.find(native_module.get());
    if (it != scripts.end()) {
      Handle<Script> weak_global_handle = it->second.handle();
      if (weak_global_handle.is_null()) {
        scripts.erase(it);
      } else {
        return Handle<Script>::New(*weak_global_handle, isolate);
      }
    }
  }
  // Temporarily release the mutex to let the GC collect native modules.
  auto script = CreateWasmScript(isolate, native_module, source_url);
  {
    base::MutexGuard guard(&mutex_);
    DCHECK_EQ(1, isolates_.count(isolate));
    auto& scripts = isolates_[isolate]->scripts;
    DCHECK_EQ(0, scripts.count(native_module.get()));
    scripts.emplace(native_module.get(), WeakScriptHandle(script));
    return script;
  }
}

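// Starts a new wasm code GC: records all potentially dead code in
// {current_gc_info_} and posts a {WasmGCForegroundTask} plus a stack guard
// request to every isolate using an affected native module. Must be called
// with {mutex_} held.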
void WasmEngine::TriggerGC(int8_t gc_sequence_index) {
  DCHECK(!mutex_.TryLock());
  DCHECK_NULL(current_gc_info_);
  DCHECK(FLAG_wasm_code_gc);
  new_potentially_dead_code_size_ = 0;
  current_gc_info_.reset(new CurrentGCInfo(gc_sequence_index));
  // Add all potentially dead code to this GC, and trigger a GC task in each
  // isolate.
  for (auto& entry : native_modules_) {
    NativeModuleInfo* info = entry.second.get();
    if (info->potentially_dead_code.empty()) continue;
    for (auto* isolate : native_modules_[entry.first]->isolates) {
      auto& gc_task = current_gc_info_->outstanding_isolates[isolate];
      if (!gc_task) {
        auto new_task = std::make_unique<WasmGCForegroundTask>(isolate);
        gc_task = new_task.get();
        DCHECK_EQ(1, isolates_.count(isolate));
        isolates_[isolate]->foreground_task_runner->PostTask(
            std::move(new_task));
      }
      isolate->stack_guard()->RequestWasmCodeGC();
    }
    for (WasmCode* code : info->potentially_dead_code) {
      current_gc_info_->dead_code.insert(code);
    }
  }
  TRACE_CODE_GC(
      "Starting GC. Total number of potentially dead code objects: %zu\n",
      current_gc_info_->dead_code.size());
  // Ensure that there are outstanding isolates that will eventually finish this
  // GC. If there are no outstanding isolates, we finish the GC immediately.
  PotentiallyFinishCurrentGC();
  DCHECK(current_gc_info_ == nullptr ||
         !current_gc_info_->outstanding_isolates.empty());
}

bool WasmEngine::RemoveIsolateFromCurrentGC(Isolate* isolate) {
  DCHECK(!mutex_.TryLock());
  DCHECK_NOT_NULL(current_gc_info_);
  return current_gc_info_->outstanding_isolates.erase(isolate) != 0;
}

void WasmEngine::PotentiallyFinishCurrentGC() {
  DCHECK(!mutex_.TryLock());
  TRACE_CODE_GC(
      "Remaining dead code objects: %zu; outstanding isolates: %zu.\n",
      current_gc_info_->dead_code.size(),
      current_gc_info_->outstanding_isolates.size());

  // If there are more outstanding isolates, return immediately.
  if (!current_gc_info_->outstanding_isolates.empty()) return;

  // All remaining code in {current_gc_info->dead_code} is really dead.
  // Move it from the set of potentially dead code to the set of dead code,
  // and decrement its ref count.
  size_t num_freed = 0;
  DeadCodeMap dead_code;
  for (WasmCode* code : current_gc_info_->dead_code) {
    DCHECK_EQ(1, native_modules_.count(code->native_module()));
    auto* native_module_info = native_modules_[code->native_module()].get();
    DCHECK_EQ(1, native_module_info->potentially_dead_code.count(code));
    native_module_info->potentially_dead_code.erase(code);
    DCHECK_EQ(0, native_module_info->dead_code.count(code));
    native_module_info->dead_code.insert(code);
    if (code->DecRefOnDeadCode()) {
      dead_code[code->native_module()].push_back(code);
      ++num_freed;
    }
  }

  FreeDeadCodeLocked(dead_code);

  TRACE_CODE_GC("Found %zu dead code objects, freed %zu.\n",
                current_gc_info_->dead_code.size(), num_freed);
  USE(num_freed);

  int8_t next_gc_sequence_index = current_gc_info_->next_gc_sequence_index;
  current_gc_info_.reset();
  if (next_gc_sequence_index != 0) TriggerGC(next_gc_sequence_index);
}

namespace {

DEFINE_LAZY_LEAKY_OBJECT_GETTER(std::shared_ptr<WasmEngine>,
                                GetSharedWasmEngine)

}  // namespace

// static
void WasmEngine::InitializeOncePerProcess() {
  *GetSharedWasmEngine() = std::make_shared<WasmEngine>();
}

// static
void WasmEngine::GlobalTearDown() {
  GetSharedWasmEngine()->reset();
}

// static
std::shared_ptr<WasmEngine> WasmEngine::GetWasmEngine() {
  return *GetSharedWasmEngine();
}

// {max_initial_mem_pages} is declared in wasm-limits.h.
uint32_t max_initial_mem_pages() {
  STATIC_ASSERT(kV8MaxWasmMemoryPages <= kMaxUInt32);
  return std::min(uint32_t{kV8MaxWasmMemoryPages}, FLAG_wasm_max_mem_pages);
}

uint32_t max_maximum_mem_pages() {
  STATIC_ASSERT(kV8MaxWasmMemoryPages <= kMaxUInt32);
  return std::min(uint32_t{kV8MaxWasmMemoryPages},
                  FLAG_wasm_max_mem_pages_growth);
}

// {max_table_init_entries} is declared in wasm-limits.h.
uint32_t max_table_init_entries() {
  return std::min(uint32_t{kV8MaxWasmTableInitEntries},
                  FLAG_wasm_max_table_size);
}

#undef TRACE_CODE_GC

}  // namespace wasm
}  // namespace internal
}  // namespace v8