// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/wasm-engine.h"

#include "src/base/functional.h"
#include "src/base/platform/time.h"
#include "src/common/globals.h"
#include "src/diagnostics/code-tracer.h"
#include "src/diagnostics/compilation-statistics.h"
#include "src/execution/frames.h"
#include "src/execution/v8threads.h"
#include "src/logging/counters.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-promise.h"
#include "src/objects/objects-inl.h"
#include "src/strings/string-hasher-inl.h"
#include "src/utils/ostreams.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/module-instantiate.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"

#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
#include "src/debug/wasm/gdb-server/gdb-server.h"
#endif  // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING

namespace v8 {
namespace internal {
namespace wasm {

#define TRACE_CODE_GC(...)                                         \
  do {                                                             \
    if (FLAG_trace_wasm_code_gc) PrintF("[wasm-gc] " __VA_ARGS__); \
  } while (false)

namespace {
// A task to log a set of {WasmCode} objects in an isolate. It does not own any
// data itself; it is owned by the platform, so its lifetime is not tied to the
// wasm engine.
class LogCodesTask : public Task {
 public:
  LogCodesTask(base::Mutex* mutex, LogCodesTask** task_slot, Isolate* isolate,
               WasmEngine* engine)
      : mutex_(mutex),
        task_slot_(task_slot),
        isolate_(isolate),
        engine_(engine) {
    DCHECK_NOT_NULL(task_slot);
    DCHECK_NOT_NULL(isolate);
  }

  ~LogCodesTask() override {
    // If the platform deletes this task before executing it, we also deregister
    // it to avoid use-after-free from still-running background threads.
    if (!cancelled()) DeregisterTask();
  }

  void Run() override {
    if (cancelled()) return;
    DeregisterTask();
    engine_->LogOutstandingCodesForIsolate(isolate_);
  }

  void Cancel() {
    // Cancel will only be called on Isolate shutdown, which happens on the
    // Isolate's foreground thread. Thus no synchronization needed.
    isolate_ = nullptr;
  }

  bool cancelled() const { return isolate_ == nullptr; }

  void DeregisterTask() {
    // The task will only be deregistered from the foreground thread (executing
    // this task or calling its destructor), thus we do not need synchronization
    // on this field access.
    if (task_slot_ == nullptr) return;  // already deregistered.
    // Remove this task from the {IsolateInfo} in the engine. The next
    // logging request will allocate and schedule a new task.
    base::MutexGuard guard(mutex_);
    DCHECK_EQ(this, *task_slot_);
    *task_slot_ = nullptr;
    task_slot_ = nullptr;
  }

 private:
  // The mutex of the WasmEngine.
  base::Mutex* const mutex_;
  // The slot in the WasmEngine where this LogCodesTask is stored. This is
  // cleared by this task before execution or on task destruction.
  LogCodesTask** task_slot_;
  Isolate* isolate_;
  WasmEngine* const engine_;
};
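
// Usage sketch (see {WasmEngine::LogCode} below): when new code needs to be
// logged, the engine allocates one {LogCodesTask} per isolate, stores the raw
// pointer in {IsolateInfo::log_codes_task}, and posts the task to that
// isolate's foreground task runner. The task then calls
// {LogOutstandingCodesForIsolate}, which drains {IsolateInfo::code_to_log}.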

void CheckNoArchivedThreads(Isolate* isolate) {
  class ArchivedThreadsVisitor : public ThreadVisitor {
    void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
      // Archived threads are rarely used, and not combined with Wasm at the
      // moment. Implement this and test it properly once we have a use case for
      // that.
      FATAL("archived threads in combination with wasm not supported");
    }
  } archived_threads_visitor;
  isolate->thread_manager()->IterateArchivedThreads(&archived_threads_visitor);
}

class WasmGCForegroundTask : public CancelableTask {
 public:
  explicit WasmGCForegroundTask(Isolate* isolate)
      : CancelableTask(isolate->cancelable_task_manager()), isolate_(isolate) {}

  void RunInternal() final {
    WasmEngine* engine = isolate_->wasm_engine();
    // If the foreground task is executing, there is no wasm code active. Just
    // report an empty set of live wasm code.
#ifdef ENABLE_SLOW_DCHECKS
    for (StackFrameIterator it(isolate_); !it.done(); it.Advance()) {
      DCHECK_NE(StackFrame::WASM, it.frame()->type());
    }
#endif
    CheckNoArchivedThreads(isolate_);
    engine->ReportLiveCodeForGC(isolate_, Vector<WasmCode*>{});
  }

 private:
  Isolate* isolate_;
};

class WeakScriptHandle {
 public:
  explicit WeakScriptHandle(Handle<Script> handle) {
    auto global_handle =
        handle->GetIsolate()->global_handles()->Create(*handle);
    location_ = std::make_unique<Address*>(global_handle.location());
    GlobalHandles::MakeWeak(location_.get());
  }

  // The destructor of this class should usually be called after the weak
  // callback, because the Script keeps the NativeModule alive. So we would
  // expect the handle to be destroyed and the location to be reset already.
  // We cannot check this because of one exception: when the native module is
  // freed during isolate shutdown, the destructor is called first, and the
  // callback is never called.
  ~WeakScriptHandle() = default;

  WeakScriptHandle(WeakScriptHandle&&) V8_NOEXCEPT = default;

  Handle<Script> handle() { return Handle<Script>(*location_); }

 private:
  // Store the location in a unique_ptr so that its address stays the same even
  // when this object is moved/copied.
  std::unique_ptr<Address*> location_;
};

}  // namespace

std::shared_ptr<NativeModule> NativeModuleCache::MaybeGetNativeModule(
    ModuleOrigin origin, Vector<const uint8_t> wire_bytes) {
  if (origin != kWasmOrigin) return nullptr;
  base::MutexGuard lock(&mutex_);
  size_t prefix_hash = PrefixHash(wire_bytes);
  NativeModuleCache::Key key{prefix_hash, wire_bytes};
  while (true) {
    auto it = map_.find(key);
    if (it == map_.end()) {
      // Even though this exact key is not in the cache, there might be a
      // matching prefix hash indicating that a streaming compilation is
      // currently compiling a module with the same prefix. {OnFinishedStream}
      // happens on the main thread too, so waiting for streaming compilation to
      // finish would create a deadlock. Instead, compile the module twice and
      // handle the conflict in {UpdateNativeModuleCache}.

      // Insert a {nullopt} entry to let other threads know that this
      // {NativeModule} is already being created on another thread.
      auto p = map_.emplace(key, base::nullopt);
      USE(p);
      DCHECK(p.second);
      return nullptr;
    }
    if (it->second.has_value()) {
      if (auto shared_native_module = it->second.value().lock()) {
        DCHECK_EQ(shared_native_module->wire_bytes(), wire_bytes);
        return shared_native_module;
      }
    }
    cache_cv_.Wait(&mutex_);
  }
}
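
// For illustration, a hypothetical interleaving of two threads compiling the
// same wire bytes:
//   thread A: {MaybeGetNativeModule} finds no entry, inserts a {nullopt}
//             placeholder and returns nullptr, so A compiles the module;
//   thread B: {MaybeGetNativeModule} finds the placeholder and waits on
//             {cache_cv_};
//   thread A: {Update} replaces the placeholder with the finished module and
//             calls NotifyAll();
//   thread B: wakes up, locks the weak_ptr, and returns the shared module.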

bool NativeModuleCache::GetStreamingCompilationOwnership(size_t prefix_hash) {
  base::MutexGuard lock(&mutex_);
  auto it = map_.lower_bound(Key{prefix_hash, {}});
  if (it != map_.end() && it->first.prefix_hash == prefix_hash) {
    DCHECK_IMPLIES(!it->first.bytes.empty(),
                   PrefixHash(it->first.bytes) == prefix_hash);
    return false;
  }
  Key key{prefix_hash, {}};
  DCHECK_EQ(0, map_.count(key));
  map_.emplace(key, base::nullopt);
  return true;
}

void NativeModuleCache::StreamingCompilationFailed(size_t prefix_hash) {
  base::MutexGuard lock(&mutex_);
  Key key{prefix_hash, {}};
  DCHECK_EQ(1, map_.count(key));
  map_.erase(key);
  cache_cv_.NotifyAll();
}

std::shared_ptr<NativeModule> NativeModuleCache::Update(
    std::shared_ptr<NativeModule> native_module, bool error) {
  DCHECK_NOT_NULL(native_module);
  if (native_module->module()->origin != kWasmOrigin) return native_module;
  Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
  DCHECK(!wire_bytes.empty());
  size_t prefix_hash = PrefixHash(native_module->wire_bytes());
  base::MutexGuard lock(&mutex_);
  map_.erase(Key{prefix_hash, {}});
  const Key key{prefix_hash, wire_bytes};
  auto it = map_.find(key);
  if (it != map_.end()) {
    if (it->second.has_value()) {
      auto conflicting_module = it->second.value().lock();
      if (conflicting_module != nullptr) {
        DCHECK_EQ(conflicting_module->wire_bytes(), wire_bytes);
        return conflicting_module;
      }
    }
    map_.erase(it);
  }
  if (!error) {
    // The key now points to the new native module's owned copy of the bytes,
    // so that it stays valid until the native module is freed and erased from
    // the map.
    auto p = map_.emplace(
        key, base::Optional<std::weak_ptr<NativeModule>>(native_module));
    USE(p);
    DCHECK(p.second);
  }
  cache_cv_.NotifyAll();
  return native_module;
}

void NativeModuleCache::Erase(NativeModule* native_module) {
  if (native_module->module()->origin != kWasmOrigin) return;
  // Happens in some tests where bytes are set directly.
  if (native_module->wire_bytes().empty()) return;
  base::MutexGuard lock(&mutex_);
  size_t prefix_hash = PrefixHash(native_module->wire_bytes());
  map_.erase(Key{prefix_hash, native_module->wire_bytes()});
  cache_cv_.NotifyAll();
}

// static
size_t NativeModuleCache::WireBytesHash(Vector<const uint8_t> bytes) {
  return StringHasher::HashSequentialString(
      reinterpret_cast<const char*>(bytes.begin()), bytes.length(),
      kZeroHashSeed);
}

// static
size_t NativeModuleCache::PrefixHash(Vector<const uint8_t> wire_bytes) {
  // Compute the hash as a combined hash of the sections up to the code section
  // header, to mirror the way streaming compilation does it.
  Decoder decoder(wire_bytes.begin(), wire_bytes.end());
  decoder.consume_bytes(8, "module header");
  size_t hash = NativeModuleCache::WireBytesHash(wire_bytes.SubVector(0, 8));
  SectionCode section_id = SectionCode::kUnknownSectionCode;
  while (decoder.ok() && decoder.more()) {
    section_id = static_cast<SectionCode>(decoder.consume_u8());
    uint32_t section_size = decoder.consume_u32v("section size");
    if (section_id == SectionCode::kCodeSectionCode) {
      uint32_t num_functions = decoder.consume_u32v("num functions");
      // If {num_functions} is 0, the streaming decoder skips the section. Do
      // the same here to ensure hashes are consistent.
      if (num_functions != 0) {
        hash = base::hash_combine(hash, section_size);
      }
      break;
    }
    const uint8_t* payload_start = decoder.pc();
    decoder.consume_bytes(section_size, "section payload");
    size_t section_hash = NativeModuleCache::WireBytesHash(
        Vector<const uint8_t>(payload_start, section_size));
    hash = base::hash_combine(hash, section_hash);
  }
  return hash;
}
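
// For illustration (hypothetical module): for wire bytes consisting of the
// 8-byte module header, a type section, and a non-empty code section, the
// prefix hash computed above is roughly
//   hash_combine(WireBytesHash(header bytes),
//                WireBytesHash(type section payload),
//                code section size),
// i.e. it covers everything up to (but excluding) the function bodies,
// mirroring what the streaming decoder has seen when it starts compiling.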

struct WasmEngine::CurrentGCInfo {
  explicit CurrentGCInfo(int8_t gc_sequence_index)
      : gc_sequence_index(gc_sequence_index) {
    DCHECK_NE(0, gc_sequence_index);
  }

  // Set of isolates that did not scan their stack yet for used WasmCode, and
  // their scheduled foreground task.
  std::unordered_map<Isolate*, WasmGCForegroundTask*> outstanding_isolates;

  // Set of dead code. Filled with all potentially dead code on initialization.
  // Code that is still in-use is removed by the individual isolates.
  std::unordered_set<WasmCode*> dead_code;

  // The number of GCs triggered in the native module that triggered this GC.
  // This is stored in the histogram for each participating isolate during
  // execution of that isolate's foreground task.
  const int8_t gc_sequence_index;

  // If another GC was requested while this one was running, that other GC was
  // skipped (we only run one GC at a time), but we remember to trigger it once
  // this one finishes. {next_gc_sequence_index} is 0 if no next GC is needed,
  // and >0 otherwise; it stores the {num_code_gcs_triggered} of the native
  // module which triggered the next GC.
  int8_t next_gc_sequence_index = 0;

  // The start time of this GC; used for tracing and sampled via {Counters}.
  // Can be null ({TimeTicks::IsNull()}) if timer is not high resolution.
  base::TimeTicks start_time;
};

struct WasmEngine::IsolateInfo {
  explicit IsolateInfo(Isolate* isolate)
      : log_codes(WasmCode::ShouldBeLogged(isolate)),
        async_counters(isolate->async_counters()) {
    v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
    v8::Platform* platform = V8::GetCurrentPlatform();
    foreground_task_runner = platform->GetForegroundTaskRunner(v8_isolate);
  }

#ifdef DEBUG
  ~IsolateInfo() {
    // Before destructing, the {WasmEngine} must have cleared outstanding code
    // to log.
    DCHECK_EQ(0, code_to_log.size());
  }
#endif

  // All native modules that are being used by this Isolate.
  std::unordered_map<NativeModule*, std::weak_ptr<NativeModule>> native_modules;

  // Scripts created for each native module in this isolate.
  std::unordered_map<NativeModule*, WeakScriptHandle> scripts;

  // Caches whether code needs to be logged on this isolate.
  bool log_codes;

  // The currently scheduled LogCodesTask.
  LogCodesTask* log_codes_task = nullptr;

  // The vector of code objects that still need to be logged in this isolate.
  std::vector<WasmCode*> code_to_log;

  // The foreground task runner of the isolate (can be called from background).
  std::shared_ptr<v8::TaskRunner> foreground_task_runner;

  const std::shared_ptr<Counters> async_counters;

  // Keep new modules in tiered down state.
  bool keep_tiered_down = false;
};

struct WasmEngine::NativeModuleInfo {
  // Set of isolates using this NativeModule.
  std::unordered_set<Isolate*> isolates;

  // Set of potentially dead code. This set holds one ref for each code object,
  // until code is detected to be really dead. At that point, the ref count is
  // decremented and the code is moved to the {dead_code} set. If the code is
  // finally deleted, it is also removed from {dead_code}.
  std::unordered_set<WasmCode*> potentially_dead_code;

  // Code that is not being executed in any isolate any more, but the ref count
  // did not drop to zero yet.
  std::unordered_set<WasmCode*> dead_code;

  // Number of code GCs triggered because code in this native module became
  // potentially dead.
  int8_t num_code_gcs_triggered = 0;
};

WasmEngine::WasmEngine() : code_manager_(FLAG_wasm_max_code_space * MB) {}

WasmEngine::~WasmEngine() {
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
  // Synchronize on the GDB-remote thread, if running.
  gdb_server_.reset();
#endif  // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING

  // Synchronize on all background compile tasks.
  background_compile_task_manager_.CancelAndWait();
  // All AsyncCompileJobs have been canceled.
  DCHECK(async_compile_jobs_.empty());
  // All Isolates have been deregistered.
  DCHECK(isolates_.empty());
  // All NativeModules have died.
  DCHECK(native_modules_.empty());
  // Native module cache does not leak.
  DCHECK(native_module_cache_.empty());
}

bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
                              const ModuleWireBytes& bytes) {
  TRACE_EVENT0("v8.wasm", "wasm.SyncValidate");
  // TODO(titzer): remove dependency on the isolate.
  if (bytes.start() == nullptr || bytes.length() == 0) return false;
  ModuleResult result =
      DecodeWasmModule(enabled, bytes.start(), bytes.end(), true, kWasmOrigin,
                       isolate->counters(), allocator());
  return result.ok();
}

MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
    Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
    Vector<const byte> asm_js_offset_table_bytes,
    Handle<HeapNumber> uses_bitset, LanguageMode language_mode) {
  TRACE_EVENT0("v8.wasm", "wasm.SyncCompileTranslatedAsmJs");
  ModuleOrigin origin = language_mode == LanguageMode::kSloppy
                            ? kAsmJsSloppyOrigin
                            : kAsmJsStrictOrigin;
  ModuleResult result =
      DecodeWasmModule(WasmFeatures::ForAsmjs(), bytes.start(), bytes.end(),
                       false, origin, isolate->counters(), allocator());
  if (result.failed()) {
    // This happens once in a while when we have missed some limit check
    // in the asm parser. Output an error message to help diagnose, but crash.
    std::cout << result.error().message();
    UNREACHABLE();
  }

  result.value()->asm_js_offset_information =
      std::make_unique<AsmJsOffsetInformation>(asm_js_offset_table_bytes);

  // Transfer ownership of the WasmModule to the {Managed<WasmModule>} generated
  // in {CompileToNativeModule}.
  Handle<FixedArray> export_wrappers;
  std::shared_ptr<NativeModule> native_module =
      CompileToNativeModule(isolate, WasmFeatures::ForAsmjs(), thrower,
                            std::move(result).value(), bytes, &export_wrappers);
  if (!native_module) return {};

  return AsmWasmData::New(isolate, std::move(native_module), export_wrappers,
                          uses_bitset);
}

Handle<WasmModuleObject> WasmEngine::FinalizeTranslatedAsmJs(
    Isolate* isolate, Handle<AsmWasmData> asm_wasm_data,
    Handle<Script> script) {
  std::shared_ptr<NativeModule> native_module =
      asm_wasm_data->managed_native_module().get();
  Handle<FixedArray> export_wrappers =
      handle(asm_wasm_data->export_wrappers(), isolate);
  Handle<WasmModuleObject> module_object = WasmModuleObject::New(
      isolate, std::move(native_module), script, export_wrappers);
  return module_object;
}

MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
    Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
    const ModuleWireBytes& bytes) {
  TRACE_EVENT0("v8.wasm", "wasm.SyncCompile");
  ModuleResult result =
      DecodeWasmModule(enabled, bytes.start(), bytes.end(), false, kWasmOrigin,
                       isolate->counters(), allocator());
  if (result.failed()) {
    thrower->CompileFailed(result.error());
    return {};
  }

  // Transfer ownership of the WasmModule to the {Managed<WasmModule>} generated
  // in {CompileToModuleObject}.
  Handle<FixedArray> export_wrappers;
  std::shared_ptr<NativeModule> native_module =
      CompileToNativeModule(isolate, enabled, thrower,
                            std::move(result).value(), bytes, &export_wrappers);
  if (!native_module) return {};

#ifdef DEBUG
  // Ensure that code GC will check this isolate for live code.
  {
    base::MutexGuard lock(&mutex_);
    DCHECK_EQ(1, isolates_.count(isolate));
    DCHECK_EQ(1, isolates_[isolate]->native_modules.count(native_module.get()));
    DCHECK_EQ(1, native_modules_.count(native_module.get()));
    DCHECK_EQ(1, native_modules_[native_module.get()]->isolates.count(isolate));
  }
#endif

  Handle<Script> script = GetOrCreateScript(isolate, native_module);

  // Create the compiled module object and populate with compiled functions
  // and information needed at instantiation time. This object needs to be
  // serializable. Instantiation may occur off a deserialized version of this
  // object.
  Handle<WasmModuleObject> module_object = WasmModuleObject::New(
      isolate, std::move(native_module), script, export_wrappers);

  // Finish the Wasm script now and make it public to the debugger.
  isolate->debug()->OnAfterCompile(script);
  return module_object;
}

MaybeHandle<WasmInstanceObject> WasmEngine::SyncInstantiate(
    Isolate* isolate, ErrorThrower* thrower,
    Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
    MaybeHandle<JSArrayBuffer> memory) {
  TRACE_EVENT0("v8.wasm", "wasm.SyncInstantiate");
  return InstantiateToInstanceObject(isolate, thrower, module_object, imports,
                                     memory);
}

void WasmEngine::AsyncInstantiate(
    Isolate* isolate, std::unique_ptr<InstantiationResultResolver> resolver,
    Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports) {
  ErrorThrower thrower(isolate, "WebAssembly.instantiate()");
  TRACE_EVENT0("v8.wasm", "wasm.AsyncInstantiate");
  // Instantiate a TryCatch so that caught exceptions won't propagate out.
  // They will still be set as pending exceptions on the isolate.
  // TODO(clemensb): Avoid TryCatch, use Execution::TryCall internally to invoke
  // start function and report thrown exception explicitly via out argument.
  v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
  catcher.SetVerbose(false);
  catcher.SetCaptureMessage(false);

  MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
      isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());

  if (!instance_object.is_null()) {
    resolver->OnInstantiationSucceeded(instance_object.ToHandleChecked());
    return;
  }

  if (isolate->has_pending_exception()) {
    // The JS code executed during instantiation has thrown an exception.
    // We have to move the exception to the promise chain.
    Handle<Object> exception(isolate->pending_exception(), isolate);
    isolate->clear_pending_exception();
    *isolate->external_caught_exception_address() = false;
    resolver->OnInstantiationFailed(exception);
    thrower.Reset();
  } else {
    DCHECK(thrower.error());
    resolver->OnInstantiationFailed(thrower.Reify());
  }
}

void WasmEngine::AsyncCompile(
    Isolate* isolate, const WasmFeatures& enabled,
    std::shared_ptr<CompilationResultResolver> resolver,
    const ModuleWireBytes& bytes, bool is_shared,
    const char* api_method_name_for_errors) {
  TRACE_EVENT0("v8.wasm", "wasm.AsyncCompile");
  if (!FLAG_wasm_async_compilation) {
    // Asynchronous compilation disabled; fall back on synchronous compilation.
    ErrorThrower thrower(isolate, api_method_name_for_errors);
    MaybeHandle<WasmModuleObject> module_object;
    if (is_shared) {
      // Make a copy of the wire bytes to avoid concurrent modification.
      std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
      memcpy(copy.get(), bytes.start(), bytes.length());
      ModuleWireBytes bytes_copy(copy.get(), copy.get() + bytes.length());
      module_object = SyncCompile(isolate, enabled, &thrower, bytes_copy);
    } else {
      // The wire bytes are not shared, OK to use them directly.
      module_object = SyncCompile(isolate, enabled, &thrower, bytes);
    }
    if (thrower.error()) {
      resolver->OnCompilationFailed(thrower.Reify());
      return;
    }
    Handle<WasmModuleObject> module = module_object.ToHandleChecked();
    resolver->OnCompilationSucceeded(module);
    return;
  }

  if (FLAG_wasm_test_streaming) {
    std::shared_ptr<StreamingDecoder> streaming_decoder =
        StartStreamingCompilation(
            isolate, enabled, handle(isolate->context(), isolate),
            api_method_name_for_errors, std::move(resolver));
    streaming_decoder->OnBytesReceived(bytes.module_bytes());
    streaming_decoder->Finish();
    return;
  }
  // Make a copy of the wire bytes in case the user program changes them
  // during asynchronous compilation.
  std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
  memcpy(copy.get(), bytes.start(), bytes.length());

  AsyncCompileJob* job =
      CreateAsyncCompileJob(isolate, enabled, std::move(copy), bytes.length(),
                            handle(isolate->context(), isolate),
                            api_method_name_for_errors, std::move(resolver));
  job->Start();
}

std::shared_ptr<StreamingDecoder> WasmEngine::StartStreamingCompilation(
    Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context,
    const char* api_method_name,
    std::shared_ptr<CompilationResultResolver> resolver) {
  TRACE_EVENT0("v8.wasm", "wasm.StartStreamingCompilation");
  if (FLAG_wasm_async_compilation) {
    AsyncCompileJob* job = CreateAsyncCompileJob(
        isolate, enabled, std::unique_ptr<byte[]>(nullptr), 0, context,
        api_method_name, std::move(resolver));
    return job->CreateStreamingDecoder();
  }
  return StreamingDecoder::CreateSyncStreamingDecoder(
      isolate, enabled, context, api_method_name, std::move(resolver));
}

void WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module,
                                 uint32_t function_index, ExecutionTier tier) {
  // Note we assume that "one-off" compilations can discard detected features.
  WasmFeatures detected = WasmFeatures::None();
  WasmCompilationUnit::CompileWasmFunction(
      isolate, native_module, &detected,
      &native_module->module()->functions[function_index], tier);
}

void WasmEngine::TierDownAllModulesPerIsolate(Isolate* isolate) {
  std::vector<std::shared_ptr<NativeModule>> native_modules;
  {
    base::MutexGuard lock(&mutex_);
    if (isolates_[isolate]->keep_tiered_down) return;
    isolates_[isolate]->keep_tiered_down = true;
    for (auto& entry : isolates_[isolate]->native_modules) {
      entry.first->SetTieringState(kTieredDown);
      if (auto shared_ptr = entry.second.lock()) {
        native_modules.emplace_back(std::move(shared_ptr));
      }
    }
  }
  for (auto& native_module : native_modules) {
    native_module->RecompileForTiering();
  }
}

void WasmEngine::TierUpAllModulesPerIsolate(Isolate* isolate) {
  // Only trigger recompilation after releasing the mutex, otherwise we risk
  // deadlocks because of lock inversion.
  std::vector<std::shared_ptr<NativeModule>> native_modules_to_recompile;
  {
    base::MutexGuard lock(&mutex_);
    isolates_[isolate]->keep_tiered_down = false;
    auto test_keep_tiered_down = [this](NativeModule* native_module) {
      DCHECK_EQ(1, native_modules_.count(native_module));
      for (auto* isolate : native_modules_[native_module]->isolates) {
        DCHECK_EQ(1, isolates_.count(isolate));
        if (isolates_[isolate]->keep_tiered_down) return true;
      }
      return false;
    };
    for (auto& entry : isolates_[isolate]->native_modules) {
      auto* native_module = entry.first;
      if (!native_module->IsTieredDown()) continue;
      // Only start tier-up if no other isolate needs this module in the
      // tiered-down state.
      if (test_keep_tiered_down(native_module)) continue;
      native_module->SetTieringState(kTieredUp);
      if (auto shared_ptr = entry.second.lock()) {
        native_modules_to_recompile.emplace_back(std::move(shared_ptr));
      }
    }
  }
  for (auto& native_module : native_modules_to_recompile) {
    native_module->RecompileForTiering();
  }
}

std::shared_ptr<NativeModule> WasmEngine::ExportNativeModule(
    Handle<WasmModuleObject> module_object) {
  return module_object->shared_native_module();
}

namespace {
Handle<Script> CreateWasmScript(Isolate* isolate,
                                std::shared_ptr<NativeModule> native_module,
                                Vector<const char> source_url = {}) {
  Handle<Script> script =
      isolate->factory()->NewScript(isolate->factory()->empty_string());
  script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
  script->set_context_data(isolate->native_context()->debug_context_id());
  script->set_type(Script::TYPE_WASM);

  Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
  int hash = StringHasher::HashSequentialString(
      reinterpret_cast<const char*>(wire_bytes.begin()), wire_bytes.length(),
      kZeroHashSeed);

  const int kBufferSize = 32;
  char buffer[kBufferSize];

  // Script name is "<module_name>-hash" if name is available and "hash"
  // otherwise.
  const WasmModule* module = native_module->module();
  Handle<String> name_str;
  if (module->name.is_set()) {
    int name_chars = SNPrintF(ArrayVector(buffer), "-%08x", hash);
    DCHECK(name_chars >= 0 && name_chars < kBufferSize);
    Handle<String> name_hash =
        isolate->factory()
            ->NewStringFromOneByte(
                VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars),
                AllocationType::kOld)
            .ToHandleChecked();
    Handle<String> module_name =
        WasmModuleObject::ExtractUtf8StringFromModuleBytes(
            isolate, wire_bytes, module->name, kNoInternalize);
    name_str = isolate->factory()
                   ->NewConsString(module_name, name_hash)
                   .ToHandleChecked();
  } else {
    int name_chars = SNPrintF(ArrayVector(buffer), "%08x", hash);
    DCHECK(name_chars >= 0 && name_chars < kBufferSize);
    name_str = isolate->factory()
                   ->NewStringFromOneByte(
                       VectorOf(reinterpret_cast<uint8_t*>(buffer), name_chars),
                       AllocationType::kOld)
                   .ToHandleChecked();
  }
  script->set_name(*name_str);
  MaybeHandle<String> url_str;
  if (!source_url.empty()) {
    url_str =
        isolate->factory()->NewStringFromUtf8(source_url, AllocationType::kOld);
  } else {
    Handle<String> url_prefix =
        isolate->factory()->InternalizeString(StaticCharVector("wasm://wasm/"));
    url_str = isolate->factory()->NewConsString(url_prefix, name_str);
  }
  script->set_source_url(*url_str.ToHandleChecked());
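  // For example (hypothetical hash value): a module named "m" whose wire bytes
  // hash to 0x1a2b3c4d gets the name "m-1a2b3c4d" and, if no explicit
  // {source_url} was provided, the URL "wasm://wasm/m-1a2b3c4d".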

  const WasmDebugSymbols& debug_symbols =
      native_module->module()->debug_symbols;
  if (debug_symbols.type == WasmDebugSymbols::Type::SourceMap &&
      !debug_symbols.external_url.is_empty()) {
    Vector<const char> external_url =
        ModuleWireBytes(wire_bytes).GetNameOrNull(debug_symbols.external_url);
    MaybeHandle<String> src_map_str = isolate->factory()->NewStringFromUtf8(
        external_url, AllocationType::kOld);
    script->set_source_mapping_url(*src_map_str.ToHandleChecked());
  }

  // Use the given shared {NativeModule}, but increase its reference count by
  // allocating a new {Managed<T>} that the {Script} references.
  size_t code_size_estimate = native_module->committed_code_space();
  size_t memory_estimate =
      code_size_estimate +
      wasm::WasmCodeManager::EstimateNativeModuleMetaDataSize(module);
  Handle<Managed<wasm::NativeModule>> managed_native_module =
      Managed<wasm::NativeModule>::FromSharedPtr(isolate, memory_estimate,
                                                 std::move(native_module));
  script->set_wasm_managed_native_module(*managed_native_module);
  script->set_wasm_breakpoint_infos(ReadOnlyRoots(isolate).empty_fixed_array());
  script->set_wasm_weak_instance_list(
      ReadOnlyRoots(isolate).empty_weak_array_list());
  return script;
}
}  // namespace

Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
    Isolate* isolate, std::shared_ptr<NativeModule> shared_native_module,
    Vector<const char> source_url) {
  DCHECK_EQ(this, shared_native_module->engine());
  NativeModule* native_module = shared_native_module.get();
  ModuleWireBytes wire_bytes(native_module->wire_bytes());
  Handle<Script> script =
      GetOrCreateScript(isolate, shared_native_module, source_url);
  Handle<FixedArray> export_wrappers;
  CompileJsToWasmWrappers(isolate, native_module->module(), &export_wrappers);
  Handle<WasmModuleObject> module_object = WasmModuleObject::New(
      isolate, shared_native_module, script, export_wrappers);
  {
    base::MutexGuard lock(&mutex_);
    DCHECK_EQ(1, isolates_.count(isolate));
    isolates_[isolate]->native_modules.emplace(native_module,
                                               std::move(shared_native_module));
    DCHECK_EQ(1, native_modules_.count(native_module));
    native_modules_[native_module]->isolates.insert(isolate);
  }

  // Finish the Wasm script now and make it public to the debugger.
  isolate->debug()->OnAfterCompile(script);
  return module_object;
}

CompilationStatistics* WasmEngine::GetOrCreateTurboStatistics() {
  base::MutexGuard guard(&mutex_);
  if (compilation_stats_ == nullptr) {
    compilation_stats_.reset(new CompilationStatistics());
  }
  return compilation_stats_.get();
}

void WasmEngine::DumpAndResetTurboStatistics() {
  base::MutexGuard guard(&mutex_);
  if (compilation_stats_ != nullptr) {
    StdoutStream os;
    os << AsPrintableStatistics{*compilation_stats_.get(), false} << std::endl;
  }
  compilation_stats_.reset();
}

CodeTracer* WasmEngine::GetCodeTracer() {
  base::MutexGuard guard(&mutex_);
  if (code_tracer_ == nullptr) code_tracer_.reset(new CodeTracer(-1));
  return code_tracer_.get();
}

AsyncCompileJob* WasmEngine::CreateAsyncCompileJob(
    Isolate* isolate, const WasmFeatures& enabled,
    std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context,
    const char* api_method_name,
    std::shared_ptr<CompilationResultResolver> resolver) {
  AsyncCompileJob* job =
      new AsyncCompileJob(isolate, enabled, std::move(bytes_copy), length,
                          context, api_method_name, std::move(resolver));
  // Pass ownership to the unique_ptr in {async_compile_jobs_}.
  base::MutexGuard guard(&mutex_);
  async_compile_jobs_[job] = std::unique_ptr<AsyncCompileJob>(job);
  return job;
}

std::unique_ptr<AsyncCompileJob> WasmEngine::RemoveCompileJob(
    AsyncCompileJob* job) {
  base::MutexGuard guard(&mutex_);
  auto item = async_compile_jobs_.find(job);
  DCHECK(item != async_compile_jobs_.end());
  std::unique_ptr<AsyncCompileJob> result = std::move(item->second);
  async_compile_jobs_.erase(item);
  return result;
}

bool WasmEngine::HasRunningCompileJob(Isolate* isolate) {
  base::MutexGuard guard(&mutex_);
  DCHECK_EQ(1, isolates_.count(isolate));
  for (auto& entry : async_compile_jobs_) {
    if (entry.first->isolate() == isolate) return true;
  }
  return false;
}

void WasmEngine::DeleteCompileJobsOnContext(Handle<Context> context) {
  // Under the mutex get all jobs to delete. Then delete them without holding
  // the mutex, such that deletion can reenter the WasmEngine.
  std::vector<std::unique_ptr<AsyncCompileJob>> jobs_to_delete;
  {
    base::MutexGuard guard(&mutex_);
    for (auto it = async_compile_jobs_.begin();
         it != async_compile_jobs_.end();) {
      if (!it->first->context().is_identical_to(context)) {
        ++it;
        continue;
      }
      jobs_to_delete.push_back(std::move(it->second));
      it = async_compile_jobs_.erase(it);
    }
  }
}

void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
  // Under the mutex get all jobs to delete. Then delete them without holding
  // the mutex, such that deletion can reenter the WasmEngine.
  std::vector<std::unique_ptr<AsyncCompileJob>> jobs_to_delete;
  {
    base::MutexGuard guard(&mutex_);
    DCHECK_EQ(1, isolates_.count(isolate));
    for (auto it = async_compile_jobs_.begin();
         it != async_compile_jobs_.end();) {
      if (it->first->isolate() != isolate) {
        ++it;
        continue;
      }
      jobs_to_delete.push_back(std::move(it->second));
      it = async_compile_jobs_.erase(it);
    }
  }
}

void WasmEngine::AddIsolate(Isolate* isolate) {
  base::MutexGuard guard(&mutex_);
  DCHECK_EQ(0, isolates_.count(isolate));
  isolates_.emplace(isolate, std::make_unique<IsolateInfo>(isolate));

  // Install sampling GC callback.
  // TODO(v8:7424): For now we sample module sizes in a GC callback. This will
  // bias samples towards apps with high memory pressure. We should switch to
  // using sampling based on regular intervals independent of the GC.
  auto callback = [](v8::Isolate* v8_isolate, v8::GCType type,
                     v8::GCCallbackFlags flags, void* data) {
    Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
    Counters* counters = isolate->counters();
    WasmEngine* engine = isolate->wasm_engine();
    base::MutexGuard lock(&engine->mutex_);
    DCHECK_EQ(1, engine->isolates_.count(isolate));
    for (auto& entry : engine->isolates_[isolate]->native_modules) {
      entry.first->SampleCodeSize(counters, NativeModule::kSampling);
    }
  };
  isolate->heap()->AddGCEpilogueCallback(callback, v8::kGCTypeMarkSweepCompact,
                                         nullptr);
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
  if (gdb_server_) {
    gdb_server_->AddIsolate(isolate);
  }
#endif  // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
}

void WasmEngine::RemoveIsolate(Isolate* isolate) {
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
  if (gdb_server_) {
    gdb_server_->RemoveIsolate(isolate);
  }
#endif  // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING

  base::MutexGuard guard(&mutex_);
  auto it = isolates_.find(isolate);
  DCHECK_NE(isolates_.end(), it);
  std::unique_ptr<IsolateInfo> info = std::move(it->second);
  isolates_.erase(it);
  for (auto& entry : info->native_modules) {
    auto* native_module = entry.first;
    DCHECK_EQ(1, native_modules_.count(native_module));
    DCHECK_EQ(1, native_modules_[native_module]->isolates.count(isolate));
    auto* info = native_modules_[native_module].get();
    info->isolates.erase(isolate);
    if (current_gc_info_) {
      for (WasmCode* code : info->potentially_dead_code) {
        current_gc_info_->dead_code.erase(code);
      }
    }
    if (native_module->HasDebugInfo()) {
      native_module->GetDebugInfo()->RemoveIsolate(isolate);
    }
  }
  if (current_gc_info_) {
    if (RemoveIsolateFromCurrentGC(isolate)) PotentiallyFinishCurrentGC();
  }
  if (auto* task = info->log_codes_task) task->Cancel();
  if (!info->code_to_log.empty()) {
    WasmCode::DecrementRefCount(VectorOf(info->code_to_log));
    info->code_to_log.clear();
  }
}

void WasmEngine::LogCode(Vector<WasmCode*> code_vec) {
  if (code_vec.empty()) return;
  base::MutexGuard guard(&mutex_);
  NativeModule* native_module = code_vec[0]->native_module();
  DCHECK_EQ(1, native_modules_.count(native_module));
  for (Isolate* isolate : native_modules_[native_module]->isolates) {
    DCHECK_EQ(1, isolates_.count(isolate));
    IsolateInfo* info = isolates_[isolate].get();
    if (info->log_codes == false) continue;
    if (info->log_codes_task == nullptr) {
      auto new_task = std::make_unique<LogCodesTask>(
          &mutex_, &info->log_codes_task, isolate, this);
      info->log_codes_task = new_task.get();
      info->foreground_task_runner->PostTask(std::move(new_task));
    }
    if (info->code_to_log.empty()) {
      isolate->stack_guard()->RequestLogWasmCode();
    }
    info->code_to_log.insert(info->code_to_log.end(), code_vec.begin(),
                             code_vec.end());
    for (WasmCode* code : code_vec) {
      DCHECK_EQ(native_module, code->native_module());
      code->IncRef();
    }
  }
}

void WasmEngine::EnableCodeLogging(Isolate* isolate) {
  base::MutexGuard guard(&mutex_);
  auto it = isolates_.find(isolate);
  DCHECK_NE(isolates_.end(), it);
  it->second->log_codes = true;
}

void WasmEngine::LogOutstandingCodesForIsolate(Isolate* isolate) {
  // If by now we should not log code any more, do not log it.
  if (!WasmCode::ShouldBeLogged(isolate)) return;

  // Under the mutex, get the vector of wasm code to log. Then log and decrement
  // the ref count without holding the mutex.
  std::vector<WasmCode*> code_to_log;
  {
    base::MutexGuard guard(&mutex_);
    DCHECK_EQ(1, isolates_.count(isolate));
    code_to_log.swap(isolates_[isolate]->code_to_log);
  }
  if (code_to_log.empty()) return;
  for (WasmCode* code : code_to_log) {
    code->LogCode(isolate);
  }
  WasmCode::DecrementRefCount(VectorOf(code_to_log));
}

std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
    Isolate* isolate, const WasmFeatures& enabled,
    std::shared_ptr<const WasmModule> module, size_t code_size_estimate) {
#ifdef V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING
  if (FLAG_wasm_gdb_remote && !gdb_server_) {
    gdb_server_ = gdb_server::GdbServer::Create();
    gdb_server_->AddIsolate(isolate);
  }
#endif  // V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING

  std::shared_ptr<NativeModule> native_module = code_manager_.NewNativeModule(
      this, isolate, enabled, code_size_estimate, std::move(module));
  base::MutexGuard lock(&mutex_);
  auto pair = native_modules_.insert(std::make_pair(
      native_module.get(), std::make_unique<NativeModuleInfo>()));
  DCHECK(pair.second);  // inserted new entry.
  pair.first->second.get()->isolates.insert(isolate);
  auto& modules_per_isolate = isolates_[isolate]->native_modules;
  modules_per_isolate.emplace(native_module.get(), native_module);
  if (isolates_[isolate]->keep_tiered_down) {
    native_module->SetTieringState(kTieredDown);
  }
  isolate->counters()->wasm_modules_per_isolate()->AddSample(
      static_cast<int>(modules_per_isolate.size()));
  isolate->counters()->wasm_modules_per_engine()->AddSample(
      static_cast<int>(native_modules_.size()));
  return native_module;
}

std::shared_ptr<NativeModule> WasmEngine::MaybeGetNativeModule(
    ModuleOrigin origin, Vector<const uint8_t> wire_bytes, Isolate* isolate) {
  std::shared_ptr<NativeModule> native_module =
      native_module_cache_.MaybeGetNativeModule(origin, wire_bytes);
  bool recompile_module = false;
  if (native_module) {
    base::MutexGuard guard(&mutex_);
    auto& native_module_info = native_modules_[native_module.get()];
    if (!native_module_info) {
      native_module_info = std::make_unique<NativeModuleInfo>();
    }
    native_module_info->isolates.insert(isolate);
    isolates_[isolate]->native_modules.emplace(native_module.get(),
                                               native_module);
    if (isolates_[isolate]->keep_tiered_down) {
      native_module->SetTieringState(kTieredDown);
      recompile_module = true;
    }
  }
  // Potentially recompile the module for tier down, after releasing the mutex.
  if (recompile_module) native_module->RecompileForTiering();
  return native_module;
}

bool WasmEngine::UpdateNativeModuleCache(
    bool error, std::shared_ptr<NativeModule>* native_module,
    Isolate* isolate) {
  DCHECK_EQ(this, native_module->get()->engine());
  // Pass {native_module} by value here to keep it alive until at least after
  // we returned from {Update}. Otherwise, we might {Erase} it inside {Update}
  // which would lock the mutex twice.
  auto prev = native_module->get();
  *native_module = native_module_cache_.Update(*native_module, error);

  if (prev == native_module->get()) return true;

  bool recompile_module = false;
  {
    base::MutexGuard guard(&mutex_);
    DCHECK_EQ(1, native_modules_.count(native_module->get()));
    native_modules_[native_module->get()]->isolates.insert(isolate);
    DCHECK_EQ(1, isolates_.count(isolate));
    isolates_[isolate]->native_modules.emplace(native_module->get(),
                                               *native_module);
    if (isolates_[isolate]->keep_tiered_down) {
      native_module->get()->SetTieringState(kTieredDown);
      recompile_module = true;
    }
  }
  // Potentially recompile the module for tier down, after releasing the mutex.
  if (recompile_module) native_module->get()->RecompileForTiering();
  return false;
}

bool WasmEngine::GetStreamingCompilationOwnership(size_t prefix_hash) {
  return native_module_cache_.GetStreamingCompilationOwnership(prefix_hash);
}

void WasmEngine::StreamingCompilationFailed(size_t prefix_hash) {
  native_module_cache_.StreamingCompilationFailed(prefix_hash);
}

void WasmEngine::FreeNativeModule(NativeModule* native_module) {
  base::MutexGuard guard(&mutex_);
  auto it = native_modules_.find(native_module);
  DCHECK_NE(native_modules_.end(), it);
  for (Isolate* isolate : it->second->isolates) {
    DCHECK_EQ(1, isolates_.count(isolate));
    IsolateInfo* info = isolates_[isolate].get();
    DCHECK_EQ(1, info->native_modules.count(native_module));
    info->native_modules.erase(native_module);
    info->scripts.erase(native_module);
    // If there are {WasmCode} objects of the deleted {NativeModule}
    // outstanding to be logged in this isolate, remove them. Decrementing the
    // ref count is not needed, since the {NativeModule} dies anyway.
    size_t remaining = info->code_to_log.size();
    if (remaining > 0) {
      for (size_t i = 0; i < remaining; ++i) {
        while (i < remaining &&
               info->code_to_log[i]->native_module() == native_module) {
          // Move the last remaining item to this slot (this can be the same
          // as {i}, which is OK).
          info->code_to_log[i] = info->code_to_log[--remaining];
        }
      }
      info->code_to_log.resize(remaining);
    }
  }
  // If there is a GC running which has references to code contained in the
  // deleted {NativeModule}, remove those references.
  if (current_gc_info_) {
    for (auto it = current_gc_info_->dead_code.begin(),
              end = current_gc_info_->dead_code.end();
         it != end;) {
      if ((*it)->native_module() == native_module) {
        it = current_gc_info_->dead_code.erase(it);
      } else {
        ++it;
      }
    }
    TRACE_CODE_GC("Native module %p died, reducing dead code objects to %zu.\n",
                  native_module, current_gc_info_->dead_code.size());
  }
  native_module_cache_.Erase(native_module);
  native_modules_.erase(it);
}

namespace {
class SampleTopTierCodeSizeTask : public CancelableTask {
 public:
  SampleTopTierCodeSizeTask(Isolate* isolate,
                            std::weak_ptr<NativeModule> native_module)
      : CancelableTask(isolate),
        isolate_(isolate),
        native_module_(std::move(native_module)) {}

  void RunInternal() override {
    if (std::shared_ptr<NativeModule> native_module = native_module_.lock()) {
      native_module->SampleCodeSize(isolate_->counters(),
                                    NativeModule::kAfterTopTier);
    }
  }

 private:
  Isolate* const isolate_;
  const std::weak_ptr<NativeModule> native_module_;
};
}  // namespace

void WasmEngine::SampleTopTierCodeSizeInAllIsolates(
    const std::shared_ptr<NativeModule>& native_module) {
  base::MutexGuard lock(&mutex_);
  DCHECK_EQ(1, native_modules_.count(native_module.get()));
  for (Isolate* isolate : native_modules_[native_module.get()]->isolates) {
    DCHECK_EQ(1, isolates_.count(isolate));
    IsolateInfo* info = isolates_[isolate].get();
    info->foreground_task_runner->PostTask(
        std::make_unique<SampleTopTierCodeSizeTask>(isolate, native_module));
  }
}

void WasmEngine::ReportLiveCodeForGC(Isolate* isolate,
                                     Vector<WasmCode*> live_code) {
  TRACE_EVENT0("v8.wasm", "wasm.ReportLiveCodeForGC");
  TRACE_CODE_GC("Isolate %d reporting %zu live code objects.\n", isolate->id(),
                live_code.size());
  base::MutexGuard guard(&mutex_);
  // This report might come in late (note that we trigger both a stack guard and
  // a foreground task). In that case, ignore it.
  if (current_gc_info_ == nullptr) return;
  if (!RemoveIsolateFromCurrentGC(isolate)) return;
  isolate->counters()->wasm_module_num_triggered_code_gcs()->AddSample(
      current_gc_info_->gc_sequence_index);
  for (WasmCode* code : live_code) current_gc_info_->dead_code.erase(code);
  PotentiallyFinishCurrentGC();
}

void WasmEngine::ReportLiveCodeFromStackForGC(Isolate* isolate) {
  wasm::WasmCodeRefScope code_ref_scope;
  std::unordered_set<wasm::WasmCode*> live_wasm_code;
  for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
    StackFrame* const frame = it.frame();
    if (frame->type() != StackFrame::WASM) continue;
    live_wasm_code.insert(WasmFrame::cast(frame)->wasm_code());
  }

  CheckNoArchivedThreads(isolate);

  ReportLiveCodeForGC(isolate,
                      OwnedVector<WasmCode*>::Of(live_wasm_code).as_vector());
}

bool WasmEngine::AddPotentiallyDeadCode(WasmCode* code) {
  base::MutexGuard guard(&mutex_);
  auto it = native_modules_.find(code->native_module());
  DCHECK_NE(native_modules_.end(), it);
  NativeModuleInfo* info = it->second.get();
  if (info->dead_code.count(code)) return false;  // Code is already dead.
  auto added = info->potentially_dead_code.insert(code);
  if (!added.second) return false;  // An entry already existed.
  new_potentially_dead_code_size_ += code->instructions().size();
  if (FLAG_wasm_code_gc) {
    // Trigger a GC if 64kB plus 10% of committed code are potentially dead.
    size_t dead_code_limit =
        FLAG_stress_wasm_code_gc
            ? 0
            : 64 * KB + code_manager_.committed_code_space() / 10;
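    // For example (hypothetical numbers): with 10 MB of committed code space,
    // the limit is 64 KB + 1 MB, so a GC is only triggered once more than
    // ~1.06 MB of code has become potentially dead since the last GC.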
    if (new_potentially_dead_code_size_ > dead_code_limit) {
      bool inc_gc_count =
          info->num_code_gcs_triggered < std::numeric_limits<int8_t>::max();
      if (current_gc_info_ == nullptr) {
        if (inc_gc_count) ++info->num_code_gcs_triggered;
        TRACE_CODE_GC(
            "Triggering GC (potentially dead: %zu bytes; limit: %zu bytes).\n",
            new_potentially_dead_code_size_, dead_code_limit);
        TriggerGC(info->num_code_gcs_triggered);
      } else if (current_gc_info_->next_gc_sequence_index == 0) {
        if (inc_gc_count) ++info->num_code_gcs_triggered;
        TRACE_CODE_GC(
            "Scheduling another GC after the current one (potentially dead: "
            "%zu bytes; limit: %zu bytes).\n",
            new_potentially_dead_code_size_, dead_code_limit);
        current_gc_info_->next_gc_sequence_index = info->num_code_gcs_triggered;
        DCHECK_NE(0, current_gc_info_->next_gc_sequence_index);
      }
    }
  }
  return true;
}

void WasmEngine::FreeDeadCode(const DeadCodeMap& dead_code) {
  base::MutexGuard guard(&mutex_);
  FreeDeadCodeLocked(dead_code);
}

void WasmEngine::FreeDeadCodeLocked(const DeadCodeMap& dead_code) {
  TRACE_EVENT0("v8.wasm", "wasm.FreeDeadCode");
  DCHECK(!mutex_.TryLock());
  for (auto& dead_code_entry : dead_code) {
    NativeModule* native_module = dead_code_entry.first;
    const std::vector<WasmCode*>& code_vec = dead_code_entry.second;
    DCHECK_EQ(1, native_modules_.count(native_module));
    auto* info = native_modules_[native_module].get();
    TRACE_CODE_GC("Freeing %zu code object%s of module %p.\n", code_vec.size(),
                  code_vec.size() == 1 ? "" : "s", native_module);
    for (WasmCode* code : code_vec) {
      DCHECK_EQ(1, info->dead_code.count(code));
      info->dead_code.erase(code);
    }
    native_module->FreeCode(VectorOf(code_vec));
  }
}

Handle<Script> WasmEngine::GetOrCreateScript(
    Isolate* isolate, const std::shared_ptr<NativeModule>& native_module,
    Vector<const char> source_url) {
  {
    base::MutexGuard guard(&mutex_);
    DCHECK_EQ(1, isolates_.count(isolate));
    auto& scripts = isolates_[isolate]->scripts;
    auto it = scripts.find(native_module.get());
    if (it != scripts.end()) {
      Handle<Script> weak_global_handle = it->second.handle();
      if (weak_global_handle.is_null()) {
        scripts.erase(it);
      } else {
        return Handle<Script>::New(*weak_global_handle, isolate);
      }
    }
  }
  // Temporarily release the mutex to let the GC collect native modules.
  auto script = CreateWasmScript(isolate, native_module, source_url);
  {
    base::MutexGuard guard(&mutex_);
    DCHECK_EQ(1, isolates_.count(isolate));
    auto& scripts = isolates_[isolate]->scripts;
    DCHECK_EQ(0, scripts.count(native_module.get()));
    scripts.emplace(native_module.get(), WeakScriptHandle(script));
    return script;
  }
}

void WasmEngine::TriggerGC(int8_t gc_sequence_index) {
  DCHECK(!mutex_.TryLock());
  DCHECK_NULL(current_gc_info_);
  DCHECK(FLAG_wasm_code_gc);
  new_potentially_dead_code_size_ = 0;
  current_gc_info_.reset(new CurrentGCInfo(gc_sequence_index));
  // Add all potentially dead code to this GC, and trigger a GC task in each
  // isolate.
  for (auto& entry : native_modules_) {
    NativeModuleInfo* info = entry.second.get();
    if (info->potentially_dead_code.empty()) continue;
    for (auto* isolate : native_modules_[entry.first]->isolates) {
      auto& gc_task = current_gc_info_->outstanding_isolates[isolate];
      if (!gc_task) {
        auto new_task = std::make_unique<WasmGCForegroundTask>(isolate);
        gc_task = new_task.get();
        DCHECK_EQ(1, isolates_.count(isolate));
        isolates_[isolate]->foreground_task_runner->PostTask(
            std::move(new_task));
      }
      isolate->stack_guard()->RequestWasmCodeGC();
    }
    for (WasmCode* code : info->potentially_dead_code) {
      current_gc_info_->dead_code.insert(code);
    }
  }
  TRACE_CODE_GC(
      "Starting GC. Total number of potentially dead code objects: %zu\n",
      current_gc_info_->dead_code.size());
  // Ensure that there are outstanding isolates that will eventually finish this
  // GC. If there are no outstanding isolates, we finish the GC immediately.
  PotentiallyFinishCurrentGC();
  DCHECK(current_gc_info_ == nullptr ||
         !current_gc_info_->outstanding_isolates.empty());
}
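
// Rough lifecycle of a code GC (illustrative summary of the code above and
// below):
//   1. {TriggerGC} snapshots all potentially dead code, posts a
//      {WasmGCForegroundTask} to every isolate that uses an affected module,
//      and requests a stack-guard interrupt in those isolates.
//   2. Each isolate reports the code still referenced from its stack via
//      {ReportLiveCodeForGC} / {ReportLiveCodeFromStackForGC}, which removes
//      that code from {current_gc_info_->dead_code}.
//   3. When no outstanding isolates remain, {PotentiallyFinishCurrentGC} frees
//      the remaining dead code and, if requested, schedules a follow-up GC.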

bool WasmEngine::RemoveIsolateFromCurrentGC(Isolate* isolate) {
  DCHECK(!mutex_.TryLock());
  DCHECK_NOT_NULL(current_gc_info_);
  return current_gc_info_->outstanding_isolates.erase(isolate) != 0;
}

void WasmEngine::PotentiallyFinishCurrentGC() {
  DCHECK(!mutex_.TryLock());
  TRACE_CODE_GC(
      "Remaining dead code objects: %zu; outstanding isolates: %zu.\n",
      current_gc_info_->dead_code.size(),
      current_gc_info_->outstanding_isolates.size());

  // If there are more outstanding isolates, return immediately.
  if (!current_gc_info_->outstanding_isolates.empty()) return;

  // All remaining code in {current_gc_info->dead_code} is really dead.
  // Move it from the set of potentially dead code to the set of dead code,
  // and decrement its ref count.
  size_t num_freed = 0;
  DeadCodeMap dead_code;
  for (WasmCode* code : current_gc_info_->dead_code) {
    DCHECK_EQ(1, native_modules_.count(code->native_module()));
    auto* native_module_info = native_modules_[code->native_module()].get();
    DCHECK_EQ(1, native_module_info->potentially_dead_code.count(code));
    native_module_info->potentially_dead_code.erase(code);
    DCHECK_EQ(0, native_module_info->dead_code.count(code));
    native_module_info->dead_code.insert(code);
    if (code->DecRefOnDeadCode()) {
      dead_code[code->native_module()].push_back(code);
      ++num_freed;
    }
  }

  FreeDeadCodeLocked(dead_code);

  TRACE_CODE_GC("Found %zu dead code objects, freed %zu.\n",
                current_gc_info_->dead_code.size(), num_freed);
  USE(num_freed);

  int8_t next_gc_sequence_index = current_gc_info_->next_gc_sequence_index;
  current_gc_info_.reset();
  if (next_gc_sequence_index != 0) TriggerGC(next_gc_sequence_index);
}

namespace {

DEFINE_LAZY_LEAKY_OBJECT_GETTER(std::shared_ptr<WasmEngine>,
                                GetSharedWasmEngine)

}  // namespace

// static
void WasmEngine::InitializeOncePerProcess() {
  *GetSharedWasmEngine() = std::make_shared<WasmEngine>();
}

// static
void WasmEngine::GlobalTearDown() {
  GetSharedWasmEngine()->reset();
}

// static
std::shared_ptr<WasmEngine> WasmEngine::GetWasmEngine() {
  return *GetSharedWasmEngine();
}

// {max_initial_mem_pages} is declared in wasm-limits.h.
uint32_t max_initial_mem_pages() {
  STATIC_ASSERT(kV8MaxWasmMemoryPages <= kMaxUInt32);
  return std::min(uint32_t{kV8MaxWasmMemoryPages}, FLAG_wasm_max_mem_pages);
}

uint32_t max_maximum_mem_pages() {
  STATIC_ASSERT(kV8MaxWasmMemoryPages <= kMaxUInt32);
  return std::min(uint32_t{kV8MaxWasmMemoryPages},
                  FLAG_wasm_max_mem_pages_growth);
}

// {max_table_init_entries} is declared in wasm-limits.h.
uint32_t max_table_init_entries() {
  return std::min(uint32_t{kV8MaxWasmTableInitEntries},
                  FLAG_wasm_max_table_size);
}

// {max_module_size} is declared in wasm-limits.h.
size_t max_module_size() {
  return FLAG_experimental_wasm_allow_huge_modules
             ? RoundDown<kSystemPointerSize>(size_t{kMaxInt})
             : kV8MaxWasmModuleSize;
}

#undef TRACE_CODE_GC

}  // namespace wasm
}  // namespace internal
}  // namespace v8