Commit c8fe898d authored by Clemens Hammacher, committed by Commit Bot

[wasm] Track code reservations in the WasmMemoryTracker

The WasmMemoryTracker keeps track of reserved memory in order to avoid
running out of virtual address space. Until now, we only tracked
reservations for wasm memory, not for code. This CL changes that to
also include code reservations.

Drive-by: Some cleanup around the allocation of the WasmCodeManager.

R=titzer@chromium.org

Bug: chromium:883639
Change-Id: I0c2586a742022ae00752132e048346d54e2a1a7c
Reviewed-on: https://chromium-review.googlesource.com/1230134
Reviewed-by: Ben Titzer <titzer@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56001}
parent f25c1218
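
For context, the change builds on a simple idea: a process-wide counter of reserved virtual address space that refuses further reservations once a limit is reached. The sketch below illustrates that counter pattern in isolation; it is a simplified stand-in, not V8's WasmMemoryTracker, and the class name, limit handling, and memory ordering are assumptions.

// Minimal sketch of an address-space reservation counter (illustrative only;
// not V8's actual WasmMemoryTracker implementation).
#include <atomic>
#include <cstddef>

class AddressSpaceTracker {
 public:
  explicit AddressSpaceTracker(size_t limit) : limit_(limit) {}

  // Try to account for {num_bytes} of address space. Returns false if that
  // would exceed the limit; the caller must then not allocate.
  bool ReserveAddressSpace(size_t num_bytes) {
    size_t old_count = reserved_.load(std::memory_order_relaxed);
    while (true) {
      if (num_bytes > limit_ - old_count) return false;  // over the limit
      if (reserved_.compare_exchange_weak(old_count, old_count + num_bytes)) {
        return true;
      }
      // compare_exchange_weak reloaded old_count on failure; retry.
    }
  }

  // Give the bytes back once the mapping is freed or the allocation failed.
  void ReleaseReservation(size_t num_bytes) {
    reserved_.fetch_sub(num_bytes, std::memory_order_relaxed);
  }

 private:
  const size_t limit_;
  std::atomic<size_t> reserved_{0};
};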
@@ -769,9 +769,11 @@ NativeModule::~NativeModule() {
   wasm_code_manager_->FreeNativeModule(this);
 }

-WasmCodeManager::WasmCodeManager(size_t max_committed) {
+WasmCodeManager::WasmCodeManager(WasmMemoryTracker* memory_tracker,
+                                 size_t max_committed)
+    : memory_tracker_(memory_tracker),
+      remaining_uncommitted_code_space_(max_committed) {
   DCHECK_LE(max_committed, kMaxWasmCodeMemory);
-  remaining_uncommitted_code_space_.store(max_committed);
 }

 bool WasmCodeManager::Commit(Address start, size_t size) {
@@ -815,15 +817,18 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
   v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
   DCHECK_GT(size, 0);
   size = RoundUp(size, page_allocator->AllocatePageSize());
+  if (!memory_tracker_->ReserveAddressSpace(size)) return {};
   if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();

   VirtualMemory mem(page_allocator, size, hint,
                     page_allocator->AllocatePageSize());
-  if (mem.IsReserved()) {
-    TRACE_HEAP("VMem alloc: %p:%p (%zu)\n",
-               reinterpret_cast<void*>(mem.address()),
-               reinterpret_cast<void*>(mem.end()), mem.size());
+  if (!mem.IsReserved()) {
+    memory_tracker_->ReleaseReservation(size);
+    return {};
   }
+  TRACE_HEAP("VMem alloc: %p:%p (%zu)\n",
+             reinterpret_cast<void*>(mem.address()),
+             reinterpret_cast<void*>(mem.end()), mem.size());
   return mem;
 }
@@ -906,7 +911,7 @@ std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
     if (mem.IsReserved()) break;
     if (retries == kAllocationRetries) {
       V8::FatalProcessOutOfMemory(isolate, "WasmCodeManager::NewNativeModule");
-      return nullptr;
+      UNREACHABLE();
     }
     // Run one GC, then try the allocation again.
     isolate->heap()->MemoryPressureNotification(MemoryPressureLevel::kCritical,
@@ -1032,6 +1037,7 @@ void WasmCodeManager::Free(VirtualMemory* mem) {
   void* end = reinterpret_cast<void*>(mem->end());
   size_t size = mem->size();
   mem->Free();
+  memory_tracker_->ReleaseReservation(size);
   TRACE_HEAP("VMem Release: %p:%p (%zu)\n", start, end, size);
 }
...
@@ -29,6 +29,7 @@ namespace wasm {
 class NativeModule;
 class WasmCodeManager;
+class WasmMemoryTracker;
 struct WasmModule;

 struct AddressRange {
@@ -427,7 +428,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
 class V8_EXPORT_PRIVATE WasmCodeManager final {
  public:
-  explicit WasmCodeManager(size_t max_committed);
+  explicit WasmCodeManager(WasmMemoryTracker* memory_tracker,
+                           size_t max_committed);
   // Create a new NativeModule. The caller is responsible for its
   // lifetime. The native module will be given some memory for code,
   // which will be page size aligned. The size of the initial memory
@@ -469,6 +471,7 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
   void AssignRanges(Address start, Address end, NativeModule*);
   bool ShouldForceCriticalMemoryPressureNotification();

+  WasmMemoryTracker* const memory_tracker_;
   mutable base::Mutex native_modules_mutex_;
   std::map<Address, std::pair<Address, NativeModule*>> lookup_map_;
   std::unordered_set<NativeModule*> native_modules_;
...
@@ -18,8 +18,8 @@ namespace v8 {
 namespace internal {
 namespace wasm {

-WasmEngine::WasmEngine(std::unique_ptr<WasmCodeManager> code_manager)
-    : code_manager_(std::move(code_manager)) {}
+WasmEngine::WasmEngine()
+    : code_manager_(&memory_tracker_, kMaxWasmCodeMemory) {}

 WasmEngine::~WasmEngine() {
   // All AsyncCompileJobs have been canceled.
@@ -270,11 +270,6 @@ void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
 namespace {

-WasmEngine* AllocateNewWasmEngine() {
-  return new WasmEngine(std::unique_ptr<WasmCodeManager>(
-      new WasmCodeManager(kMaxWasmCodeMemory)));
-}
-
 struct WasmEnginePointerConstructTrait final {
   static void Construct(void* raw_ptr) {
     auto engine_ptr = reinterpret_cast<std::shared_ptr<WasmEngine>*>(raw_ptr);
@@ -293,7 +288,7 @@ base::LazyStaticInstance<std::shared_ptr<WasmEngine>,
 void WasmEngine::InitializeOncePerProcess() {
   if (!FLAG_wasm_shared_engine) return;
-  global_wasm_engine.Pointer()->reset(AllocateNewWasmEngine());
+  global_wasm_engine.Pointer()->reset(new WasmEngine());
 }

 void WasmEngine::GlobalTearDown() {
@@ -303,7 +298,7 @@ void WasmEngine::GlobalTearDown() {
 std::shared_ptr<WasmEngine> WasmEngine::GetWasmEngine() {
   if (FLAG_wasm_shared_engine) return global_wasm_engine.Get();
-  return std::shared_ptr<WasmEngine>(AllocateNewWasmEngine());
+  return std::shared_ptr<WasmEngine>(new WasmEngine());
 }

 }  // namespace wasm
...
@@ -44,7 +44,7 @@ class V8_EXPORT_PRIVATE InstantiationResultResolver {
 // loading, instantiating, and executing WASM code.
 class V8_EXPORT_PRIVATE WasmEngine {
  public:
-  explicit WasmEngine(std::unique_ptr<WasmCodeManager> code_manager);
+  WasmEngine();
   ~WasmEngine();

   // Synchronously validates the given bytes that represent an encoded WASM
@@ -108,7 +108,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
   Handle<WasmModuleObject> ImportNativeModule(
       Isolate* isolate, std::shared_ptr<NativeModule> shared_module);

-  WasmCodeManager* code_manager() const { return code_manager_.get(); }
+  WasmCodeManager* code_manager() { return &code_manager_; }

   WasmMemoryTracker* memory_tracker() { return &memory_tracker_; }
@@ -150,8 +150,8 @@ class V8_EXPORT_PRIVATE WasmEngine {
       Handle<Context> context,
       std::shared_ptr<CompilationResultResolver> resolver);

-  std::unique_ptr<WasmCodeManager> code_manager_;
   WasmMemoryTracker memory_tracker_;
+  WasmCodeManager code_manager_;
   AccountingAllocator allocator_;

   // This mutex protects all information which is mutated concurrently or
...
@@ -138,9 +138,9 @@ bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
 #elif V8_TARGET_ARCH_64_BIT
   // We set the limit to 1 TiB + 4 GiB so that there is room for mini-guards
   // once we fill everything up with full-sized guard regions.
-  constexpr size_t kAddressSpaceLimit = 0x10100000000L;  // 1 TiB + 4GiB
+  constexpr size_t kAddressSpaceLimit = 0x10100000000L;  // 1 TiB + 4 GiB
 #else
-  constexpr size_t kAddressSpaceLimit = 0x80000000;  // 2 GiB
+  constexpr size_t kAddressSpaceLimit = 0x90000000;  // 2 GiB + 256 MiB
 #endif

   while (true) {
...
@@ -17,15 +17,19 @@ namespace v8 {
 namespace internal {
 namespace wasm {

+// The {WasmMemoryTracker} tracks reservations and allocations for wasm memory
+// and wasm code. There is an upper limit on the total reserved memory which is
+// checked by this class. Allocations are stored so we can look them up when an
+// array buffer dies and figure out the reservation and allocation bounds for
+// that buffer.
 class WasmMemoryTracker {
  public:
   WasmMemoryTracker() = default;
-  ~WasmMemoryTracker();
+  V8_EXPORT_PRIVATE ~WasmMemoryTracker();

   // ReserveAddressSpace attempts to increase the reserved address space counter
-  // to determine whether there is enough headroom to allocate another guarded
-  // Wasm memory. Returns true if successful (meaning it is okay to go ahead and
-  // allocate the buffer), false otherwise.
+  // by {num_bytes}. Returns true if successful (meaning it is okay to go ahead
+  // and reserve {num_bytes} bytes), false otherwise.
   bool ReserveAddressSpace(size_t num_bytes);

   void RegisterAllocation(Isolate* isolate, void* allocation_base,
@@ -59,10 +63,10 @@ class WasmMemoryTracker {
     friend WasmMemoryTracker;
   };

-  // Decreases the amount of reserved address space
+  // Decreases the amount of reserved address space.
   void ReleaseReservation(size_t num_bytes);

-  // Removes an allocation from the tracker
+  // Removes an allocation from the tracker.
   AllocationData ReleaseAllocation(Isolate* isolate, const void* buffer_start);

   bool IsWasmMemory(const void* buffer_start);
@@ -104,14 +108,14 @@ class WasmMemoryTracker {
   //
   // We should always have:
   // allocated_address_space_ <= reserved_address_space_ <= kAddressSpaceLimit
-  std::atomic_size_t reserved_address_space_{0};
+  std::atomic<size_t> reserved_address_space_{0};

   // Used to protect access to the allocated address space counter and
   // allocation map. This is needed because Wasm memories can be freed on
   // another thread by the ArrayBufferTracker.
   base::Mutex mutex_;
-  size_t allocated_address_space_{0};
+  size_t allocated_address_space_ = 0;

   // Track Wasm memory allocation information. This is keyed by the start of the
   // buffer, rather than by the start of the allocation.
...
@@ -26,8 +26,7 @@ namespace test_wasm_shared_engine {
 class SharedEngine {
  public:
   explicit SharedEngine(size_t max_committed = kMaxWasmCodeMemory)
-      : wasm_engine_(base::make_unique<WasmEngine>(
-            base::make_unique<WasmCodeManager>(max_committed))) {}
+      : wasm_engine_(base::make_unique<WasmEngine>()) {}
   ~SharedEngine() {
     // Ensure no remaining uses exist.
     CHECK(wasm_engine_.unique());
...
@@ -8,6 +8,7 @@
 #include "src/wasm/function-compiler.h"
 #include "src/wasm/jump-table-assembler.h"
 #include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-memory.h"

 namespace v8 {
 namespace internal {
@@ -183,6 +184,11 @@ class WasmCodeManagerTest : public TestWithContext,
   }

   size_t page() const { return AllocatePageSize(); }
+
+  WasmMemoryTracker* memory_tracker() { return &memory_tracker_; }
+
+ private:
+  WasmMemoryTracker memory_tracker_;
 };

 INSTANTIATE_TEST_CASE_P(Parameterized, WasmCodeManagerTest,
@@ -190,7 +196,7 @@ INSTANTIATE_TEST_CASE_P(Parameterized, WasmCodeManagerTest,
                         PrintWasmCodeManageTestParam);

 TEST_P(WasmCodeManagerTest, EmptyCase) {
-  WasmCodeManager manager(0 * page());
+  WasmCodeManager manager(memory_tracker(), 0 * page());
   CHECK_EQ(0, manager.remaining_uncommitted_code_space());

   ASSERT_DEATH_IF_SUPPORTED(AllocModule(&manager, 1 * page(), GetParam()),
@@ -198,7 +204,7 @@ TEST_P(WasmCodeManagerTest, EmptyCase) {
 }

 TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
-  WasmCodeManager manager(1 * page());
+  WasmCodeManager manager(memory_tracker(), 1 * page());
   CHECK_EQ(1 * page(), manager.remaining_uncommitted_code_space());
   NativeModulePtr native_module = AllocModule(&manager, 1 * page(), GetParam());
   CHECK(native_module);
@@ -223,7 +229,7 @@ TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
 }

 TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
-  WasmCodeManager manager(3 * page());
+  WasmCodeManager manager(memory_tracker(), 3 * page());
   NativeModulePtr nm1 = AllocModule(&manager, 2 * page(), GetParam());
   NativeModulePtr nm2 = AllocModule(&manager, 2 * page(), GetParam());
   CHECK(nm1);
@@ -235,8 +241,8 @@ TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
 }

 TEST_P(WasmCodeManagerTest, DifferentHeapsApplyLimitsIndependently) {
-  WasmCodeManager manager1(1 * page());
-  WasmCodeManager manager2(2 * page());
+  WasmCodeManager manager1(memory_tracker(), 1 * page());
+  WasmCodeManager manager2(memory_tracker(), 2 * page());
   NativeModulePtr nm1 = AllocModule(&manager1, 1 * page(), GetParam());
   NativeModulePtr nm2 = AllocModule(&manager2, 1 * page(), GetParam());
   CHECK(nm1);
@@ -249,7 +255,7 @@ TEST_P(WasmCodeManagerTest, DifferentHeapsApplyLimitsIndependently) {
 }

 TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
-  WasmCodeManager manager(3 * page());
+  WasmCodeManager manager(memory_tracker(), 3 * page());
   NativeModulePtr nm = AllocModule(&manager, 1 * page(), GetParam());
   size_t module_size = GetParam() == Fixed ? kMaxWasmCodeMemory : 1 * page();
   size_t remaining_space_in_module = module_size - kJumpTableSize;
@@ -268,7 +274,7 @@ TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
 }

 TEST_P(WasmCodeManagerTest, CommitIncrements) {
-  WasmCodeManager manager(10 * page());
+  WasmCodeManager manager(memory_tracker(), 10 * page());
   NativeModulePtr nm = AllocModule(&manager, 3 * page(), GetParam());
   WasmCode* code = AddCode(nm.get(), 0, kCodeAlignment);
   CHECK_NOT_NULL(code);
@@ -282,7 +288,7 @@ TEST_P(WasmCodeManagerTest, CommitIncrements) {
 }

 TEST_P(WasmCodeManagerTest, Lookup) {
-  WasmCodeManager manager(2 * page());
+  WasmCodeManager manager(memory_tracker(), 2 * page());
   NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), GetParam());
   NativeModulePtr nm2 = AllocModule(&manager, 1 * page(), GetParam());
@@ -321,8 +327,8 @@ TEST_P(WasmCodeManagerTest, Lookup) {
 }

 TEST_P(WasmCodeManagerTest, MultiManagerLookup) {
-  WasmCodeManager manager1(2 * page());
-  WasmCodeManager manager2(2 * page());
+  WasmCodeManager manager1(memory_tracker(), 2 * page());
+  WasmCodeManager manager2(memory_tracker(), 2 * page());
   NativeModulePtr nm1 = AllocModule(&manager1, 1 * page(), GetParam());
   NativeModulePtr nm2 = AllocModule(&manager2, 1 * page(), GetParam());
@@ -344,7 +350,7 @@ TEST_P(WasmCodeManagerTest, MultiManagerLookup) {
 }

 TEST_P(WasmCodeManagerTest, LookupWorksAfterRewrite) {
-  WasmCodeManager manager(2 * page());
+  WasmCodeManager manager(memory_tracker(), 2 * page());
   NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), GetParam());
...
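
The TryAllocate and Free hunks above follow a reserve-first, roll-back-on-failure pattern: account for the bytes with the tracker before asking the OS for the mapping, and release the accounting if the mapping fails or is later freed. The sketch below shows that control flow in simplified form; Mapping and TryReserveVirtualMemory are stand-ins rather than V8 APIs, and AddressSpaceTracker refers to the hypothetical class sketched after the commit message.

// Illustrative only: Mapping and TryReserveVirtualMemory are stand-ins, not
// V8 APIs; AddressSpaceTracker is the hypothetical class sketched earlier.
#include <cstddef>
#include <cstdlib>

struct Mapping {
  void* base = nullptr;
  size_t size = 0;
  bool IsReserved() const { return base != nullptr; }
};

Mapping TryReserveVirtualMemory(size_t size) {
  // Stand-in for a real page reservation.
  return {std::malloc(size), size};
}

Mapping TryAllocateCodeSpace(AddressSpaceTracker* tracker, size_t size) {
  // 1. Account for the address space before touching the OS.
  if (!tracker->ReserveAddressSpace(size)) return {};
  // 2. Perform the actual reservation.
  Mapping mem = TryReserveVirtualMemory(size);
  // 3. On failure, return the accounted bytes so the counter stays exact.
  if (!mem.IsReserved()) {
    tracker->ReleaseReservation(size);
    return {};
  }
  return mem;
}

void FreeCodeSpace(AddressSpaceTracker* tracker, Mapping mem) {
  std::free(mem.base);
  // Release the accounting only after the mapping itself is gone.
  tracker->ReleaseReservation(mem.size);
}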