Commit 9f90c8dc authored by Clemens Hammacher, committed by Commit Bot

[wasm] Force GC earlier to avoid running OOM

We currently trigger a GC when creating a module while the remaining
uncommitted code space is below 32MB. For bigger modules, this is not
enough. Instead, make this limit relative: Trigger GC if we fall below
50% of the available code space, and re-adjust this limit after each GC
to avoid repeated GCs that do not free anything.

R=ahaas@chromium.org

Bug: v8:8624
Change-Id: I7abfad3b57663d528a26d29232ad6bc2dc63cef4
Reviewed-on: https://chromium-review.googlesource.com/c/1391753
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58527}
parent ef29f4b9
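
The policy in the commit message reduces to a few lines. The following
standalone toy model illustrates it (CodeSpaceModel and ShouldTriggerGC are
invented names for this sketch; the real logic lives in
WasmCodeManager::NewNativeModule, shown in the diff below):

// Toy model of the adaptive GC trigger; not V8 code.
#include <atomic>
#include <cstddef>
#include <cstdio>

struct CodeSpaceModel {
  std::atomic<size_t> remaining;  // uncommitted code space, in bytes
  std::atomic<size_t> critical;   // forced-GC threshold, in bytes

  explicit CodeSpaceModel(size_t max_committed)
      : remaining(max_committed), critical(max_committed / 2) {}

  // Called before each module creation, mirroring NewNativeModule().
  bool ShouldTriggerGC() {
    if (remaining.load() >= critical.load()) return false;
    // Re-adjust the threshold to half of what is available *now*, so a GC
    // that frees nothing does not immediately trigger another one.
    critical.store(remaining.load() / 2);
    return true;
  }
};

int main() {
  CodeSpaceModel model(1024);  // pretend 1024 bytes of total code space
  for (int i = 0; i < 8; ++i) {
    model.remaining.fetch_sub(100);  // each module commits 100 bytes
    if (model.ShouldTriggerGC()) {
      std::printf("forced GC at remaining=%zu\n", model.remaining.load());
    }
  }
}

With the old fixed 32 MB threshold, a single large module could exhaust the
remaining space before the trigger ever fired; a relative threshold scales
with the space actually left.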
@@ -552,6 +552,8 @@ DEFINE_UINT(wasm_max_mem_pages, v8::internal::wasm::kV8MaxWasmMemoryPages,
             "maximum number of 64KiB memory pages of a wasm instance")
 DEFINE_UINT(wasm_max_table_size, v8::internal::wasm::kV8MaxWasmTableSize,
             "maximum table size of a wasm instance")
+DEFINE_UINT(wasm_max_code_space, v8::internal::kMaxWasmCodeMB,
+            "maximum committed code space for wasm (in MB)")
 // Enable Liftoff by default on ia32 and x64. More architectures will follow
 // once they are implemented and sufficiently tested.
 #if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
......
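The new wasm_max_code_space flag makes the committed code space limit
configurable at process start: as the wasm-engine.cc hunk near the end of
this diff shows, WasmEngine now sizes its WasmCodeManager with
FLAG_wasm_max_code_space * MB instead of the hard-coded kMaxWasmCodeMemory,
and the regression test below runs with --wasm-max-code-space=1 to shrink
the space to 1 MB.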
@@ -8,6 +8,7 @@
 #include <vector>
 #include "src/globals.h"
+#include "src/wasm/wasm-limits.h"
 namespace v8 {
 namespace internal {
......
@@ -150,10 +150,11 @@ constexpr int kElidedFrameSlots = 0;
 constexpr int kDoubleSizeLog2 = 3;
 #if V8_TARGET_ARCH_ARM64
 // ARM64 only supports direct calls within a 128 MB range.
-constexpr size_t kMaxWasmCodeMemory = 128 * MB;
+constexpr size_t kMaxWasmCodeMB = 128;
 #else
-constexpr size_t kMaxWasmCodeMemory = 1024 * MB;
+constexpr size_t kMaxWasmCodeMB = 1024;
 #endif
+constexpr size_t kMaxWasmCodeMemory = kMaxWasmCodeMB * MB;

 #if V8_HOST_ARCH_64_BIT
 constexpr int kSystemPointerSizeLog2 = 3;
......
@@ -876,7 +876,8 @@ NativeModule::~NativeModule() {
 WasmCodeManager::WasmCodeManager(WasmMemoryTracker* memory_tracker,
                                  size_t max_committed)
     : memory_tracker_(memory_tracker),
-      remaining_uncommitted_code_space_(max_committed) {
+      remaining_uncommitted_code_space_(max_committed),
+      critical_uncommitted_code_space_(max_committed / 2) {
   DCHECK_LE(max_committed, kMaxWasmCodeMemory);
 }
@@ -888,8 +889,8 @@ bool WasmCodeManager::Commit(Address start, size_t size) {
   // Reserve the size. Use CAS loop to avoid underflow on
   // {remaining_uncommitted_}. Temporary underflow would allow concurrent
   // threads to over-commit.
+  size_t old_value = remaining_uncommitted_code_space_.load();
   while (true) {
-    size_t old_value = remaining_uncommitted_code_space_.load();
     if (old_value < size) return false;
     if (remaining_uncommitted_code_space_.compare_exchange_weak(
             old_value, old_value - size)) {
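
Hoisting the load out of the loop is safe because std::atomic's
compare_exchange_weak writes the observed value back into its first argument
whenever the exchange fails, so each retry already starts from a fresh
snapshot. A standalone sketch of the reservation pattern (TryReserve and the
global counter are stand-ins for this illustration, not V8 code):

// Lock-free reservation against a shared budget, as in
// WasmCodeManager::Commit().
#include <atomic>
#include <cstddef>

std::atomic<size_t> remaining{1 << 20};  // fake 1 MB budget

bool TryReserve(size_t size) {
  size_t old_value = remaining.load();
  while (true) {
    if (old_value < size) return false;  // not enough space left
    // An unconditional fetch_sub could underflow temporarily and let
    // concurrent threads over-commit; the CAS only applies the subtraction
    // if no other thread raced us in between.
    if (remaining.compare_exchange_weak(old_value, old_value - size)) {
      return true;
    }
    // On failure, old_value now holds the value the other thread stored,
    // so the loop retries without an extra load().
  }
}

int main() { return TryReserve(4096) ? 0 : 1; }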
@@ -966,6 +967,7 @@ void WasmCodeManager::SampleModuleSizes(Isolate* isolate) const {
 void WasmCodeManager::SetMaxCommittedMemoryForTesting(size_t limit) {
   remaining_uncommitted_code_space_.store(limit);
+  critical_uncommitted_code_space_.store(limit / 2);
 }

 namespace {
@@ -1018,30 +1020,22 @@ size_t WasmCodeManager::EstimateNativeModuleNonCodeSize(
   return wasm_module_estimate + native_module_estimate;
 }

-bool WasmCodeManager::ShouldForceCriticalMemoryPressureNotification() {
-  base::MutexGuard lock(&native_modules_mutex_);
-  // TODO(titzer): we force a critical memory pressure notification
-  // when the code space is almost exhausted, but only upon the next module
-  // creation. This is only for one isolate, and it should really do this for
-  // all isolates, at the point of commit.
-  constexpr size_t kCriticalThreshold = 32 * 1024 * 1024;
-  return native_modules_.size() > 1 &&
-         remaining_uncommitted_code_space_.load() < kCriticalThreshold;
-}
-
 std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
     Isolate* isolate, const WasmFeatures& enabled, size_t code_size_estimate,
     bool can_request_more, std::shared_ptr<const WasmModule> module) {
   DCHECK_EQ(this, isolate->wasm_engine()->code_manager());
-  if (ShouldForceCriticalMemoryPressureNotification()) {
+  if (remaining_uncommitted_code_space_.load() <
+      critical_uncommitted_code_space_.load()) {
     (reinterpret_cast<v8::Isolate*>(isolate))
         ->MemoryPressureNotification(MemoryPressureLevel::kCritical);
+    critical_uncommitted_code_space_.store(
+        remaining_uncommitted_code_space_.load() / 2);
   }
   // If the code must be contiguous, reserve enough address space up front.
   size_t code_vmem_size =
       kRequiresCodeRange ? kMaxWasmCodeMemory : code_size_estimate;
-  // Try up to three times; getting rid of dead JSArrayBuffer allocations might
+  // Try up to two times; getting rid of dead JSArrayBuffer allocations might
   // require two GCs because the first GC maybe incremental and may have
   // floating garbage.
   static constexpr int kAllocationRetries = 2;
......
@@ -520,10 +520,14 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
   void FreeNativeModule(NativeModule*);
   void AssignRanges(Address start, Address end, NativeModule*);
   void AssignRangesAndAddModule(Address start, Address end, NativeModule*);
-  bool ShouldForceCriticalMemoryPressureNotification();

   WasmMemoryTracker* const memory_tracker_;
   std::atomic<size_t> remaining_uncommitted_code_space_;
+  // If the remaining uncommitted code space falls below
+  // {critical_uncommitted_code_space_}, then we trigger a GC before creating
+  // the next module. This value is initialized to 50% of the available code
+  // space on creation and after each GC.
+  std::atomic<size_t> critical_uncommitted_code_space_;
   mutable base::Mutex native_modules_mutex_;

//////////////////////////////////////////////////////////////////////////////
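
A note on the design: the deleted ShouldForceCriticalMemoryPressureNotification()
locked native_modules_mutex_ on every module creation and compared against a
fixed 32 MB threshold; the replacement compares two atomics lock-free, and
halving critical_uncommitted_code_space_ after each forced GC makes the
trigger relative to the space that is actually left rather than to an
absolute safety margin.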
......
@@ -20,7 +20,7 @@ namespace internal {
 namespace wasm {

 WasmEngine::WasmEngine()
-    : code_manager_(&memory_tracker_, kMaxWasmCodeMemory) {}
+    : code_manager_(&memory_tracker_, FLAG_wasm_max_code_space * MB) {}

 WasmEngine::~WasmEngine() {
   // All AsyncCompileJobs have been canceled.
......
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --wasm-max-code-space=1
load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
// We only have 1 MB code space. This is enough for the code below, but for all
// 1000 modules, it requires several GCs to get rid of the old code.
const builder = new WasmModuleBuilder();
builder.addFunction('main', kSig_i_i).addBody([kExprGetLocal, 0]);
const buffer = builder.toBuffer();
for (let i = 0; i < 1000; ++i) {
new WebAssembly.Module(buffer);
}
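
The loop compiles 1000 identical modules without keeping references to them.
With only 1 MB of code space, the test can only pass if the memory pressure
notifications force GCs that free the dead modules' code in time, which
exercises both the relative trigger and its re-adjustment after each GC.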