Commit 18adaee4 authored by Clemens Hammacher, committed by Commit Bot

[wasm] Store committed memory in WasmCodeManager

We currently store the {remaining_uncommitted_code_space_}. This CL
switches this to store {total_committed_code_space_} plus the maximum
allowed (in {max_committed_code_space_}). This counter will be used by
the GC to decide when to trigger a GC.

R=mstarzinger@chromium.org

Bug: v8:8217
Change-Id: I5946bbd3ba18e9fcbca4631afb942cd5b82834f5
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1558084
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60702}
parent 8acae9be
...@@ -1055,8 +1055,9 @@ NativeModule::~NativeModule() { ...@@ -1055,8 +1055,9 @@ NativeModule::~NativeModule() {
// Constructs a WasmCodeManager that may commit at most {max_committed} bytes
// of code space in total. Starts with zero committed bytes; the critical
// threshold (at which a GC is triggered before creating the next module) is
// initialized to half of the allowed maximum.
// NOTE(review): this span reconstructs the post-commit ("new") column of a
// collapsed side-by-side diff.
WasmCodeManager::WasmCodeManager(WasmMemoryTracker* memory_tracker,
                                 size_t max_committed)
    : memory_tracker_(memory_tracker),
      max_committed_code_space_(max_committed),
      total_committed_code_space_(0),
      critical_committed_code_space_(max_committed / 2) {
  // The configured limit must never exceed the engine-wide hard cap.
  DCHECK_LE(max_committed, kMaxWasmCodeMemory);
}
...@@ -1065,14 +1066,14 @@ bool WasmCodeManager::Commit(Address start, size_t size) { ...@@ -1065,14 +1066,14 @@ bool WasmCodeManager::Commit(Address start, size_t size) {
if (FLAG_perf_prof) return true; if (FLAG_perf_prof) return true;
DCHECK(IsAligned(start, AllocatePageSize())); DCHECK(IsAligned(start, AllocatePageSize()));
DCHECK(IsAligned(size, AllocatePageSize())); DCHECK(IsAligned(size, AllocatePageSize()));
// Reserve the size. Use CAS loop to avoid underflow on // Reserve the size. Use CAS loop to avoid overflow on
// {remaining_uncommitted_}. Temporary underflow would allow concurrent // {total_committed_code_space_}.
// threads to over-commit. size_t old_value = total_committed_code_space_.load();
size_t old_value = remaining_uncommitted_code_space_.load();
while (true) { while (true) {
if (old_value < size) return false; DCHECK_GE(max_committed_code_space_, old_value);
if (remaining_uncommitted_code_space_.compare_exchange_weak( if (size > max_committed_code_space_ - old_value) return false;
old_value, old_value - size)) { if (total_committed_code_space_.compare_exchange_weak(old_value,
old_value + size)) {
break; break;
} }
} }
...@@ -1088,10 +1089,10 @@ bool WasmCodeManager::Commit(Address start, size_t size) { ...@@ -1088,10 +1089,10 @@ bool WasmCodeManager::Commit(Address start, size_t size) {
if (!ret) { if (!ret) {
// Highly unlikely. // Highly unlikely.
remaining_uncommitted_code_space_.fetch_add(size); total_committed_code_space_.fetch_sub(size);
return false; return false;
} }
return ret; return true;
} }
void WasmCodeManager::AssignRanges(Address start, Address end, void WasmCodeManager::AssignRanges(Address start, Address end,
...@@ -1129,8 +1130,10 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) { ...@@ -1129,8 +1130,10 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
} }
void WasmCodeManager::SetMaxCommittedMemoryForTesting(size_t limit) { void WasmCodeManager::SetMaxCommittedMemoryForTesting(size_t limit) {
remaining_uncommitted_code_space_.store(limit); // This has to be set before committing any memory.
critical_uncommitted_code_space_.store(limit / 2); DCHECK_EQ(0, total_committed_code_space_.load());
max_committed_code_space_ = limit;
critical_committed_code_space_.store(limit / 2);
} }
// static // static
...@@ -1172,12 +1175,14 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule( ...@@ -1172,12 +1175,14 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
size_t code_size_estimate, bool can_request_more, size_t code_size_estimate, bool can_request_more,
std::shared_ptr<const WasmModule> module) { std::shared_ptr<const WasmModule> module) {
DCHECK_EQ(this, isolate->wasm_engine()->code_manager()); DCHECK_EQ(this, isolate->wasm_engine()->code_manager());
if (remaining_uncommitted_code_space_.load() < if (total_committed_code_space_.load() >
critical_uncommitted_code_space_.load()) { critical_committed_code_space_.load()) {
(reinterpret_cast<v8::Isolate*>(isolate)) (reinterpret_cast<v8::Isolate*>(isolate))
->MemoryPressureNotification(MemoryPressureLevel::kCritical); ->MemoryPressureNotification(MemoryPressureLevel::kCritical);
critical_uncommitted_code_space_.store( size_t committed = total_committed_code_space_.load();
remaining_uncommitted_code_space_.load() / 2); DCHECK_GE(max_committed_code_space_, committed);
critical_committed_code_space_.store(
committed + (max_committed_code_space_ - committed) / 2);
} }
// If the code must be contiguous, reserve enough address space up front. // If the code must be contiguous, reserve enough address space up front.
...@@ -1367,9 +1372,9 @@ void WasmCodeManager::FreeNativeModule(NativeModule* native_module) { ...@@ -1367,9 +1372,9 @@ void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
size_t code_size = native_module->committed_code_space_.load(); size_t code_size = native_module->committed_code_space_.load();
DCHECK(IsAligned(code_size, AllocatePageSize())); DCHECK(IsAligned(code_size, AllocatePageSize()));
remaining_uncommitted_code_space_.fetch_add(code_size); size_t old_committed = total_committed_code_space_.fetch_sub(code_size);
// Remaining code space cannot grow bigger than maximum code space size. DCHECK_LE(code_size, old_committed);
DCHECK_LE(remaining_uncommitted_code_space_.load(), kMaxWasmCodeMemory); USE(old_committed);
} }
NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const { NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {
...@@ -1392,10 +1397,6 @@ WasmCode* WasmCodeManager::LookupCode(Address pc) const { ...@@ -1392,10 +1397,6 @@ WasmCode* WasmCodeManager::LookupCode(Address pc) const {
return candidate ? candidate->Lookup(pc) : nullptr; return candidate ? candidate->Lookup(pc) : nullptr;
} }
// Pre-change out-of-line accessor for the remaining uncommitted code space.
// This commit deletes it (diff shows no replacement at this location): the
// accessor is re-expressed inline in the header as
// {max_committed_code_space_ - total_committed_code_space_}.
size_t WasmCodeManager::remaining_uncommitted_code_space() const {
return remaining_uncommitted_code_space_.load();
}
// TODO(v8:7424): Code protection scopes are not yet supported with shared code // TODO(v8:7424): Code protection scopes are not yet supported with shared code
// enabled and need to be revisited to work with --wasm-shared-code as well. // enabled and need to be revisited to work with --wasm-shared-code as well.
NativeModuleModificationScope::NativeModuleModificationScope( NativeModuleModificationScope::NativeModuleModificationScope(
......
...@@ -540,9 +540,23 @@ class V8_EXPORT_PRIVATE WasmCodeManager final { ...@@ -540,9 +540,23 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
explicit WasmCodeManager(WasmMemoryTracker* memory_tracker, explicit WasmCodeManager(WasmMemoryTracker* memory_tracker,
size_t max_committed); size_t max_committed);
#ifdef DEBUG
// Debug-only destructor: asserts that every byte of committed code space has
// been released (via FreeNativeModule) before the manager is destroyed.
~WasmCodeManager() {
// No more committed code space.
DCHECK_EQ(0, total_committed_code_space_.load());
}
#endif
NativeModule* LookupNativeModule(Address pc) const; NativeModule* LookupNativeModule(Address pc) const;
WasmCode* LookupCode(Address pc) const; WasmCode* LookupCode(Address pc) const;
// Total bytes of code space currently committed by this manager.
// NOTE(review): this span reconstructs the post-commit ("new") column of a
// collapsed side-by-side diff.
size_t committed_code_space() const {
  return total_committed_code_space_.load();
}

// Bytes of code space that may still be committed before reaching
// {max_committed_code_space_}. Replaces the former atomic counter
// {remaining_uncommitted_code_space_} by deriving the value on demand.
size_t remaining_uncommitted_code_space() const {
  size_t committed = committed_code_space();
  // Commit() guarantees the total never exceeds the maximum.
  DCHECK_GE(max_committed_code_space_, committed);
  return max_committed_code_space_ - committed;
}
void SetMaxCommittedMemoryForTesting(size_t limit); void SetMaxCommittedMemoryForTesting(size_t limit);
...@@ -570,12 +584,16 @@ class V8_EXPORT_PRIVATE WasmCodeManager final { ...@@ -570,12 +584,16 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
void AssignRanges(Address start, Address end, NativeModule*); void AssignRanges(Address start, Address end, NativeModule*);
WasmMemoryTracker* const memory_tracker_; WasmMemoryTracker* const memory_tracker_;
std::atomic<size_t> remaining_uncommitted_code_space_;
// If the remaining uncommitted code space falls below size_t max_committed_code_space_;
// {critical_uncommitted_code_space_}, then we trigger a GC before creating
// the next module. This value is initialized to 50% of the available code std::atomic<size_t> total_committed_code_space_;
// space on creation and after each GC. // If the committed code space exceeds {critical_committed_code_space_}, then
std::atomic<size_t> critical_uncommitted_code_space_; // we trigger a GC before creating the next module. This value is set to the
// currently committed space plus 50% of the available code space on creation
// and updated after each GC.
std::atomic<size_t> critical_committed_code_space_;
mutable base::Mutex native_modules_mutex_; mutable base::Mutex native_modules_mutex_;
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment