Commit 30d7d1a1 authored by Eric Holk, committed by Commit Bot

[wasm] Removed shared empty backing store

This is no longer needed now that Wasm is more aggressive about
collecting old memories. It also causes problems with the upcoming trap
handler fallback path.

Change-Id: I4b8513c28e0c0d7c6b232d399c1d97b645499ef1
Reviewed-on: https://chromium-review.googlesource.com/1043277
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Commit-Queue: Eric Holk <eholk@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53425}
parent 6e374c17
......@@ -91,12 +91,6 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
} // namespace
WasmMemoryTracker::~WasmMemoryTracker() {
if (empty_backing_store_.allocation_base != nullptr) {
CHECK(FreePages(empty_backing_store_.allocation_base,
empty_backing_store_.allocation_length));
InternalReleaseAllocation(empty_backing_store_.buffer_start);
}
// All reserved address space should be released before the allocation tracker
// is destroyed.
DCHECK_EQ(reserved_address_space_, 0u);
......@@ -144,9 +138,6 @@ void WasmMemoryTracker::RegisterAllocation(void* allocation_base,
WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
const void* buffer_start) {
if (IsEmptyBackingStore(buffer_start)) {
return AllocationData();
}
return InternalReleaseAllocation(buffer_start);
}
......@@ -187,39 +178,7 @@ bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
return allocations_.find(buffer_start) != allocations_.end();
}
void* WasmMemoryTracker::GetEmptyBackingStore(void** allocation_base,
size_t* allocation_length,
Heap* heap) {
if (empty_backing_store_.allocation_base == nullptr) {
constexpr size_t buffer_length = 0;
const bool require_full_guard_regions =
trap_handler::IsTrapHandlerEnabled();
void* local_allocation_base;
size_t local_allocation_length;
void* buffer_start = TryAllocateBackingStore(
this, heap, buffer_length, require_full_guard_regions,
&local_allocation_base, &local_allocation_length);
empty_backing_store_ =
AllocationData(local_allocation_base, local_allocation_length,
buffer_start, buffer_length);
}
*allocation_base = empty_backing_store_.allocation_base;
*allocation_length = empty_backing_store_.allocation_length;
return empty_backing_store_.buffer_start;
}
bool WasmMemoryTracker::IsEmptyBackingStore(const void* buffer_start) const {
return buffer_start == empty_backing_store_.buffer_start;
}
bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(const void* buffer_start) {
if (IsEmptyBackingStore(buffer_start)) {
// We don't need to do anything for the empty backing store, because this
// will be freed when WasmMemoryTracker shuts down. Return true so callers
// will not try to free the buffer on their own.
return true;
}
if (IsWasmMemory(buffer_start)) {
const AllocationData allocation = ReleaseAllocation(buffer_start);
CHECK(FreePages(allocation.allocation_base, allocation.allocation_length));
......@@ -274,27 +233,21 @@ MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
void* allocation_base = nullptr;
size_t allocation_length = 0;
void* memory;
if (size == 0) {
memory = memory_tracker->GetEmptyBackingStore(
&allocation_base, &allocation_length, isolate->heap());
} else {
#if V8_TARGET_ARCH_64_BIT
bool require_full_guard_regions = true;
bool require_full_guard_regions = true;
#else
bool require_full_guard_regions = false;
bool require_full_guard_regions = false;
#endif
void* memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
require_full_guard_regions,
&allocation_base, &allocation_length);
if (memory == nullptr && !trap_handler::IsTrapHandlerEnabled()) {
// If we failed to allocate with full guard regions, fall back on
// mini-guards.
require_full_guard_regions = false;
memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
require_full_guard_regions,
&allocation_base, &allocation_length);
if (memory == nullptr && !trap_handler::IsTrapHandlerEnabled()) {
// If we failed to allocate with full guard regions, fall back on
// mini-guards.
require_full_guard_regions = false;
memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
require_full_guard_regions,
&allocation_base, &allocation_length);
}
}
if (memory == nullptr) {
return {};
......
......@@ -73,14 +73,6 @@ class WasmMemoryTracker {
// buffer is not tracked.
const AllocationData* FindAllocationData(const void* buffer_start);
// Empty WebAssembly memories are all backed by a shared inaccessible
// reservation. This method creates this store or returns the existing one if
// already created.
void* GetEmptyBackingStore(void** allocation_base, size_t* allocation_length,
Heap* heap);
bool IsEmptyBackingStore(const void* buffer_start) const;
// Checks if a buffer points to a Wasm memory and if so does any necessary
// work to reclaim the buffer. If this function returns false, the caller must
// free the buffer manually.
......@@ -133,11 +125,6 @@ class WasmMemoryTracker {
// buffer, rather than by the start of the allocation.
std::unordered_map<const void*, AllocationData> allocations_;
// Empty backing stores still need to be backed by mapped pages when using
// trap handlers. Because this could eat up address space quickly, we keep a
// shared backing store here.
AllocationData empty_backing_store_;
// Keep pointers to
Histogram* allocation_result_;
Histogram* address_space_usage_mb_; // in MiB
......
......@@ -474,14 +474,12 @@ MaybeHandle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
// Blink's array buffers. The connection between the two is lost, which can
// lead to Blink not knowing about the other reference to the buffer and
// freeing it too early.
if (!old_buffer->is_external() && old_size != 0 &&
if (!old_buffer->is_external() &&
((new_size < old_buffer->allocation_length()) || old_size == new_size)) {
DCHECK_NOT_NULL(old_buffer->backing_store());
if (old_size != new_size) {
DCHECK_NOT_NULL(old_buffer->backing_store());
// If adjusting permissions fails, propagate error back to return
// failure to grow.
DCHECK(!isolate->wasm_engine()->memory_tracker()->IsEmptyBackingStore(
old_mem_start));
if (!i::SetPermissions(old_mem_start, new_size,
PageAllocator::kReadWrite)) {
return {};
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment