Commit d31dff84 authored by Eric Holk, committed by Commit Bot

[wasm] always allocate memory when guard regions are needed

When using trap handlers, memory references do not get any bounds checks inserted.
In particular, there is no check for a null memory, which is what an instance has
when its memory size is 0. Ordinarily such an access would still be caught as out
of bounds, since the low memory addresses are usually not mapped. However, if they
were mapped for some reason, we would not catch the out-of-bounds access.

The fix is to ensure that WebAssembly instances always have a guard region, even
if the memory size is 0.

This is a rewrite of 5e76ff5a.

Note that this could lead to a large amount of unnecessary address space usage,
so we share a single reservation among all empty array buffers.
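
As a rough illustration of the sharing scheme, here is a minimal, self-contained
sketch (POSIX-only; the class name and the mmap-based reservation are stand-ins
for illustration, not V8's actual WasmMemoryTracker API):

// Hypothetical, simplified model of a shared inaccessible reservation that
// backs every zero-sized wasm memory.
#include <cstddef>
#include <sys/mman.h>

class EmptyBackingStoreCache {
 public:
  // Returns the shared reservation, creating it lazily on first use. The
  // pages are mapped PROT_NONE, so any access through an "empty" memory
  // faults and can be classified by the trap handler as out of bounds.
  void* Get(size_t reservation_size) {
    if (base_ == nullptr) {
      void* p = mmap(nullptr, reservation_size, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p != MAP_FAILED) {
        base_ = p;
        size_ = reservation_size;
      }
    }
    return base_;
  }

  // "Freeing" an empty buffer is a no-op: the shared mapping stays alive
  // until the cache itself is destroyed. Returns true if the pointer was the
  // shared store, so callers know not to unmap it themselves.
  bool FreeIfShared(const void* buffer_start) {
    return base_ != nullptr && buffer_start == base_;
  }

  ~EmptyBackingStoreCache() {
    if (base_ != nullptr) munmap(base_, size_);
  }

 private:
  void* base_ = nullptr;
  size_t size_ = 0;
};

In the actual change this state lives in WasmMemoryTracker (the
empty_backing_store_ member in the diff below), and deallocation is routed
through FreeMemoryIfIsWasmMemory() so that JSArrayBuffer::FreeBackingStore()
never frees the shared region.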

Bug: chromium:769637

Cq-Include-Trybots: luci.chromium.try:linux_chromium_rel_ng
Change-Id: Ia8e84be6d595e347d3d342959f2c374db1a3f683
Reviewed-on: https://chromium-review.googlesource.com/702657
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Commit-Queue: Eric Holk <eholk@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52163}
parent 2589ea08
@@ -57,7 +57,8 @@ void LocalArrayBufferTracker::Process(Callback callback) {
       backing_stores_to_free->emplace_back(
           allocation_base, old_buffer->allocation_length(),
-          old_buffer->backing_store(), old_buffer->allocation_mode());
+          old_buffer->backing_store(), old_buffer->allocation_mode(),
+          old_buffer->is_wasm_memory());
       it = array_buffers_.erase(it);
     } else {
       UNREACHABLE();
@@ -19051,8 +19051,9 @@ void JSArrayBuffer::FreeBackingStore() {
   if (allocation_base() == nullptr) {
     return;
   }
-  FreeBackingStore(GetIsolate(), {allocation_base(), allocation_length(),
-                                  backing_store(), allocation_mode()});
+  FreeBackingStore(GetIsolate(),
+                   {allocation_base(), allocation_length(), backing_store(),
+                    allocation_mode(), is_wasm_memory()});
   // Zero out the backing store and allocation base to avoid dangling
   // pointers.
   set_backing_store(nullptr);
@@ -19061,12 +19062,17 @@ void JSArrayBuffer::FreeBackingStore() {
 // static
 void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
   if (allocation.mode == ArrayBuffer::Allocator::AllocationMode::kReservation) {
+    bool needs_free = true;
+    if (allocation.is_wasm_memory) {
       wasm::WasmMemoryTracker* memory_tracker =
           isolate->wasm_engine()->memory_tracker();
-      if (memory_tracker->IsWasmMemory(allocation.backing_store)) {
-        memory_tracker->ReleaseAllocation(allocation.backing_store);
+      if (memory_tracker->FreeMemoryIfIsWasmMemory(allocation.backing_store)) {
+        needs_free = false;
       }
     }
+    if (needs_free) {
       CHECK(FreePages(allocation.allocation_base, allocation.length));
+    }
   } else {
     isolate->array_buffer_allocator()->Free(allocation.allocation_base,
                                             allocation.length);
@@ -115,9 +115,9 @@ ArrayBuffer::Allocator::AllocationMode JSArrayBuffer::allocation_mode() const {
 bool JSArrayBuffer::is_wasm_memory() const {
   bool const is_wasm_memory = IsWasmMemory::decode(bit_field());
-  DCHECK(backing_store() == nullptr ||
-         GetIsolate()->wasm_engine()->memory_tracker()->IsWasmMemory(
-             backing_store()) == is_wasm_memory);
+  DCHECK_EQ(is_wasm_memory,
+            GetIsolate()->wasm_engine()->memory_tracker()->IsWasmMemory(
+                backing_store()));
   return is_wasm_memory;
 }
@@ -173,16 +173,18 @@ class JSArrayBuffer : public JSObject {
     using AllocationMode = ArrayBuffer::Allocator::AllocationMode;
 
     Allocation(void* allocation_base, size_t length, void* backing_store,
-               AllocationMode mode)
+               AllocationMode mode, bool is_wasm_memory)
         : allocation_base(allocation_base),
          length(length),
          backing_store(backing_store),
-          mode(mode) {}
+          mode(mode),
+          is_wasm_memory(is_wasm_memory) {}
 
     void* allocation_base;
     size_t length;
     void* backing_store;
     AllocationMode mode;
+    bool is_wasm_memory;
   };
 
   // Returns whether the buffer is tracked by the WasmMemoryTracker.
@@ -978,10 +978,8 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
   DCHECK(it.is_wasm());
   WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame());
-  uint8_t* mem_start = reinterpret_cast<uint8_t*>(frame->wasm_instance()
-                                                      ->memory_object()
-                                                      ->array_buffer()
-                                                      ->allocation_base());
+  uint8_t* mem_start = reinterpret_cast<uint8_t*>(
+      frame->wasm_instance()->memory_object()->array_buffer()->backing_store());
   int func_index = frame->function_index();
   int pos = frame->position();
   // TODO(titzer): eliminate dependency on WasmModule definition here.
@@ -1719,7 +1719,10 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
     DCHECK_IMPLIES(use_trap_handler(), module_->is_asm_js() ||
                                            memory->is_wasm_memory() ||
                                            memory->backing_store() == nullptr);
-  } else if (initial_pages > 0) {
+  } else if (initial_pages > 0 || use_trap_handler()) {
+    // We need to unconditionally create a guard region if using trap handlers,
+    // even when the size is zero, to prevent null-dereference issues
+    // (e.g. https://crbug.com/769637).
     // Allocate memory if the initial size is more than 0 pages.
     memory_ = AllocateMemory(initial_pages);
     if (memory_.is_null()) return {};  // failed to allocate memory
@@ -12,7 +12,18 @@ namespace v8 {
 namespace internal {
 namespace wasm {
 
+void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, size_t size,
+                              bool require_guard_regions,
+                              void** allocation_base,
+                              size_t* allocation_length);
+
 WasmMemoryTracker::~WasmMemoryTracker() {
+  if (empty_backing_store_.allocation_base != nullptr) {
+    CHECK(FreePages(empty_backing_store_.allocation_base,
+                    empty_backing_store_.allocation_length));
+    InternalReleaseAllocation(empty_backing_store_.buffer_start);
+  }
+
   // All reserved address space should be released before the allocation tracker
   // is destroyed.
   DCHECK_EQ(reserved_address_space_, 0u);
@@ -24,9 +35,9 @@ bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
   // regions, which is currently only supported on 64-bit systems. On other
   // platforms, we always fall back on bounds checks.
 #if V8_TARGET_ARCH_64_BIT
-  static constexpr size_t kAddressSpaceLimit = 0x10000000000L;  // 1 TiB
+  constexpr size_t kAddressSpaceLimit = 0x10000000000L;  // 1 TiB
 #else
-  static constexpr size_t kAddressSpaceLimit = 0x80000000;  // 2 GiB
+  constexpr size_t kAddressSpaceLimit = 0x80000000;  // 2 GiB
 #endif
   size_t const old_count = reserved_address_space_.fetch_add(num_bytes);
@@ -65,6 +76,14 @@ void WasmMemoryTracker::RegisterAllocation(void* allocation_base,
 WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
     const void* buffer_start) {
+  if (IsEmptyBackingStore(buffer_start)) {
+    return AllocationData();
+  }
+  return InternalReleaseAllocation(buffer_start);
+}
+
+WasmMemoryTracker::AllocationData WasmMemoryTracker::InternalReleaseAllocation(
+    const void* buffer_start) {
   base::LockGuard<base::Mutex> scope_lock(&mutex_);
   auto find_result = allocations_.find(buffer_start);
@@ -84,11 +103,6 @@ WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
   UNREACHABLE();
 }
 
-bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
-  base::LockGuard<base::Mutex> scope_lock(&mutex_);
-  return allocations_.find(buffer_start) != allocations_.end();
-}
-
 const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
     const void* buffer_start) {
   base::LockGuard<base::Mutex> scope_lock(&mutex_);
@@ -99,7 +113,51 @@ const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
   return nullptr;
 }
 
-void* TryAllocateBackingStore(Isolate* isolate, size_t size,
+bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
+  base::LockGuard<base::Mutex> scope_lock(&mutex_);
+  return allocations_.find(buffer_start) != allocations_.end();
+}
+
+void* WasmMemoryTracker::GetEmptyBackingStore(void** allocation_base,
+                                              size_t* allocation_length) {
+  if (empty_backing_store_.allocation_base == nullptr) {
+    constexpr size_t buffer_length = 0;
+    const bool require_guard_regions = trap_handler::IsTrapHandlerEnabled();
+    void* local_allocation_base;
+    size_t local_allocation_length;
+    void* buffer_start = TryAllocateBackingStore(
+        this, buffer_length, require_guard_regions, &local_allocation_base,
+        &local_allocation_length);
+    empty_backing_store_ =
+        AllocationData(local_allocation_base, local_allocation_length,
+                       buffer_start, buffer_length);
+  }
+  *allocation_base = empty_backing_store_.allocation_base;
+  *allocation_length = empty_backing_store_.allocation_length;
+  return empty_backing_store_.buffer_start;
+}
+
+bool WasmMemoryTracker::IsEmptyBackingStore(const void* buffer_start) const {
+  return buffer_start == empty_backing_store_.buffer_start;
+}
+
+bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(const void* buffer_start) {
+  if (IsEmptyBackingStore(buffer_start)) {
+    // We don't need to do anything for the empty backing store, because this
+    // will be freed when WasmMemoryTracker shuts down. Return true so callers
+    // will not try to free the buffer on their own.
+    return true;
+  }
+  if (IsWasmMemory(buffer_start)) {
+    const AllocationData allocation = ReleaseAllocation(buffer_start);
+    CHECK(FreePages(allocation.allocation_base, allocation.allocation_length));
+    return true;
+  }
+  return false;
+}
+
+void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, size_t size,
                               bool require_guard_regions,
                               void** allocation_base,
                               size_t* allocation_length) {
@@ -108,16 +166,15 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
 #endif
   // We always allocate the largest possible offset into the heap, so the
   // addressable memory after the guard page can be made inaccessible.
-  *allocation_length = require_guard_regions
-                           ? RoundUp(kWasmMaxHeapOffset, CommitPageSize())
-                           : base::bits::RoundUpToPowerOfTwo32(RoundUp(
-                                 static_cast<uint32_t>(size), kWasmPageSize));
+  *allocation_length =
+      require_guard_regions
+          ? RoundUp(kWasmMaxHeapOffset, CommitPageSize())
+          : RoundUp(
+                base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size)),
+                kWasmPageSize);
   DCHECK_GE(*allocation_length, size);
   DCHECK_GE(*allocation_length, kWasmPageSize);
 
-  WasmMemoryTracker* const memory_tracker =
-      isolate->wasm_engine()->memory_tracker();
-
   // Let the WasmMemoryTracker know we are going to reserve a bunch of
   // address space.
   if (!memory_tracker->ReserveAddressSpace(*allocation_length)) {
@@ -135,11 +192,10 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
   void* memory = *allocation_base;
 
   // Make the part we care about accessible.
   if (size > 0) {
     CHECK(SetPermissions(memory, RoundUp(size, kWasmPageSize),
                          PageAllocator::kReadWrite));
-    reinterpret_cast<v8::Isolate*>(isolate)
-        ->AdjustAmountOfExternalAllocatedMemory(size);
   }
 
   memory_tracker->RegisterAllocation(*allocation_base, *allocation_length,
                                      memory, size);
@@ -172,12 +228,16 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
     return Handle<JSArrayBuffer>::null();
   }
 
-  void* allocation_base = nullptr;  // Set by TryAllocateBackingStore
-  size_t allocation_length = 0;     // Set by TryAllocateBackingStore
-  // Do not reserve memory till non zero memory is encountered.
-  void* memory = (size == 0) ? nullptr
+  WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker();
+
+  // Set by TryAllocateBackingStore or GetEmptyBackingStore
+  void* allocation_base = nullptr;
+  size_t allocation_length = 0;
+
+  void* memory = (size == 0) ? memory_tracker->GetEmptyBackingStore(
+                                   &allocation_base, &allocation_length)
                              : TryAllocateBackingStore(
-                                   isolate, size, require_guard_regions,
+                                   memory_tracker, size, require_guard_regions,
                                    &allocation_base, &allocation_length);
 
   if (size > 0 && memory == nullptr) {
@@ -192,6 +252,9 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
   }
 #endif
 
+  reinterpret_cast<v8::Isolate*>(isolate)
+      ->AdjustAmountOfExternalAllocatedMemory(size);
+
   constexpr bool is_external = false;
   return SetupArrayBuffer(isolate, memory, size, is_external, shared);
 }
@@ -31,12 +31,13 @@ class WasmMemoryTracker {
                           void* buffer_start, size_t buffer_length);
 
   struct AllocationData {
-    void* const allocation_base = nullptr;
-    size_t const allocation_length = 0;
-    void* const buffer_start = nullptr;
-    size_t const buffer_length = 0;
+    void* allocation_base = nullptr;
+    size_t allocation_length = 0;
+    void* buffer_start = nullptr;
+    size_t buffer_length = 0;
 
    private:
+    AllocationData() = default;
     AllocationData(void* allocation_base, size_t allocation_length,
                    void* buffer_start, size_t buffer_length)
         : allocation_base(allocation_base),
@@ -68,7 +69,21 @@ class WasmMemoryTracker {
   // buffer is not tracked.
   const AllocationData* FindAllocationData(const void* buffer_start);
 
+  // Empty WebAssembly memories are all backed by a shared inaccessible
+  // reservation. This method creates this store or returns the existing one if
+  // already created.
+  void* GetEmptyBackingStore(void** allocation_base, size_t* allocation_length);
+
+  bool IsEmptyBackingStore(const void* buffer_start) const;
+
+  // Checks if a buffer points to a Wasm memory and if so does any necessary
+  // work to reclaim the buffer. If this function returns false, the caller must
+  // free the buffer manually.
+  bool FreeMemoryIfIsWasmMemory(const void* buffer_start);
+
  private:
+  AllocationData InternalReleaseAllocation(const void* buffer_start);
+
   // Clients use a two-part process. First they "reserve" the address space,
   // which signifies an intent to actually allocate it. This determines whether
   // doing the allocation would put us over our limit. Once there is a
@@ -89,6 +104,11 @@
   // buffer, rather than by the start of the allocation.
   std::unordered_map<const void*, AllocationData> allocations_;
 
+  // Empty backing stores still need to be backed by mapped pages when using
+  // trap handlers. Because this could eat up address space quickly, we keep a
+  // shared backing store here.
+  AllocationData empty_backing_store_;
+
   DISALLOW_COPY_AND_ASSIGN(WasmMemoryTracker);
 };
@@ -371,6 +371,7 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
       new_size > kMaxInt) {
     return Handle<JSArrayBuffer>::null();
   }
+  // Try to adjust the permissions and reuse the old backing store
   if (((use_trap_handler && !old_buffer->is_external() &&
         new_size < old_buffer->allocation_length()) ||
        old_size == new_size) &&
@@ -379,6 +380,8 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
     if (old_size != new_size) {
       // If adjusting permissions fails, propagate error back to return
       // failure to grow.
+      DCHECK(!isolate->wasm_engine()->memory_tracker()->IsEmptyBackingStore(
+          old_mem_start));
       if (!i::SetPermissions(old_mem_start, new_size,
                              PageAllocator::kReadWrite)) {
         return Handle<JSArrayBuffer>::null();
@@ -396,6 +399,8 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
         wasm::SetupArrayBuffer(isolate, backing_store, new_size, is_external);
     return new_buffer;
   } else {
+    // We couldn't reuse the old backing store, so create a new one and copy the
+    // old contents in.
     Handle<JSArrayBuffer> new_buffer;
     new_buffer = wasm::NewArrayBuffer(isolate, new_size, use_trap_handler);
     if (new_buffer.is_null() || old_size == 0) return new_buffer;
@@ -1091,9 +1091,10 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMemMemSize) {
     auto const contents = v8::Utils::ToLocal(buffer)->Externalize();
     int32_t result = WasmMemoryObject::Grow(isolate, mem_obj, 0);
     CHECK_EQ(16, result);
+    constexpr bool is_wasm_memory = true;
     const JSArrayBuffer::Allocation allocation{
         contents.AllocationBase(), contents.AllocationLength(), contents.Data(),
-        contents.AllocationMode()};
+        contents.AllocationMode(), is_wasm_memory};
     JSArrayBuffer::FreeBackingStore(isolate, allocation);
   }
   Cleanup();
@@ -1114,9 +1115,10 @@ TEST(Run_WasmModule_Buffer_Externalized_Detach) {
         isolate, 16 * kWasmPageSize, require_guard_regions);
     auto const contents = v8::Utils::ToLocal(buffer)->Externalize();
     wasm::DetachMemoryBuffer(isolate, buffer, true);
+    constexpr bool is_wasm_memory = true;
     const JSArrayBuffer::Allocation allocation{
         contents.AllocationBase(), contents.AllocationLength(), contents.Data(),
-        contents.AllocationMode()};
+        contents.AllocationMode(), is_wasm_memory};
     JSArrayBuffer::FreeBackingStore(isolate, allocation);
   }
   Cleanup();
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();

// The module's memory has no pages. With trap handlers enabled there are no
// explicit bounds checks, so the store below must fault in the guard region
// and be reported as an out-of-bounds trap.
builder
    .addMemory()
    .addFunction("main", kSig_v_v)
    .addBody([kExprI32Const, 4,
              kExprI32Const, 8,
              kExprI32StoreMem, 0, 16])
    .exportAs("main");

let instance = builder.instantiate();

assertTraps(kTrapMemOutOfBounds, instance.exports.main);