Commit 9c79b37a authored by Eric Holk, committed by Commit Bot

[wasm] use allocation tracker to track reserved address space

This is a step towards falling back on bounds checks when there are too many
guarded Wasm memories.

Bug: v8:7143
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
Change-Id: I01916cbdd5ddb08fe1d946ab83b801f37a8fe1c6
Reviewed-on: https://chromium-review.googlesource.com/832944
Commit-Queue: Eric Holk <eholk@chromium.org>
Reviewed-by: Bill Budge <bbudge@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50390}
parent d18f4ec1
@@ -7645,6 +7645,14 @@ v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
   Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize",
                   "ArrayBuffer already externalized");
   self->set_is_external(true);
+  if (self->has_guard_region()) {
+    // Since this is being externalized, the Wasm Allocation Tracker can no
+    // longer track it.
+    //
+    // TODO(eholk): Find a way to track this across externalization.
+    isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
+        self->allocation_length());
+  }
   isolate->heap()->UnregisterArrayBuffer(*self);
   return GetContents();
@@ -7860,6 +7868,14 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() {
   Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize",
                   "SharedArrayBuffer already externalized");
   self->set_is_external(true);
+  if (self->has_guard_region()) {
+    // Since this is being externalized, the Wasm Allocation Tracker can no
+    // longer track it.
+    //
+    // TODO(eholk): Find a way to track this across externalization.
+    isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
+        self->allocation_length());
+  }
   isolate->heap()->UnregisterArrayBuffer(*self);
   return GetContents();
 }
@@ -73,6 +73,7 @@
 #include "src/trap-handler/trap-handler.h"
 #include "src/unicode-cache-inl.h"
 #include "src/utils-inl.h"
+#include "src/wasm/wasm-engine.h"
 #include "src/wasm/wasm-objects.h"
 #include "src/zone/zone.h"
@@ -18958,6 +18959,13 @@ void JSArrayBuffer::FreeBackingStore() {
 // static
 void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
+  if (allocation.mode == ArrayBuffer::Allocator::AllocationMode::kReservation) {
+    // TODO(eholk): Check with the WasmAllocationTracker to make sure this is
+    // actually a buffer we are tracking.
+    isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
+        allocation.length);
+  }
   isolate->array_buffer_allocator()->Free(allocation.allocation_base,
                                           allocation.length, allocation.mode);
 }
@@ -9,14 +9,13 @@
 #include "src/wasm/compilation-manager.h"
 #include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-memory.h"
 
 namespace v8 {
 namespace internal {
 namespace wasm {
 
-class CompilationManager;
-
 // The central data structure that represents an engine instance capable of
 // loading, instantiating, and executing WASM code.
 class WasmEngine {
@@ -28,9 +27,14 @@ class WasmEngine {
   WasmCodeManager* code_manager() const { return code_manager_.get(); }
 
+  WasmAllocationTracker* allocation_tracker() { return &allocation_tracker_; }
+
  private:
   CompilationManager compilation_manager_;
   std::unique_ptr<WasmCodeManager> code_manager_;
+  WasmAllocationTracker allocation_tracker_;
 
   DISALLOW_COPY_AND_ASSIGN(WasmEngine);
 };
 
 }  // namespace wasm
@@ -4,6 +4,7 @@
 #include "src/wasm/wasm-memory.h"
 
 #include "src/objects-inl.h"
+#include "src/wasm/wasm-engine.h"
 #include "src/wasm/wasm-limits.h"
 #include "src/wasm/wasm-module.h"
@@ -11,30 +12,70 @@ namespace v8 {
 namespace internal {
 namespace wasm {
 
+WasmAllocationTracker::~WasmAllocationTracker() {
+  // All reserved address space should be released before the allocation
+  // tracker is destroyed.
+  DCHECK_EQ(allocated_address_space_, 0);
+}
+
+bool WasmAllocationTracker::ReserveAddressSpace(size_t num_bytes) {
+  // Address space reservations are currently only meaningful using guard
+  // regions, which is currently only supported on 64-bit systems. On other
+  // platforms, we always fall back on bounds checks.
+#if V8_TARGET_ARCH_64_BIT
+  static constexpr size_t kAddressSpaceLimit = 0x10000000000L;  // 1 TiB
+
+  size_t const new_count = allocated_address_space_ + num_bytes;
+  DCHECK_GE(new_count, allocated_address_space_);
+  if (new_count <= kAddressSpaceLimit) {
+    allocated_address_space_ = new_count;
+    return true;
+  }
+#endif
+  return false;
+}
+
+void WasmAllocationTracker::ReleaseAddressSpace(size_t num_bytes) {
+  DCHECK_LE(num_bytes, allocated_address_space_);
+  allocated_address_space_ -= num_bytes;
+}
+
 void* TryAllocateBackingStore(Isolate* isolate, size_t size,
-                              bool enable_guard_regions, void*& allocation_base,
-                              size_t& allocation_length) {
-  // TODO(eholk): Right now enable_guard_regions has no effect on 32-bit
+                              bool require_guard_regions,
+                              void** allocation_base,
+                              size_t* allocation_length) {
+  // TODO(eholk): Right now require_guard_regions has no effect on 32-bit
   // systems. It may be safer to fail instead, given that other code might do
   // things that would be unsafe if they expected guard pages where there
   // weren't any.
-  if (enable_guard_regions) {
+  if (require_guard_regions) {
     // TODO(eholk): On Windows we want to make sure we don't commit the guard
     // pages yet.
 
     // We always allocate the largest possible offset into the heap, so the
     // addressable memory after the guard page can be made inaccessible.
-    allocation_length = RoundUp(kWasmMaxHeapOffset, CommitPageSize());
+    *allocation_length = RoundUp(kWasmMaxHeapOffset, CommitPageSize());
     DCHECK_EQ(0, size % CommitPageSize());
 
+    WasmAllocationTracker* const allocation_tracker =
+        isolate->wasm_engine()->allocation_tracker();
+
+    // Let the WasmAllocationTracker know we are going to reserve a bunch of
+    // address space.
+    if (!allocation_tracker->ReserveAddressSpace(*allocation_length)) {
+      // If we are over the address space limit, fail.
+      return nullptr;
+    }
+
     // The Reserve makes the whole region inaccessible by default.
-    allocation_base =
-        isolate->array_buffer_allocator()->Reserve(allocation_length);
-    if (allocation_base == nullptr) {
+    *allocation_base =
+        isolate->array_buffer_allocator()->Reserve(*allocation_length);
+    if (*allocation_base == nullptr) {
+      allocation_tracker->ReleaseAddressSpace(*allocation_length);
       return nullptr;
     }
 
-    void* memory = allocation_base;
+    void* memory = *allocation_base;
 
     // Make the part we care about accessible.
     isolate->array_buffer_allocator()->SetProtection(
@@ -47,8 +88,8 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
   } else {
     void* memory =
         size == 0 ? nullptr : isolate->array_buffer_allocator()->Allocate(size);
-    allocation_base = memory;
-    allocation_length = size;
+    *allocation_base = memory;
+    *allocation_length = size;
     return memory;
   }
 }
@@ -73,7 +114,7 @@ Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* allocation_base,
 }
 
 Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
-                                     bool enable_guard_regions,
+                                     bool require_guard_regions,
                                      SharedFlag shared) {
   // Check against kMaxInt, since the byte length is stored as int in the
   // JSArrayBuffer. Note that wasm_max_mem_pages can be raised from the command
@@ -87,10 +128,10 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
   void* allocation_base = nullptr;  // Set by TryAllocateBackingStore
   size_t allocation_length = 0;     // Set by TryAllocateBackingStore
   // Do not reserve memory till non zero memory is encountered.
-  void* memory =
-      (size == 0) ? nullptr
-                  : TryAllocateBackingStore(isolate, size, enable_guard_regions,
-                                            allocation_base, allocation_length);
+  void* memory = (size == 0) ? nullptr
+                             : TryAllocateBackingStore(
+                                   isolate, size, require_guard_regions,
+                                   &allocation_base, &allocation_length);
 
   if (size > 0 && memory == nullptr) {
     return Handle<JSArrayBuffer>::null();
@@ -106,7 +147,7 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
   constexpr bool is_external = false;
   return SetupArrayBuffer(isolate, allocation_base, allocation_length, memory,
-                          size, is_external, enable_guard_regions, shared);
+                          size, is_external, require_guard_regions, shared);
 }
 
 void ExternalizeMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
@@ -13,8 +13,28 @@ namespace v8 {
 namespace internal {
 namespace wasm {
 
+class WasmAllocationTracker {
+ public:
+  WasmAllocationTracker() {}
+  ~WasmAllocationTracker();
+
+  // ReserveAddressSpace attempts to increase the reserved address space
+  // counter to determine whether there is enough headroom to allocate another
+  // guarded Wasm memory. Returns true if successful (meaning it is okay to go
+  // ahead and allocate the buffer), false otherwise.
+  bool ReserveAddressSpace(size_t num_bytes);
+
+  // Reduces the address space counter so that the space can be reused.
+  void ReleaseAddressSpace(size_t num_bytes);
+
+ private:
+  size_t allocated_address_space_ = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(WasmAllocationTracker);
+};
+
 Handle<JSArrayBuffer> NewArrayBuffer(
-    Isolate*, size_t size, bool enable_guard_regions,
+    Isolate*, size_t size, bool require_guard_regions,
     SharedFlag shared = SharedFlag::kNotShared);
 
 Handle<JSArrayBuffer> SetupArrayBuffer(
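Note: to make the new accounting concrete, here is a minimal standalone sketch of the reserve/release protocol the header above documents. The class name MockAllocationTracker, the 8 GiB-per-memory figure, and the main() driver are illustrative assumptions rather than V8 code; only the 1 TiB cap and the reserve-then-release pairing come from this change. It also assumes a 64-bit size_t, mirroring the V8_TARGET_ARCH_64_BIT guard in wasm-memory.cc.

```cpp
#include <cstddef>
#include <cstdio>

// Mock of the bookkeeping WasmAllocationTracker performs: a single counter
// of reserved address space, capped at 1 TiB.
class MockAllocationTracker {
 public:
  static constexpr size_t kAddressSpaceLimit = size_t{1} << 40;  // 1 TiB

  // Returns true and records the reservation if it fits under the cap.
  bool ReserveAddressSpace(size_t num_bytes) {
    size_t new_count = allocated_ + num_bytes;
    if (new_count < allocated_) return false;          // addition overflowed
    if (new_count > kAddressSpaceLimit) return false;  // over the cap
    allocated_ = new_count;
    return true;
  }

  // Symmetric release, called when a backing store is freed (or, per the
  // TODO in api.cc, when a guarded buffer is externalized).
  void ReleaseAddressSpace(size_t num_bytes) { allocated_ -= num_bytes; }

  size_t allocated() const { return allocated_; }

 private:
  size_t allocated_ = 0;
};

int main() {
  MockAllocationTracker tracker;
  // Assumed figure: ~8 GiB of address space per guarded memory (4 GiB
  // addressable plus guard regions), matching the regression test below.
  constexpr size_t kPerMemory = size_t{8} << 30;
  int count = 0;
  while (tracker.ReserveAddressSpace(kPerMemory)) ++count;
  std::printf("guarded memories before the cap: %d\n", count);  // prints 128
  while (count-- > 0) tracker.ReleaseAddressSpace(kPerMemory);
  std::printf("outstanding bytes after release: %zu\n", tracker.allocated());
}
```

A caller that fails to reserve is expected to fail the allocation, which is the hook the commit message refers to for eventually falling back on bounds checks instead of guard regions.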
@@ -172,3 +172,26 @@ function testOOBThrows() {
   }
 }
 testOOBThrows();
+
+function testAddressSpaceLimit() {
+  // 1 TiB; see kAddressSpaceLimit in wasm-memory.cc.
+  const kMaxAddressSpace = 1 * 1024 * 1024 * 1024 * 1024;
+  const kAddressSpacePerMemory = 8 * 1024 * 1024 * 1024;
+
+  try {
+    let memories = [];
+    let address_space = 0;
+    while (address_space <= kMaxAddressSpace + 1) {
+      memories.push(new WebAssembly.Memory({initial: 1}));
+      address_space += kAddressSpacePerMemory;
+    }
+  } catch (e) {
+    assertTrue(e instanceof RangeError);
+    return;
+  }
+  failWithMessage("allocated too much memory");
+}
+
+if (%IsWasmTrapHandlerEnabled()) {
+  testAddressSpaceLimit();
+}
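Note on the test's sizing: with kAddressSpacePerMemory = 8 GiB (2**33 bytes) and kMaxAddressSpace = 1 TiB (2**40 bytes), the loop crosses the cap after 2**40 / 2**33 = 128 successful allocations, so the expected RangeError should fire at roughly the 129th WebAssembly.Memory; the `+ 1` in the loop bound ensures the test overshoots the cap rather than stopping just under it.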