Commit 063ac044 authored by Eric Holk, committed by Commit Bot

[wasm] Use guard pages for minicage

Change-Id: Ic9eaaa2af0106fea85a85599d404b2b304b3db2d
Reviewed-on: https://chromium-review.googlesource.com/930338
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Commit-Queue: Eric Holk <eholk@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51533}
parent 3db1d4a5
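For context, the guard-region scheme this change extends to the minicage works by reserving a span of address space with no access permissions, then making only the pages backing the requested size readable and writable, so an out-of-bounds access past the committed pages faults instead of touching unrelated memory. A minimal sketch of that pattern, assuming a POSIX system and using mmap/mprotect rather than V8's PageAllocator helpers:

```cpp
// Minimal sketch of guard-region allocation, assuming a POSIX system.
// Illustration only, not V8's PageAllocator API: reserve a large
// inaccessible region, then commit only the pages backing `size` bytes.
#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>

void* AllocateWithGuardRegion(size_t reserve_bytes, size_t size) {
  // Reserve address space only; PROT_NONE means every page faults on access.
  void* base = mmap(nullptr, reserve_bytes, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (base == MAP_FAILED) return nullptr;

  // Make just the leading `size` bytes (rounded up to a page) accessible.
  size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  size_t accessible = (size + page - 1) / page * page;
  if (accessible > reserve_bytes ||
      mprotect(base, accessible, PROT_READ | PROT_WRITE) != 0) {
    munmap(base, reserve_bytes);
    return nullptr;
  }
  // Everything in [base + accessible, base + reserve_bytes) stays PROT_NONE
  // and acts as the guard region.
  return base;
}
```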
@@ -24,6 +24,9 @@ bool WasmAllocationTracker::ReserveAddressSpace(size_t num_bytes) {
// platforms, we always fall back on bounds checks.
#if V8_TARGET_ARCH_64_BIT
static constexpr size_t kAddressSpaceLimit = 0x10000000000L; // 1 TiB
#else
static constexpr size_t kAddressSpaceLimit = 0x80000000; // 2 GiB
#endif
size_t const old_count = allocated_address_space_.fetch_add(num_bytes);
DCHECK_GE(old_count + num_bytes, old_count);
@@ -31,7 +34,6 @@ bool WasmAllocationTracker::ReserveAddressSpace(size_t num_bytes) {
return true;
}
allocated_address_space_ -= num_bytes;
#endif
return false;
}
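The hunks above also adjust the address-space accounting: reservations are counted atomically against a platform-dependent cap (1 TiB on 64-bit targets, 2 GiB elsewhere), and the counter is rolled back when a reservation would exceed the cap. A small sketch of that bookkeeping pattern; the class and member names here are illustrative stand-ins for WasmAllocationTracker:

```cpp
// Sketch of the reservation bookkeeping used by ReserveAddressSpace /
// ReleaseAddressSpace above; names are illustrative, not V8's.
#include <atomic>
#include <cstddef>

class AddressSpaceBudget {
 public:
  explicit AddressSpaceBudget(size_t limit) : limit_(limit) {}

  // Account for num_bytes of reserved address space; roll back and fail if
  // the platform-dependent cap would be exceeded.
  bool Reserve(size_t num_bytes) {
    size_t old_count = reserved_.fetch_add(num_bytes);
    if (old_count + num_bytes <= limit_) return true;
    reserved_.fetch_sub(num_bytes);
    return false;
  }

  void Release(size_t num_bytes) { reserved_.fetch_sub(num_bytes); }

 private:
  const size_t limit_;               // e.g. 1 TiB on 64-bit, 2 GiB otherwise
  std::atomic<size_t> reserved_{0};
};
```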
@@ -44,59 +46,42 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
bool require_guard_regions,
void** allocation_base,
size_t* allocation_length) {
// TODO(eholk): Right now require_guard_regions has no effect on 32-bit
// systems. It may be safer to fail instead, given that other code might do
// things that would be unsafe if they expected guard pages where there
// weren't any.
if (require_guard_regions) {
// TODO(eholk): On Windows we want to make sure we don't commit the guard
// pages yet.
// We always allocate the largest possible offset into the heap, so the
// addressable memory after the guard page can be made inaccessible.
size_t page_size = AllocatePageSize();
*allocation_length = RoundUp(kWasmMaxHeapOffset, page_size);
DCHECK_EQ(0, size % page_size);
WasmAllocationTracker* const allocation_tracker =
isolate->wasm_engine()->allocation_tracker();
// Let the WasmAllocationTracker know we are going to reserve a bunch of
// address space.
if (!allocation_tracker->ReserveAddressSpace(*allocation_length)) {
// If we are over the address space limit, fail.
return nullptr;
}
// Make the whole region inaccessible by default.
*allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
PageAllocator::kNoAccess);
if (*allocation_base == nullptr) {
allocation_tracker->ReleaseAddressSpace(*allocation_length);
return nullptr;
}
// We always allocate the largest possible offset into the heap, so the
// addressable memory after the guard page can be made inaccessible.
*allocation_length = require_guard_regions
? RoundUp(kWasmMaxHeapOffset, CommitPageSize())
: base::bits::RoundUpToPowerOfTwo32(RoundUp(
static_cast<uint32_t>(size), kWasmPageSize));
DCHECK_GE(*allocation_length, size);
WasmAllocationTracker* const allocation_tracker =
isolate->wasm_engine()->allocation_tracker();
// Let the WasmAllocationTracker know we are going to reserve a bunch of
// address space.
if (!allocation_tracker->ReserveAddressSpace(*allocation_length)) {
// If we are over the address space limit, fail.
return nullptr;
}
void* memory = *allocation_base;
// Make the part we care about accessible.
CHECK(SetPermissions(memory, size, PageAllocator::kReadWrite));
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(size);
return memory;
} else {
// TODO(titzer): use guard regions for minicage and merge with above code.
CHECK_LE(size, kV8MaxWasmMemoryBytes);
*allocation_length =
base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size));
void* memory =
size == 0
? nullptr
: isolate->array_buffer_allocator()->Allocate(*allocation_length);
*allocation_base = memory;
return memory;
// The Reserve makes the whole region inaccessible by default.
*allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
PageAllocator::kNoAccess);
if (*allocation_base == nullptr) {
allocation_tracker->ReleaseAddressSpace(*allocation_length);
return nullptr;
}
void* memory = *allocation_base;
// Make the part we care about accessible.
CHECK(SetPermissions(memory, RoundUp(size, kWasmPageSize),
PageAllocator::kReadWrite));
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(size);
return memory;
}
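In the unified path above, only the reservation size differs between the two modes: with full guard regions the reservation covers every addressable heap offset, while the minicage reserves just the next power of two of the page-rounded request. A sketch of that sizing decision, with stand-in constants for kWasmMaxHeapOffset and kWasmPageSize (the values below are not V8's):

```cpp
// Sketch of the allocation_length computation above; constants are stand-ins.
#include <cstdint>

constexpr uint64_t kPageSize = 64 * 1024;               // stand-in for kWasmPageSize
constexpr uint64_t kMaxHeapOffset = uint64_t{1} << 33;  // stand-in for kWasmMaxHeapOffset

uint64_t RoundUpTo(uint64_t value, uint64_t alignment) {
  return (value + alignment - 1) / alignment * alignment;
}

uint64_t NextPowerOfTwo(uint64_t v) {
  uint64_t p = 1;
  while (p < v) p <<= 1;
  return p;
}

uint64_t ReservationLength(uint64_t size, bool require_guard_regions) {
  // Full guard regions cover every reachable heap offset; the minicage only
  // reserves the next power of two of the page-rounded request.
  return require_guard_regions ? RoundUpTo(kMaxHeapOffset, kPageSize)
                               : NextPowerOfTwo(RoundUpTo(size, kPageSize));
}
```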
Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* allocation_base,
@@ -150,8 +135,10 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
#endif
constexpr bool is_external = false;
// All buffers have guard regions now, but sometimes they are small.
constexpr bool has_guard_region = true;
return SetupArrayBuffer(isolate, allocation_base, allocation_length, memory,
size, is_external, require_guard_regions, shared);
size, is_external, has_guard_region, shared);
}
void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
......
@@ -443,15 +443,15 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
if (old_pages > maximum_pages || pages > maximum_pages - old_pages) {
return Handle<JSArrayBuffer>::null();
}
const bool enable_guard_regions =
old_buffer.is_null() ? use_trap_handler : old_buffer->has_guard_region();
size_t new_size =
static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
if (new_size > FLAG_wasm_max_mem_pages * wasm::kWasmPageSize ||
new_size > kMaxInt) {
return Handle<JSArrayBuffer>::null();
}
if ((enable_guard_regions || old_size == new_size) && old_size != 0) {
if (((use_trap_handler && new_size < old_buffer->allocation_length()) ||
old_size == new_size) &&
old_size != 0) {
DCHECK_NOT_NULL(old_buffer->backing_store());
if (old_size != new_size) {
CHECK(i::SetPermissions(old_mem_start, new_size,
@@ -478,13 +478,11 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
if (pages != 0) {
// Allocate a new buffer and memcpy the old contents.
free_memory = true;
new_buffer =
wasm::NewArrayBuffer(isolate, new_size, enable_guard_regions);
new_buffer = wasm::NewArrayBuffer(isolate, new_size, use_trap_handler);
if (new_buffer.is_null() || old_size == 0) return new_buffer;
Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
memcpy(new_mem_start, old_mem_start, old_size);
DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
DCHECK(old_buffer.is_null() || !old_buffer->has_guard_region());
} else {
// Reuse the prior backing store, but allocate a new array buffer.
new_buffer = wasm::SetupArrayBuffer(
......
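The GrowMemoryBuffer hunks rely on the same reservation: when trap handlers are in use and the new size still fits within the originally reserved allocation_length, the buffer grows in place simply by widening the read/write window; otherwise a new buffer is allocated and the old contents are copied. A POSIX-flavored sketch of the in-place case, with mprotect standing in for V8's SetPermissions helper:

```cpp
// Sketch of the grow-in-place case, assuming a POSIX system; mprotect stands
// in for V8's SetPermissions, and `base` is the page-aligned reservation start.
#include <sys/mman.h>
#include <cstddef>

bool GrowInPlace(void* base, size_t reserved_bytes, size_t new_size) {
  // Growing in place only works while the new size fits in the original
  // reservation; otherwise the caller must allocate a new buffer and memcpy.
  if (new_size > reserved_bytes) return false;
  return mprotect(base, new_size, PROT_READ | PROT_WRITE) == 0;
}
```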