Commit 063ac044 authored by Eric Holk, committed by Commit Bot

[wasm] Use guard pages for minicage

Change-Id: Ic9eaaa2af0106fea85a85599d404b2b304b3db2d
Reviewed-on: https://chromium-review.googlesource.com/930338
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Commit-Queue: Eric Holk <eholk@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51533}
parent 3db1d4a5
@@ -24,6 +24,9 @@ bool WasmAllocationTracker::ReserveAddressSpace(size_t num_bytes) {
 // platforms, we always fall back on bounds checks.
 #if V8_TARGET_ARCH_64_BIT
   static constexpr size_t kAddressSpaceLimit = 0x10000000000L;  // 1 TiB
+#else
+  static constexpr size_t kAddressSpaceLimit = 0x80000000;  // 2 GiB
+#endif
 
   size_t const old_count = allocated_address_space_.fetch_add(num_bytes);
   DCHECK_GE(old_count + num_bytes, old_count);
@@ -31,7 +34,6 @@ bool WasmAllocationTracker::ReserveAddressSpace(size_t num_bytes) {
     return true;
   }
   allocated_address_space_ -= num_bytes;
-#endif
   return false;
 }
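The reservation logic above boils down to an atomic budget counter with rollback when the limit would be exceeded. A minimal standalone sketch of that pattern follows; it is an illustration only, not V8's WasmAllocationTracker, and the class name and limit value are invented:

```cpp
#include <atomic>
#include <cstddef>

// Illustrative stand-in for the bookkeeping above: a reservation succeeds
// only while the running total stays under a fixed budget, and a failed
// reservation is rolled back so later callers are not penalized.
class AddressSpaceBudget {
 public:
  explicit AddressSpaceBudget(size_t limit) : limit_(limit) {}

  bool Reserve(size_t num_bytes) {
    size_t old_count = reserved_.fetch_add(num_bytes);
    if (old_count + num_bytes <= limit_) return true;
    reserved_.fetch_sub(num_bytes);  // over budget: undo the add
    return false;
  }

  void Release(size_t num_bytes) { reserved_.fetch_sub(num_bytes); }

 private:
  const size_t limit_;
  std::atomic<size_t> reserved_{0};
};
```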
@@ -44,59 +46,42 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
                               bool require_guard_regions,
                               void** allocation_base,
                               size_t* allocation_length) {
-  // TODO(eholk): Right now require_guard_regions has no effect on 32-bit
-  // systems. It may be safer to fail instead, given that other code might do
-  // things that would be unsafe if they expected guard pages where there
-  // weren't any.
-  if (require_guard_regions) {
-    // TODO(eholk): On Windows we want to make sure we don't commit the guard
-    // pages yet.
-
-    // We always allocate the largest possible offset into the heap, so the
-    // addressable memory after the guard page can be made inaccessible.
-    size_t page_size = AllocatePageSize();
-    *allocation_length = RoundUp(kWasmMaxHeapOffset, page_size);
-    DCHECK_EQ(0, size % page_size);
-
-    WasmAllocationTracker* const allocation_tracker =
-        isolate->wasm_engine()->allocation_tracker();
-
-    // Let the WasmAllocationTracker know we are going to reserve a bunch of
-    // address space.
-    if (!allocation_tracker->ReserveAddressSpace(*allocation_length)) {
-      // If we are over the address space limit, fail.
-      return nullptr;
-    }
-
-    // Make the whole region inaccessible by default.
-    *allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
-                                     PageAllocator::kNoAccess);
-    if (*allocation_base == nullptr) {
-      allocation_tracker->ReleaseAddressSpace(*allocation_length);
-      return nullptr;
-    }
-
-    void* memory = *allocation_base;
-
-    // Make the part we care about accessible.
-    CHECK(SetPermissions(memory, size, PageAllocator::kReadWrite));
-
-    reinterpret_cast<v8::Isolate*>(isolate)
-        ->AdjustAmountOfExternalAllocatedMemory(size);
-
-    return memory;
-  } else {
-    // TODO(titzer): use guard regions for minicage and merge with above code.
-    CHECK_LE(size, kV8MaxWasmMemoryBytes);
-    *allocation_length =
-        base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size));
-    void* memory =
-        size == 0
-            ? nullptr
-            : isolate->array_buffer_allocator()->Allocate(*allocation_length);
-    *allocation_base = memory;
-    return memory;
-  }
+  // We always allocate the largest possible offset into the heap, so the
+  // addressable memory after the guard page can be made inaccessible.
+  *allocation_length = require_guard_regions
+                           ? RoundUp(kWasmMaxHeapOffset, CommitPageSize())
+                           : base::bits::RoundUpToPowerOfTwo32(RoundUp(
+                                 static_cast<uint32_t>(size), kWasmPageSize));
+  DCHECK_GE(*allocation_length, size);
+
+  WasmAllocationTracker* const allocation_tracker =
+      isolate->wasm_engine()->allocation_tracker();
+
+  // Let the WasmAllocationTracker know we are going to reserve a bunch of
+  // address space.
+  if (!allocation_tracker->ReserveAddressSpace(*allocation_length)) {
+    // If we are over the address space limit, fail.
+    return nullptr;
+  }
+
+  // The Reserve makes the whole region inaccessible by default.
+  *allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
+                                   PageAllocator::kNoAccess);
+  if (*allocation_base == nullptr) {
+    allocation_tracker->ReleaseAddressSpace(*allocation_length);
+    return nullptr;
+  }
+
+  void* memory = *allocation_base;
+
+  // Make the part we care about accessible.
+  CHECK(SetPermissions(memory, RoundUp(size, kWasmPageSize),
+                       PageAllocator::kReadWrite));
+
+  reinterpret_cast<v8::Isolate*>(isolate)
+      ->AdjustAmountOfExternalAllocatedMemory(size);
+
+  return memory;
 }
 
 Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* allocation_base,
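The new allocation path always reserves the full, guard-sized mapping as inaccessible and then commits only the bytes actually in use, so any access past the committed part faults. A rough POSIX sketch of that reserve-then-commit pattern follows; mmap/mprotect stand in for V8's AllocatePages/SetPermissions, and the sizes and function name are arbitrary:

```cpp
#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

// Reserve a large inaccessible region, then make only the first `size`
// bytes usable. Accesses beyond `size` hit PROT_NONE pages and fault,
// which is the property wasm guard regions rely on.
void* ReserveWithGuardPages(size_t reserved_bytes, size_t size) {
  void* base = mmap(nullptr, reserved_bytes, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (base == MAP_FAILED) return nullptr;
  if (mprotect(base, size, PROT_READ | PROT_WRITE) != 0) {
    munmap(base, reserved_bytes);
    return nullptr;
  }
  return base;
}

int main() {
  const size_t kReserved = size_t{1} << 33;  // 8 GiB of address space
  const size_t kUsable = 64 * 1024;          // one wasm page committed
  void* mem = ReserveWithGuardPages(kReserved, kUsable);
  std::printf("reserved at %p\n", mem);
  if (mem) munmap(mem, kReserved);
}
```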
@@ -150,8 +135,10 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
 #endif
 
   constexpr bool is_external = false;
+  // All buffers have guard regions now, but sometimes they are small.
+  constexpr bool has_guard_region = true;
   return SetupArrayBuffer(isolate, allocation_base, allocation_length, memory,
-                          size, is_external, require_guard_regions, shared);
+                          size, is_external, has_guard_region, shared);
 }
 
 void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
......
@@ -443,15 +443,15 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
   if (old_pages > maximum_pages || pages > maximum_pages - old_pages) {
     return Handle<JSArrayBuffer>::null();
   }
-  const bool enable_guard_regions =
-      old_buffer.is_null() ? use_trap_handler : old_buffer->has_guard_region();
   size_t new_size =
       static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
   if (new_size > FLAG_wasm_max_mem_pages * wasm::kWasmPageSize ||
       new_size > kMaxInt) {
     return Handle<JSArrayBuffer>::null();
   }
-  if ((enable_guard_regions || old_size == new_size) && old_size != 0) {
+  if (((use_trap_handler && new_size < old_buffer->allocation_length()) ||
+       old_size == new_size) &&
+      old_size != 0) {
     DCHECK_NOT_NULL(old_buffer->backing_store());
     if (old_size != new_size) {
       CHECK(i::SetPermissions(old_mem_start, new_size,
@@ -478,13 +478,11 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
   if (pages != 0) {
     // Allocate a new buffer and memcpy the old contents.
     free_memory = true;
-    new_buffer =
-        wasm::NewArrayBuffer(isolate, new_size, enable_guard_regions);
+    new_buffer = wasm::NewArrayBuffer(isolate, new_size, use_trap_handler);
     if (new_buffer.is_null() || old_size == 0) return new_buffer;
     Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
     memcpy(new_mem_start, old_mem_start, old_size);
     DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
-    DCHECK(old_buffer.is_null() || !old_buffer->has_guard_region());
   } else {
     // Reuse the prior backing store, but allocate a new array buffer.
     new_buffer = wasm::SetupArrayBuffer(
......
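Since every buffer now carries a reservation (possibly a small one), GrowMemoryBuffer can widen the committed window in place whenever the requested size still fits inside old_buffer->allocation_length(), and only falls back to allocating a new buffer and copying otherwise. A hypothetical sketch of that decision, in the same POSIX terms as the example above (the function name is invented):

```cpp
#include <sys/mman.h>
#include <cstddef>

// Hypothetical grow step: if the requested size still fits inside the
// original reservation, just extend the accessible window in place;
// otherwise the caller must allocate a larger buffer and memcpy.
bool GrowInPlace(void* base, size_t reserved_bytes, size_t new_size) {
  if (new_size > reserved_bytes) return false;  // fall back to allocate + copy
  return mprotect(base, new_size, PROT_READ | PROT_WRITE) == 0;
}
```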