Commit d7cdea6f authored by Eric Holk, committed by Commit Bot

[wasm] Add guard pages before Wasm Memory

Although Wasm memory indices are all unsigned, they sometimes get assembled
as 32-bit signed immediates. Values in the top half of the Wasm memory space
will then get sign extended, causing Wasm to access in front of its memory
buffer.

Usually this region is not mapped anyway, so faults still happen as they are
supposed to. This change protects this region with guard pages so we are
guaranteed to always fault when this happens.

Bug: v8:5277
Change-Id: Id791fbe2a5ac1b1d75460e65c72b5b9db2a47ee7
Reviewed-on: https://chromium-review.googlesource.com/484747
Commit-Queue: Eric Holk <eholk@chromium.org>
Reviewed-by: Mircea Trofin <mtrofin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#44905}
parent a711f281
......@@ -951,6 +951,19 @@ void WasmJs::Install(Isolate* isolate) {
JSObject::AddProperty(memory_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Memory"), ro_attributes);
#if DEBUG && V8_TRAP_HANDLER_SUPPORTED
// Make sure the guard regions are the correct size. We do this check
// dynamically to avoid introducing static initializers when computing this
// constant.
//
// kWasmMaxHeapOffset is the largest offset that a Wasm program can compute.
// We round up to the page size, since this all must be done on page
// granularities, and then multiply by 2 because we add guard regions before
// and after the Wasm heap so we can protect against negative offsets.
DCHECK(wasm::kTotalGuardRegionSize ==
2 * RoundUp(wasm::kWasmMaxHeapOffset, base::OS::CommitPageSize()));
#endif
// Setup errors
attributes = static_cast<PropertyAttributes>(DONT_ENUM);
Handle<JSFunction> compile_error(
......
......@@ -44,6 +44,12 @@ constexpr uint64_t kWasmMaxHeapOffset =
std::numeric_limits<uint32_t>::max()) // maximum base value
+ std::numeric_limits<uint32_t>::max(); // maximum index value
// The total size of the memory region to allocate when using trap handlers.
// The Wasm heap will be placed within this region, at offset
// kLowerGuardRegionSize.
// Build the constant from an unsigned 64-bit literal: `1LL << 34` shifts a
// *signed* long long and then implicitly converts; using uint64_t{1} keeps
// the whole expression in the destination type with no signed arithmetic.
constexpr uint64_t kTotalGuardRegionSize = uint64_t{1} << 34;  // 16 GiB
// The lower guard region (in front of the Wasm heap) takes half of the total
// reservation; the heap plus upper guard region occupy the other half.
constexpr uint64_t kLowerGuardRegionSize = kTotalGuardRegionSize / 2;
// Limit the control stack size of the C++ wasm interpreter.
constexpr size_t kV8MaxWasmInterpretedStackSize = 64 * 1024;
......
......@@ -60,13 +60,19 @@ static void MemoryFinalizer(const v8::WeakCallbackInfo<void>& data) {
JSArrayBuffer* buffer = *p;
if (!buffer->was_neutered()) {
#if V8_TARGET_ARCH_64_BIT
void* memory = buffer->backing_store();
DCHECK(memory != nullptr);
base::OS::Free(memory,
RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize()));
base::OS::Free(GetGuardRegionStartFromMemoryStart(memory),
kTotalGuardRegionSize);
data.GetIsolate()->AdjustAmountOfExternalAllocatedMemory(
-buffer->byte_length()->Number());
#else
DCHECK(false &&
"Attempting to free guarded memory when trap handlers are not "
"supported");
#endif
}
GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
......@@ -93,21 +99,23 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
// things that would be unsafe if they expected guard pages where there
// weren't any.
if (enable_guard_regions && kGuardRegionsSupported) {
#if V8_TARGET_ARCH_64_BIT
// TODO(eholk): On Windows we want to make sure we don't commit the guard
// pages yet.
// We always allocate the largest possible offset into the heap, so the
// addressable memory after the guard page can be made inaccessible.
const size_t alloc_size =
RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize());
const size_t alloc_size = kTotalGuardRegionSize;
DCHECK_EQ(0, size % base::OS::CommitPageSize());
// AllocateGuarded makes the whole region inaccessible by default.
void* memory = base::OS::AllocateGuarded(alloc_size);
if (memory == nullptr) {
void* guard_region = base::OS::AllocateGuarded(alloc_size);
if (guard_region == nullptr) {
return nullptr;
}
void* memory = GetMemoryStartFromGuardRegionStart(guard_region);
// Make the part we care about accessible.
base::OS::Unprotect(memory, size);
......@@ -116,6 +124,10 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
is_external = true;
return memory;
#else
DCHECK(false && "Guard regions are not supported on this platform.");
return nullptr;
#endif
} else {
void* memory = isolate->array_buffer_allocator()->Allocate(size);
return memory;
......
......@@ -15,6 +15,7 @@
#include "src/parsing/preparse-data.h"
#include "src/wasm/signature-map.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-opcodes.h"
namespace v8 {
......@@ -482,6 +483,16 @@ inline bool EnableGuardRegions() {
return FLAG_wasm_guard_pages && kGuardRegionsSupported;
}
inline void* GetMemoryStartFromGuardRegionStart(void* guard_region_start) {
DCHECK(guard_region_start != nullptr);
return static_cast<byte*>(guard_region_start) + kLowerGuardRegionSize;
}
inline void* GetGuardRegionStartFromMemoryStart(void* mem_start) {
DCHECK(mem_start != nullptr);
return static_cast<byte*>(mem_start) - kLowerGuardRegionSize;
}
void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
Handle<FixedArray> code_table);
......
......@@ -99,12 +99,17 @@ class TestingModule : public ModuleEnv {
~TestingModule() {
if (instance->mem_start) {
if (EnableGuardRegions() && module_.is_wasm()) {
#if V8_TARGET_ARCH_64_BIT
// See the corresponding code in AddMemory. We use a different
// allocation path when guard regions are enabled, which means we have
// to free it differently too.
const size_t alloc_size =
RoundUp(kWasmMaxHeapOffset, v8::base::OS::CommitPageSize());
v8::base::OS::Free(instance->mem_start, alloc_size);
const size_t alloc_size = kTotalGuardRegionSize;
v8::base::OS::Free(
GetGuardRegionStartFromMemoryStart(instance->mem_start),
alloc_size);
#else
DCHECK(false && "Guard regions are not supported on this platform");
#endif
} else {
free(instance->mem_start);
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment