Commit 7eea8eda authored by Eric Holk, committed by Commit Bot

[wasm] Add negative guard regions

This change adds an additional 2GiB of guard region in front of guarded
WebAssembly memories. This provides defense in depth against code
generation errors, such as accidental sign extension of a 32-bit value.
As we consider supporting 4GiB WebAssembly memories, this is an
important line of defense.
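
For intuition, here is a minimal hypothetical sketch (not part of this CL) of
the failure mode the negative guard region catches: if generated code
accidentally sign-extends a 32-bit index before adding it to the memory base,
any index at or above 2^31 becomes a negative 64-bit offset of magnitude at
most 2GiB, which now lands in the inaccessible region in front of the buffer
and traps instead of silently touching unrelated memory.

    #include <cstdint>

    // Hypothetical buggy widening: the unsigned index is treated as signed
    // before being extended to 64 bits.
    int64_t BuggySignExtend(uint32_t index) {
      return static_cast<int64_t>(static_cast<int32_t>(index));
    }

    // For index = 0x80000000u this returns -2147483648 (-2GiB), so
    // base + offset falls inside [base - 2GiB, base): the new guard region.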

Change-Id: Ica643c01ef28b5d2a01fbbd46273442545d8448e
Reviewed-on: https://chromium-review.googlesource.com/1108503
Commit-Queue: Eric Holk <eholk@chromium.org>
Reviewed-by: Ben Titzer <titzer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53949}
parent 476cd0f0
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <limits>
+
 #include "src/wasm/wasm-memory.h"
 #include "src/objects-inl.h"
 #include "src/wasm/wasm-engine.h"
@@ -14,6 +16,8 @@ namespace wasm {
 
 namespace {
 
+constexpr size_t kNegativeGuardSize = 1u << 31;  // 2GiB
+
 void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
                               size_t size, bool require_full_guard_regions,
                               void** allocation_base,
@@ -24,9 +28,12 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
 #endif
   // We always allocate the largest possible offset into the heap, so the
   // addressable memory after the guard page can be made inaccessible.
+  //
+  // To protect against 32-bit integer overflow issues, we also protect the
+  // 2GiB before the valid part of the memory buffer.
   *allocation_length =
       require_full_guard_regions
-          ? RoundUp(kWasmMaxHeapOffset, CommitPageSize())
+          ? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
           : RoundUp(
                 base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size)),
                 kWasmPageSize);
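
To make the new arithmetic concrete, a small standalone sketch of the
reservation size computed above. kAssumedMaxHeapOffset and
kAssumedCommitPageSize are placeholders: kWasmMaxHeapOffset and
CommitPageSize() are defined elsewhere in V8 and are not shown in this diff.

    #include <cstddef>

    constexpr size_t kNegativeGuardSize = size_t{1} << 31;     // 2GiB in front
    constexpr size_t kAssumedMaxHeapOffset = size_t{1} << 33;  // placeholder
    constexpr size_t kAssumedCommitPageSize = 4096;            // placeholder

    // Round value up to a power-of-two alignment.
    constexpr size_t RoundUpTo(size_t value, size_t align) {
      return (value + align - 1) & ~(align - 1);
    }

    // With full guard regions, the reservation covers the 2GiB negative
    // guard plus the largest addressable heap offset, page-aligned.
    constexpr size_t kReservation = RoundUpTo(
        kAssumedMaxHeapOffset + kNegativeGuardSize, kAssumedCommitPageSize);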
@@ -68,7 +75,10 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
     memory_tracker->AddAllocationStatusSample(AllocationStatus::kOtherFailure);
     return nullptr;
   }
-  void* memory = *allocation_base;
+  byte* memory = reinterpret_cast<byte*>(*allocation_base);
+  if (require_full_guard_regions) {
+    memory += kNegativeGuardSize;
+  }
 
   // Make the part we care about accessible.
   if (size > 0) {
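
Finally, a hedged sketch of the base-pointer adjustment in the hunk above:
the writable wasm memory now begins kNegativeGuardSize bytes into the
mapping, so the first 2GiB of the reservation can stay protected. The name
and signature here are illustrative, not V8's exact code.

    #include <cstddef>
    #include <cstdint>

    using byte = uint8_t;

    constexpr size_t kNegativeGuardSize = size_t{1} << 31;  // 2GiB

    // Returns the first accessible byte of the wasm memory inside the
    // reservation; everything before it remains inaccessible and traps.
    byte* AccessibleBase(void* allocation_base,
                         bool require_full_guard_regions) {
      byte* memory = reinterpret_cast<byte*>(allocation_base);
      if (require_full_guard_regions) memory += kNegativeGuardSize;
      return memory;
    }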