Commit 6e06d756 authored by Samuel Groß, committed by V8 LUCI CQ

[sandbox] Disallow executable pages inside the sandbox

Executable pages should not be allowed inside the sandbox, as they could be
corrupted by an attacker and would thus pose a security risk. Furthermore,
executable pages require MAP_JIT on macOS, which causes fork() to become
excessively slow, in turn causing tests to time out. Because of this, the
sandbox now requires the external code space.

In addition, this CL adds a max_page_permissions member to the
VirtualAddressSpace API to make it possible to verify the maximum
permissions of a subspace.

Bug: v8:10391
Change-Id: Ib9562ecff6f018696bfa25143113d8583d1ec6cd
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3460406
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Samuel Groß <saelo@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79119}
parent 890ce6fd
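For orientation, a minimal sketch (not part of this change) of how the new max_page_permissions member of the VirtualAddressSpace API is meant to be used. It assumes a v8::VirtualAddressSpace* obtained elsewhere, uses kNoHint as in the unit tests below, and the helper name ReserveDataOnlySubspace is hypothetical:

#include <cassert>
#include <cstddef>
#include <memory>

#include "include/v8-platform.h"  // v8::VirtualAddressSpace, v8::PagePermissions

// Hypothetical helper: reserve a subspace that can never contain executable
// pages. The permission cap is fixed when the subspace is created.
std::unique_ptr<v8::VirtualAddressSpace> ReserveDataOnlySubspace(
    v8::VirtualAddressSpace* vas, size_t size) {
  if (!vas->CanAllocateSubspaces()) return nullptr;
  // size must be a multiple of vas->allocation_granularity().
  auto subspace = vas->AllocateSubspace(
      v8::VirtualAddressSpace::kNoHint, size, vas->allocation_granularity(),
      v8::PagePermissions::kReadWrite);
  if (subspace) {
    // New accessor added by this CL.
    assert(subspace->max_page_permissions() == v8::PagePermissions::kReadWrite);
    // Allocating or re-protecting pages as kReadWriteExecute inside this
    // subspace now fails a DCHECK in debug builds.
  }
  return subspace;
}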
......@@ -489,7 +489,8 @@ if (v8_enable_shared_ro_heap == "") {
}
# Enable the v8 sandbox on 64-bit Chromium builds.
if (build_with_chromium && v8_enable_pointer_compression_shared_cage) {
if (build_with_chromium && v8_enable_pointer_compression_shared_cage &&
v8_enable_external_code_space) {
v8_enable_sandbox = true
}
......@@ -523,7 +524,10 @@ assert(!v8_enable_external_code_space || v8_enable_pointer_compression,
"External code space feature requires pointer compression")
assert(!v8_enable_sandbox || v8_enable_pointer_compression_shared_cage,
"Sandbox requires the shared pointer compression cage")
"The sandbox requires the shared pointer compression cage")
assert(!v8_enable_sandbox || v8_enable_external_code_space,
"The sandbox requires the external code space")
assert(!v8_enable_sandboxed_pointers || v8_enable_sandbox,
"Sandboxed pointers require the sandbox")
......
......@@ -401,6 +401,8 @@ class PageAllocator {
// this is used to set the MAP_JIT flag on Apple Silicon.
// TODO(jkummerow): Remove this when Wasm has a platform-independent
// w^x implementation.
// TODO(saelo): Remove this once all JIT pages are allocated through the
// VirtualAddressSpace API.
kNoAccessWillJitLater
};
......@@ -511,7 +513,7 @@ class PageAllocator {
};
/**
* Page permissions.
* Possible permissions for memory pages.
*/
enum class PagePermissions {
kNoAccess,
......@@ -534,11 +536,13 @@ class VirtualAddressSpace {
using Address = uintptr_t;
VirtualAddressSpace(size_t page_size, size_t allocation_granularity,
Address base, size_t size)
Address base, size_t size,
PagePermissions max_page_permissions)
: page_size_(page_size),
allocation_granularity_(allocation_granularity),
base_(base),
size_(size) {}
size_(size),
max_page_permissions_(max_page_permissions) {}
virtual ~VirtualAddressSpace() = default;
......@@ -575,6 +579,14 @@ class VirtualAddressSpace {
*/
size_t size() const { return size_; }
/**
* The maximum page permissions that pages allocated inside this space can
* obtain.
*
* \returns the maximum page permissions.
*/
PagePermissions max_page_permissions() const { return max_page_permissions_; }
/**
* Sets the random seed so that GetRandomPageAddress() will generate
* repeatable sequences of random addresses.
......@@ -703,14 +715,14 @@ class VirtualAddressSpace {
* \param alignment The alignment of the subspace in bytes. Must be a multiple
* of the allocation_granularity() and should be a power of two.
*
* \param max_permissions The maximum permissions that pages allocated in the
* subspace can obtain.
* \param max_page_permissions The maximum permissions that pages allocated in
* the subspace can obtain.
*
* \returns a new subspace or nullptr on failure.
*/
virtual std::unique_ptr<VirtualAddressSpace> AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) = 0;
PagePermissions max_page_permissions) = 0;
//
// TODO(v8) maybe refactor the methods below before stabilizing the API. For
......@@ -750,6 +762,7 @@ class VirtualAddressSpace {
const size_t allocation_granularity_;
const Address base_;
const size_t size_;
const PagePermissions max_page_permissions_;
};
/**
......
......@@ -16,7 +16,7 @@ EmulatedVirtualAddressSubspace::EmulatedVirtualAddressSubspace(
size_t total_size)
: VirtualAddressSpace(parent_space->page_size(),
parent_space->allocation_granularity(), base,
total_size),
total_size, parent_space->max_page_permissions()),
mapped_size_(mapped_size),
parent_space_(parent_space),
region_allocator_(base, mapped_size, parent_space_->page_size()) {
......@@ -139,7 +139,7 @@ bool EmulatedVirtualAddressSubspace::CanAllocateSubspaces() {
std::unique_ptr<v8::VirtualAddressSpace>
EmulatedVirtualAddressSubspace::AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) {
PagePermissions max_page_permissions) {
UNREACHABLE();
}
......
......@@ -61,7 +61,7 @@ class V8_BASE_EXPORT EmulatedVirtualAddressSubspace final
std::unique_ptr<v8::VirtualAddressSpace> AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) override;
PagePermissions max_page_permissions) override;
bool DiscardSystemPages(Address address, size_t size) override;
......
......@@ -196,12 +196,11 @@ class V8_BASE_EXPORT OS {
static PRINTF_FORMAT(1, 0) void VPrintError(const char* format, va_list args);
// Memory permissions. These should be kept in sync with the ones in
// v8::PageAllocator.
// v8::PageAllocator and v8::PagePermissions.
enum class MemoryPermission {
kNoAccess,
kRead,
kReadWrite,
// TODO(hpayer): Remove this flag. Memory should never be rwx.
kReadWriteExecute,
kReadExecute,
// TODO(jkummerow): Remove this when Wasm has a platform-independent
......
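The "kept in sync" requirement above is what makes the static_cast<OS::MemoryPermission>(...) conversions later in this CL safe; src/base/virtual-address-space.cc guards it with STATIC_ASSERT_ENUM checks (the tail of those checks is visible at the top of that hunk). Below is a standalone model of that guard, using stand-in enum definitions instead of the real headers:

// Stand-in copies of the two enums, in the order shown in this diff; the real
// types live in include/v8-platform.h and src/base/platform/platform.h.
enum class PagePermissions {
  kNoAccess,
  kRead,
  kReadWrite,
  kReadWriteExecute,
  kReadExecute,
};
enum class MemoryPermission {
  kNoAccess,
  kRead,
  kReadWrite,
  kReadWriteExecute,
  kReadExecute,
  kNoAccessWillJitLater,
};

#define STATIC_ASSERT_ENUM(a, b)                            \
  static_assert(static_cast<int>(a) == static_cast<int>(b), \
                "mismatching enum values: " #a " != " #b)

// As long as these hold, static_cast<MemoryPermission>(page_permissions) is
// well defined, which is what the VirtualAddressSpace implementation relies on.
STATIC_ASSERT_ENUM(PagePermissions::kNoAccess, MemoryPermission::kNoAccess);
STATIC_ASSERT_ENUM(PagePermissions::kRead, MemoryPermission::kRead);
STATIC_ASSERT_ENUM(PagePermissions::kReadWrite, MemoryPermission::kReadWrite);
STATIC_ASSERT_ENUM(PagePermissions::kReadWriteExecute,
                   MemoryPermission::kReadWriteExecute);
STATIC_ASSERT_ENUM(PagePermissions::kReadExecute,
                   MemoryPermission::kReadExecute);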
......@@ -17,7 +17,8 @@ namespace base {
LsanVirtualAddressSpace::LsanVirtualAddressSpace(
std::unique_ptr<v8::VirtualAddressSpace> vas)
: VirtualAddressSpace(vas->page_size(), vas->allocation_granularity(),
vas->base(), vas->size()),
vas->base(), vas->size(),
vas->max_page_permissions()),
vas_(std::move(vas)) {
DCHECK_NOT_NULL(vas_);
}
......@@ -46,9 +47,9 @@ bool LsanVirtualAddressSpace::FreePages(Address address, size_t size) {
std::unique_ptr<VirtualAddressSpace> LsanVirtualAddressSpace::AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) {
PagePermissions max_page_permissions) {
auto subspace =
vas_->AllocateSubspace(hint, size, alignment, max_permissions);
vas_->AllocateSubspace(hint, size, alignment, max_page_permissions);
#if defined(LEAK_SANITIZER)
if (subspace) {
subspace = std::make_unique<LsanVirtualAddressSpace>(std::move(subspace));
......
......@@ -52,7 +52,7 @@ class V8_BASE_EXPORT LsanVirtualAddressSpace final
std::unique_ptr<VirtualAddressSpace> AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) override;
PagePermissions max_page_permissions) override;
bool DiscardSystemPages(Address address, size_t size) override {
return vas_->DiscardSystemPages(address, size);
......
......@@ -26,10 +26,34 @@ STATIC_ASSERT_ENUM(PagePermissions::kReadExecute,
#undef STATIC_ASSERT_ENUM
namespace {
uint8_t PagePermissionsToBitset(PagePermissions permissions) {
switch (permissions) {
case PagePermissions::kNoAccess:
return 0b000;
case PagePermissions::kRead:
return 0b100;
case PagePermissions::kReadWrite:
return 0b110;
case PagePermissions::kReadWriteExecute:
return 0b111;
case PagePermissions::kReadExecute:
return 0b101;
}
}
} // namespace
bool IsSubset(PagePermissions lhs, PagePermissions rhs) {
uint8_t lhs_bits = PagePermissionsToBitset(lhs);
uint8_t rhs_bits = PagePermissionsToBitset(rhs);
return (lhs_bits & rhs_bits) == lhs_bits;
}
VirtualAddressSpace::VirtualAddressSpace()
: VirtualAddressSpaceBase(OS::CommitPageSize(), OS::AllocatePageSize(),
kNullAddress,
std::numeric_limits<uintptr_t>::max()) {
std::numeric_limits<uintptr_t>::max(),
PagePermissions::kReadWriteExecute) {
#if V8_OS_WIN
// On Windows, this additional step is required to lookup the VirtualAlloc2
// and friends functions.
......@@ -103,7 +127,7 @@ bool VirtualAddressSpace::CanAllocateSubspaces() {
std::unique_ptr<v8::VirtualAddressSpace> VirtualAddressSpace::AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) {
PagePermissions max_page_permissions) {
DCHECK(IsAligned(alignment, allocation_granularity()));
DCHECK(IsAligned(hint, alignment));
DCHECK(IsAligned(size, allocation_granularity()));
......@@ -111,11 +135,11 @@ std::unique_ptr<v8::VirtualAddressSpace> VirtualAddressSpace::AllocateSubspace(
base::Optional<AddressSpaceReservation> reservation =
OS::CreateAddressSpaceReservation(
reinterpret_cast<void*>(hint), size, alignment,
static_cast<OS::MemoryPermission>(max_permissions));
static_cast<OS::MemoryPermission>(max_page_permissions));
if (!reservation.has_value())
return std::unique_ptr<v8::VirtualAddressSpace>();
return std::unique_ptr<v8::VirtualAddressSpace>(
new VirtualAddressSubspace(*reservation, this));
new VirtualAddressSubspace(*reservation, this, max_page_permissions));
}
bool VirtualAddressSpace::DiscardSystemPages(Address address, size_t size) {
......@@ -137,10 +161,12 @@ bool VirtualAddressSpace::FreeSubspace(VirtualAddressSubspace* subspace) {
}
VirtualAddressSubspace::VirtualAddressSubspace(
AddressSpaceReservation reservation, VirtualAddressSpaceBase* parent_space)
: VirtualAddressSpaceBase(
parent_space->page_size(), parent_space->allocation_granularity(),
reinterpret_cast<Address>(reservation.base()), reservation.size()),
AddressSpaceReservation reservation, VirtualAddressSpaceBase* parent_space,
PagePermissions max_page_permissions)
: VirtualAddressSpaceBase(parent_space->page_size(),
parent_space->allocation_granularity(),
reinterpret_cast<Address>(reservation.base()),
reservation.size(), max_page_permissions),
reservation_(reservation),
region_allocator_(reinterpret_cast<Address>(reservation.base()),
reservation.size(),
......@@ -183,6 +209,7 @@ Address VirtualAddressSubspace::AllocatePages(Address hint, size_t size,
DCHECK(IsAligned(alignment, allocation_granularity()));
DCHECK(IsAligned(hint, alignment));
DCHECK(IsAligned(size, allocation_granularity()));
DCHECK(IsSubset(permissions, max_page_permissions()));
MutexGuard guard(&mutex_);
......@@ -218,6 +245,7 @@ bool VirtualAddressSubspace::SetPagePermissions(Address address, size_t size,
PagePermissions permissions) {
DCHECK(IsAligned(address, page_size()));
DCHECK(IsAligned(size, page_size()));
DCHECK(IsSubset(permissions, max_page_permissions()));
return reservation_.SetPermissions(
reinterpret_cast<void*>(address), size,
......@@ -247,10 +275,11 @@ bool VirtualAddressSubspace::FreeGuardRegion(Address address, size_t size) {
std::unique_ptr<v8::VirtualAddressSpace>
VirtualAddressSubspace::AllocateSubspace(Address hint, size_t size,
size_t alignment,
PagePermissions max_permissions) {
PagePermissions max_page_permissions) {
DCHECK(IsAligned(alignment, allocation_granularity()));
DCHECK(IsAligned(hint, alignment));
DCHECK(IsAligned(size, allocation_granularity()));
DCHECK(IsSubset(max_page_permissions, this->max_page_permissions()));
MutexGuard guard(&mutex_);
......@@ -262,13 +291,13 @@ VirtualAddressSubspace::AllocateSubspace(Address hint, size_t size,
base::Optional<AddressSpaceReservation> reservation =
reservation_.CreateSubReservation(
reinterpret_cast<void*>(address), size,
static_cast<OS::MemoryPermission>(max_permissions));
static_cast<OS::MemoryPermission>(max_page_permissions));
if (!reservation.has_value()) {
CHECK_EQ(size, region_allocator_.FreeRegion(address));
return nullptr;
}
return std::unique_ptr<v8::VirtualAddressSpace>(
new VirtualAddressSubspace(*reservation, this));
new VirtualAddressSubspace(*reservation, this, max_page_permissions));
}
bool VirtualAddressSubspace::DiscardSystemPages(Address address, size_t size) {
......
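For orientation, the new IsSubset() helper maps each PagePermissions value onto an r/w/x bit pattern (via PagePermissionsToBitset above) and checks bitwise containment. A small standalone model of that check, with the bit values written directly into a stand-in enum rather than derived from the real PagePermissions type:

#include <cassert>
#include <cstdint>

// Simplified, standalone model of base::IsSubset(): each permission is an
// r/w/x bitset, and lhs is a subset of rhs iff every bit of lhs is set in rhs.
enum class Perm : uint8_t {
  kNoAccess = 0b000,
  kRead = 0b100,
  kReadWrite = 0b110,
  kReadExecute = 0b101,
  kReadWriteExecute = 0b111,
};

bool IsSubsetModel(Perm lhs, Perm rhs) {
  uint8_t l = static_cast<uint8_t>(lhs);
  uint8_t r = static_cast<uint8_t>(rhs);
  return (l & r) == l;
}

int main() {
  assert(IsSubsetModel(Perm::kReadWrite, Perm::kReadWriteExecute));   // rw- in rwx
  assert(!IsSubsetModel(Perm::kReadWriteExecute, Perm::kReadWrite));  // x not allowed
  assert(!IsSubsetModel(Perm::kReadWrite, Perm::kReadExecute));       // w not in r-x
  return 0;
}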
......@@ -35,6 +35,12 @@ class VirtualAddressSpaceBase
virtual bool FreeSubspace(VirtualAddressSubspace* subspace) = 0;
};
/*
* Helper routine to determine whether one set of page permissions (the lhs) is
* a subset of another one (the rhs).
*/
V8_BASE_EXPORT bool IsSubset(PagePermissions lhs, PagePermissions rhs);
/*
* The virtual address space of the current process. Conceptually, there
* should only be one such "root" instance. However, in practice there is no
......@@ -66,7 +72,7 @@ class V8_BASE_EXPORT VirtualAddressSpace : public VirtualAddressSpaceBase {
std::unique_ptr<v8::VirtualAddressSpace> AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) override;
PagePermissions max_page_permissions) override;
bool DiscardSystemPages(Address address, size_t size) override;
......@@ -104,7 +110,7 @@ class V8_BASE_EXPORT VirtualAddressSubspace : public VirtualAddressSpaceBase {
std::unique_ptr<v8::VirtualAddressSpace> AllocateSubspace(
Address hint, size_t size, size_t alignment,
PagePermissions max_permissions) override;
PagePermissions max_page_permissions) override;
bool DiscardSystemPages(Address address, size_t size) override;
......@@ -118,7 +124,8 @@ class V8_BASE_EXPORT VirtualAddressSubspace : public VirtualAddressSpaceBase {
bool FreeSubspace(VirtualAddressSubspace* subspace) override;
VirtualAddressSubspace(AddressSpaceReservation reservation,
VirtualAddressSpaceBase* parent_space);
VirtualAddressSpaceBase* parent_space,
PagePermissions max_page_permissions);
// The address space reservation backing this subspace.
AddressSpaceReservation reservation_;
......
......@@ -184,11 +184,15 @@ bool Sandbox::Initialize(v8::VirtualAddressSpace* vas, size_t size,
Address hint = RoundDown(vas->RandomPageAddress(), kSandboxAlignment);
// Currently, executable memory is still allocated inside the sandbox. In
// the future, we should drop that and use kReadWrite as max_permissions.
address_space_ =
vas->AllocateSubspace(hint, reservation_size, kSandboxAlignment,
PagePermissions::kReadWriteExecute);
// There should be no executable pages mapped inside the sandbox since
// those could be corrupted by an attacker and therefore pose a security
// risk. Furthermore, allowing executable mappings in the sandbox requires
// MAP_JIT on macOS, which causes fork() to become excessively slow
// (multiple seconds or even minutes for a 1TB sandbox on macOS 12.X), in
// turn causing tests to time out. As such, the maximum page permission
// inside the sandbox should be read + write.
address_space_ = vas->AllocateSubspace(
hint, reservation_size, kSandboxAlignment, PagePermissions::kReadWrite);
if (!address_space_) {
size /= 2;
}
......
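The practical consequence is that pages needing execute permission must come from a space whose max_page_permissions() still include execute, i.e. from outside the sandbox, which is why the sandbox now requires the external code space. A hypothetical sketch (AllocateCodePages is not a V8 function; kNoHint used as in the unit tests below):

// Succeeds only on a space that still permits execute access, for example the
// space backing the external code space. On the sandbox's subspace, whose cap
// is now kReadWrite, this request would trip
// DCHECK(IsSubset(permissions, max_page_permissions())) in debug builds.
v8::VirtualAddressSpace::Address AllocateCodePages(
    v8::VirtualAddressSpace* code_space, size_t size) {
  // size must be a multiple of the allocation granularity.
  return code_space->AllocatePages(
      v8::VirtualAddressSpace::kNoHint, size,
      code_space->allocation_granularity(),
      v8::PagePermissions::kReadWriteExecute);
}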
......@@ -98,6 +98,44 @@ void TestParentSpaceCannotAllocateInChildSpace(v8::VirtualAddressSpace* parent,
}
}
TEST(VirtualAddressSpaceTest, TestPagePermissionSubsets) {
const PagePermissions kNoAccess = PagePermissions::kNoAccess;
const PagePermissions kRead = PagePermissions::kRead;
const PagePermissions kReadWrite = PagePermissions::kReadWrite;
const PagePermissions kReadWriteExecute = PagePermissions::kReadWriteExecute;
const PagePermissions kReadExecute = PagePermissions::kReadExecute;
EXPECT_TRUE(IsSubset(kNoAccess, kNoAccess));
EXPECT_FALSE(IsSubset(kRead, kNoAccess));
EXPECT_FALSE(IsSubset(kReadWrite, kNoAccess));
EXPECT_FALSE(IsSubset(kReadWriteExecute, kNoAccess));
EXPECT_FALSE(IsSubset(kReadExecute, kNoAccess));
EXPECT_TRUE(IsSubset(kNoAccess, kRead));
EXPECT_TRUE(IsSubset(kRead, kRead));
EXPECT_FALSE(IsSubset(kReadWrite, kRead));
EXPECT_FALSE(IsSubset(kReadWriteExecute, kRead));
EXPECT_FALSE(IsSubset(kReadExecute, kRead));
EXPECT_TRUE(IsSubset(kNoAccess, kReadWrite));
EXPECT_TRUE(IsSubset(kRead, kReadWrite));
EXPECT_TRUE(IsSubset(kReadWrite, kReadWrite));
EXPECT_FALSE(IsSubset(kReadWriteExecute, kReadWrite));
EXPECT_FALSE(IsSubset(kReadExecute, kReadWrite));
EXPECT_TRUE(IsSubset(kNoAccess, kReadWriteExecute));
EXPECT_TRUE(IsSubset(kRead, kReadWriteExecute));
EXPECT_TRUE(IsSubset(kReadWrite, kReadWriteExecute));
EXPECT_TRUE(IsSubset(kReadWriteExecute, kReadWriteExecute));
EXPECT_TRUE(IsSubset(kReadExecute, kReadWriteExecute));
EXPECT_TRUE(IsSubset(kNoAccess, kReadExecute));
EXPECT_TRUE(IsSubset(kRead, kReadExecute));
EXPECT_FALSE(IsSubset(kReadWrite, kReadExecute));
EXPECT_FALSE(IsSubset(kReadWriteExecute, kReadExecute));
EXPECT_TRUE(IsSubset(kReadExecute, kReadExecute));
}
TEST(VirtualAddressSpaceTest, TestRootSpace) {
VirtualAddressSpace rootspace;
......@@ -114,12 +152,13 @@ TEST(VirtualAddressSpaceTest, TestSubspace) {
if (!rootspace.CanAllocateSubspaces()) return;
size_t subspace_alignment = rootspace.allocation_granularity();
auto subspace = rootspace.AllocateSubspace(
VirtualAddressSpace::kNoHint, kSubspaceSize, subspace_alignment,
PagePermissions::kReadWriteExecute);
auto subspace = rootspace.AllocateSubspace(VirtualAddressSpace::kNoHint,
kSubspaceSize, subspace_alignment,
PagePermissions::kReadWrite);
ASSERT_TRUE(subspace);
EXPECT_NE(kNullAddress, subspace->base());
EXPECT_EQ(kSubspaceSize, subspace->size());
EXPECT_EQ(PagePermissions::kReadWrite, subspace->max_page_permissions());
TestRandomPageAddressGeneration(subspace.get());
TestBasicPageAllocation(subspace.get());
......@@ -131,10 +170,11 @@ TEST(VirtualAddressSpaceTest, TestSubspace) {
size_t subsubspace_alignment = subspace->allocation_granularity();
auto subsubspace = subspace->AllocateSubspace(
VirtualAddressSpace::kNoHint, kSubSubspaceSize, subsubspace_alignment,
PagePermissions::kReadWriteExecute);
PagePermissions::kReadWrite);
ASSERT_TRUE(subsubspace);
EXPECT_NE(kNullAddress, subsubspace->base());
EXPECT_EQ(kSubSubspaceSize, subsubspace->size());
EXPECT_EQ(PagePermissions::kReadWrite, subsubspace->max_page_permissions());
TestRandomPageAddressGeneration(subsubspace.get());
TestBasicPageAllocation(subsubspace.get());
......@@ -180,6 +220,7 @@ TEST(VirtualAddressSpaceTest, TestEmulatedSubspace) {
kSubspaceMappedSize, kSubspaceSize);
EXPECT_EQ(reservation, subspace.base());
EXPECT_EQ(kSubspaceSize, subspace.size());
EXPECT_EQ(rootspace.max_page_permissions(), subspace.max_page_permissions());
TestRandomPageAddressGeneration(&subspace);
TestBasicPageAllocation(&subspace);
......