Commit 090431b0 authored by Shu-yu Guo, committed by Commit Bot

[ptr-cage] Factor out a VirtualMemoryCage and remove PtrComprCage

The plan is to use VirtualMemoryCage both for the pointer compression
cage as well as the code range in a future CL. The PtrComprCage class is
removed in favor of using VirtualMemoryCage directly.

Bug: v8:11460
Change-Id: I4e34a3db1359319e3539ede587f6a73e0af03eec
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2824098
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Junliang Yan <junyan@redhat.com>
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74048}
parent bab79d3c
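For orientation, here is a minimal sketch (not part of the CL) of how callers see the renamed accessors after this change; the helper name CageBaseOf is hypothetical, and the calls mirror the src/execution/isolate.cc and src/init/isolate-allocator.h hunks below.

// Hypothetical illustration only; assumes the v8::internal namespace.
Address CageBaseOf(IsolateAllocator* isolate_allocator) {
  // GetPtrComprCageBaseAddress() is renamed to GetPtrComprCageBase(); it
  // returns kNullAddress when pointer compression is disabled.
  Address base = isolate_allocator->GetPtrComprCageBase();
  // The reservation itself is now exposed as a VirtualMemoryCage (nullptr
  // when pointer compression is disabled).
  VirtualMemoryCage* cage = isolate_allocator->GetPtrComprCage();
  if (cage) DCHECK_EQ(base, cage->base());
  return base;
}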
@@ -2670,7 +2670,6 @@ v8_header_set("v8_internal_headers") {
     "src/init/heap-symbols.h",
     "src/init/icu_util.h",
     "src/init/isolate-allocator.h",
-    "src/init/ptr-compr-cage.h",
     "src/init/setup-isolate.h",
     "src/init/startup-data-util.h",
     "src/init/v8.h",
@@ -3758,7 +3757,6 @@ v8_source_set("v8_base_without_compiler") {
     "src/init/bootstrapper.cc",
     "src/init/icu_util.cc",
     "src/init/isolate-allocator.cc",
-    "src/init/ptr-compr-cage.cc",
     "src/init/startup-data-util.cc",
     "src/init/v8.cc",
     "src/interpreter/bytecode-array-builder.cc",
@@ -6,15 +6,11 @@
#define V8_COMPILER_BACKEND_INSTRUCTION_SCHEDULER_H_

#include "src/base/optional.h"
+#include "src/base/utils/random-number-generator.h"
#include "src/compiler/backend/instruction.h"
#include "src/zone/zone-containers.h"

namespace v8 {
-namespace base {
-class RandomNumberGenerator;
-}  // namespace base
-
namespace internal {
namespace compiler {
@@ -54,7 +54,6 @@
#include "src/heap/read-only-heap.h"
#include "src/ic/stub-cache.h"
#include "src/init/bootstrapper.h"
-#include "src/init/ptr-compr-cage.h"
#include "src/init/setup-isolate.h"
#include "src/init/v8.h"
#include "src/interpreter/interpreter.h"
@@ -2950,7 +2949,7 @@ v8::PageAllocator* Isolate::page_allocator() const {
}

Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
-    : isolate_data_(this, isolate_allocator->GetPtrComprCageBaseAddress()),
+    : isolate_data_(this, isolate_allocator->GetPtrComprCageBase()),
      isolate_allocator_(std::move(isolate_allocator)),
      id_(isolate_counter.fetch_add(1, std::memory_order_relaxed)),
      allocator_(new TracingAccountingAllocator(this)),
@@ -12,15 +12,84 @@
namespace v8 {
namespace internal {
#ifdef V8_COMPRESS_POINTERS
namespace {
// "IsolateRootBiasPage" is an optional region before the 4Gb aligned
// reservation. This "IsolateRootBiasPage" page is supposed to be used for
// storing part of the Isolate object when Isolate::isolate_root_bias() is
// not zero.
inline size_t GetIsolateRootBiasPageSize(
v8::PageAllocator* platform_page_allocator) {
return RoundUp(Isolate::isolate_root_bias(),
platform_page_allocator->AllocatePageSize());
}
} // namespace
struct PtrComprCageReservationParams
: public VirtualMemoryCage::ReservationParams {
PtrComprCageReservationParams() {
page_allocator = GetPlatformPageAllocator();
// This is only used when there is a per-Isolate cage, in which case the
// Isolate is allocated within the cage, and the Isolate root is also the
// cage base.
const size_t kIsolateRootBiasPageSize =
COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL
? GetIsolateRootBiasPageSize(page_allocator)
: 0;
reservation_size = kPtrComprCageReservationSize + kIsolateRootBiasPageSize;
base_alignment = kPtrComprCageBaseAlignment;
base_bias_size = kIsolateRootBiasPageSize;
// Simplify BoundedPageAllocator's life by configuring it to use same page
// size as the Heap will use (MemoryChunk::kPageSize).
page_size =
RoundUp(size_t{1} << kPageSizeBits, page_allocator->AllocatePageSize());
requested_start_hint =
reinterpret_cast<Address>(page_allocator->GetRandomMmapAddr());
}
};
#endif // V8_COMPRESS_POINTERS
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(VirtualMemoryCage, GetProcessWidePtrComprCage)
} // anonymous namespace
// static
void IsolateAllocator::FreeProcessWidePtrComprCageForTesting() {
GetProcessWidePtrComprCage()->Free();
}
#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
// static
void IsolateAllocator::InitializeOncePerProcess() {
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
PtrComprCageReservationParams params;
if (!GetProcessWidePtrComprCage()->InitReservation(params)) {
V8::FatalProcessOutOfMemory(nullptr,
"Failed to reserve memory for process-wide V8 "
"pointer compression cage");
}
#endif
}
IsolateAllocator::IsolateAllocator() {
#if defined(V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE)
-  isolate_cage_.InitReservationOrDie();
-  page_allocator_ = isolate_cage_.page_allocator();
-  CommitPagesForIsolate(isolate_cage_.base());
+  PtrComprCageReservationParams params;
+  if (!isolate_ptr_compr_cage_.InitReservation(params)) {
+    V8::FatalProcessOutOfMemory(
+        nullptr,
+        "Failed to reserve memory for Isolate V8 pointer compression cage");
+  }
+  page_allocator_ = isolate_ptr_compr_cage_.page_allocator();
+  CommitPagesForIsolate();
#elif defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
  // Allocate Isolate in C++ heap when sharing a cage.
-  CHECK(PtrComprCage::GetProcessWideCage()->IsReserved());
-  page_allocator_ = PtrComprCage::GetProcessWideCage()->page_allocator();
+  CHECK(GetProcessWidePtrComprCage()->IsReserved());
+  page_allocator_ = GetProcessWidePtrComprCage()->page_allocator();
  isolate_memory_ = ::operator new(sizeof(Isolate));
#else
  // Allocate Isolate in C++ heap.
@@ -33,8 +102,9 @@ IsolateAllocator::IsolateAllocator() {

IsolateAllocator::~IsolateAllocator() {
#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
-  if (isolate_cage_.reservation_.IsReserved()) {
-    // The actual memory will be freed when the |isolate_cage_| will die.
+  if (isolate_ptr_compr_cage_.reservation()->IsReserved()) {
+    // The actual memory will be freed when the |isolate_ptr_compr_cage_| will
+    // die.
    return;
  }
#endif
@@ -43,41 +113,32 @@ IsolateAllocator::~IsolateAllocator() {
  ::operator delete(isolate_memory_);
}

-Address IsolateAllocator::GetPtrComprCageBaseAddress() const {
+VirtualMemoryCage* IsolateAllocator::GetPtrComprCage() {
#if defined V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
-  return isolate_cage_.base();
+  return &isolate_ptr_compr_cage_;
#elif defined V8_COMPRESS_POINTERS_IN_SHARED_CAGE
-  return PtrComprCage::GetProcessWideCage()->base();
+  return GetProcessWidePtrComprCage();
#else
-  return kNullAddress;
+  return nullptr;
#endif
}

+const VirtualMemoryCage* IsolateAllocator::GetPtrComprCage() const {
+  return const_cast<IsolateAllocator*>(this)->GetPtrComprCage();
+}

-#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
-namespace {
-// "IsolateRootBiasPage" is an optional region before the 4Gb aligned
-// reservation. This "IsolateRootBiasPage" page is supposed to be used for
-// storing part of the Isolate object when Isolate::isolate_root_bias() is
-// not zero.
-inline size_t GetIsolateRootBiasPageSize(
-    v8::PageAllocator* platform_page_allocator) {
-  return RoundUp(Isolate::isolate_root_bias(),
-                 platform_page_allocator->AllocatePageSize());
-}
-}  // namespace

-void IsolateAllocator::CommitPagesForIsolate(Address heap_reservation_address) {
-  const size_t kIsolateRootBiasPageSize =
-      GetIsolateRootBiasPageSize(page_allocator_);
-  Address isolate_root = heap_reservation_address + kIsolateRootBiasPageSize;
+#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+void IsolateAllocator::CommitPagesForIsolate() {
+  v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
+
+  CHECK(isolate_ptr_compr_cage_.IsReserved());
+  Address isolate_root = isolate_ptr_compr_cage_.base();
  CHECK(IsAligned(isolate_root, kPtrComprCageBaseAlignment));
-  CHECK(isolate_cage_.reservation_.InVM(
-      heap_reservation_address,
-      kPtrComprCageReservationSize + kIsolateRootBiasPageSize));
+  CHECK_GE(isolate_ptr_compr_cage_.reservation()->size(),
+           kPtrComprCageReservationSize +
+               GetIsolateRootBiasPageSize(platform_page_allocator));
+  CHECK(isolate_ptr_compr_cage_.reservation()->InVM(
+      isolate_root, kPtrComprCageReservationSize));

  size_t page_size = page_allocator_->AllocatePageSize();
  Address isolate_address = isolate_root - Isolate::isolate_root_bias();
@@ -89,24 +150,25 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_reservation_address) {
    size_t reserved_region_size =
        RoundUp(isolate_end, page_size) - reserved_region_address;

-    CHECK(isolate_cage_.page_allocator()->AllocatePagesAt(
+    CHECK(isolate_ptr_compr_cage_.page_allocator()->AllocatePagesAt(
        reserved_region_address, reserved_region_size,
        PageAllocator::Permission::kNoAccess));
  }

  // Commit pages where the Isolate will be stored.
  {
-    size_t commit_page_size = page_allocator_->CommitPageSize();
+    size_t commit_page_size = platform_page_allocator->CommitPageSize();
    Address committed_region_address =
        RoundDown(isolate_address, commit_page_size);
    size_t committed_region_size =
        RoundUp(isolate_end, commit_page_size) - committed_region_address;

-    // We are using |isolate_cage_.reservation_| directly here because
-    // |page_allocator_| has bigger commit page size than we actually need.
-    CHECK(isolate_cage_.reservation_.SetPermissions(committed_region_address,
-                                                    committed_region_size,
-                                                    PageAllocator::kReadWrite));
+    // We are using |isolate_ptr_compr_cage_.reservation()| directly here
+    // because |page_allocator_| has bigger commit page size than we actually
+    // need.
+    CHECK(isolate_ptr_compr_cage_.reservation()->SetPermissions(
+        committed_region_address, committed_region_size,
+        PageAllocator::kReadWrite));

    if (Heap::ShouldZapGarbage()) {
      MemsetPointer(reinterpret_cast<Address*>(committed_region_address),
@@ -7,19 +7,12 @@
#include <memory>

-#include "src/base/bounded-page-allocator.h"
#include "src/base/page-allocator.h"
#include "src/common/globals.h"
-#include "src/init/ptr-compr-cage.h"
+#include "src/flags/flags.h"
#include "src/utils/allocation.h"

namespace v8 {

-// Forward declarations.
-namespace base {
-class BoundedPageAllocator;
-}  // namespace base
-
namespace internal {

// IsolateAllocator object is responsible for allocating memory for one (!)
@@ -49,18 +42,29 @@ class V8_EXPORT_PRIVATE IsolateAllocator final {
  v8::PageAllocator* page_allocator() const { return page_allocator_; }

-  // When pointer compression is on, returns the base address of the pointer
-  // compression cage reservation. Otherwise returns kNullAddress.
-  Address GetPtrComprCageBaseAddress() const;
+  Address GetPtrComprCageBase() const {
+    return COMPRESS_POINTERS_BOOL ? GetPtrComprCage()->base() : kNullAddress;
+  }
+
+  // When pointer compression is on, return the pointer compression
+  // cage. Otherwise return nullptr.
+  VirtualMemoryCage* GetPtrComprCage();
+  const VirtualMemoryCage* GetPtrComprCage() const;
+
+  static void InitializeOncePerProcess();

 private:
-  void CommitPagesForIsolate(Address heap_reservation_address);
+  void CommitPagesForIsolate();
+
+  friend class SequentialUnmapperTest;
+
+  // Only used for testing.
+  static void FreeProcessWidePtrComprCageForTesting();

  // The allocated memory for Isolate instance.
  void* isolate_memory_ = nullptr;
  v8::PageAllocator* page_allocator_ = nullptr;
#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
-  PtrComprCage isolate_cage_;
+  VirtualMemoryCage isolate_ptr_compr_cage_;
#endif
};

// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/init/ptr-compr-cage.h"
#include "src/common/ptr-compr-inl.h"
namespace v8 {
namespace internal {
PtrComprCage::PtrComprCage() = default;
// static
void PtrComprCage::InitializeOncePerProcess() {
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
GetProcessWideCage()->InitReservationOrDie();
#endif
}
#ifdef V8_COMPRESS_POINTERS
PtrComprCage::~PtrComprCage() { Free(); }
bool PtrComprCage::InitReservation() {
CHECK(!IsReserved());
v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
// Reserve a 4Gb region such as that the reservation address is 4Gb aligned.
const size_t reservation_size = kPtrComprCageReservationSize;
const size_t base_alignment = kPtrComprCageBaseAlignment;
const int kMaxAttempts = 4;
for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
Address hint = RoundDown(
reinterpret_cast<Address>(platform_page_allocator->GetRandomMmapAddr()),
base_alignment);
// Within this reservation there will be a sub-region with proper alignment.
VirtualMemory padded_reservation(platform_page_allocator,
reservation_size * 2,
reinterpret_cast<void*>(hint));
if (!padded_reservation.IsReserved()) break;
// Find properly aligned sub-region inside the reservation.
Address address = RoundUp(padded_reservation.address(), base_alignment);
CHECK(padded_reservation.InVM(address, reservation_size));
#if defined(V8_OS_FUCHSIA)
// Fuchsia does not respect given hints so as a workaround we will use
// overreserved address space region instead of trying to re-reserve
// a subregion.
bool overreserve = true;
#else
// For the last attempt use the overreserved region to avoid an OOM crash.
// This case can happen if there are many isolates being created in
// parallel that race for reserving the regions.
bool overreserve = (attempt == kMaxAttempts - 1);
#endif
if (overreserve) {
if (padded_reservation.InVM(address, reservation_size)) {
reservation_ = std::move(padded_reservation);
base_ = address;
break;
}
} else {
// Now free the padded reservation and immediately try to reserve an exact
// region at aligned address. We have to do this dancing because the
// reservation address requirement is more complex than just a certain
// alignment and not all operating systems support freeing parts of
// reserved address space regions.
padded_reservation.Free();
VirtualMemory reservation(platform_page_allocator, reservation_size,
reinterpret_cast<void*>(address));
if (!reservation.IsReserved()) break;
// The reservation could still be somewhere else but we can accept it
// if it has the required alignment.
Address address = RoundUp(reservation.address(), base_alignment);
if (reservation.address() == address) {
reservation_ = std::move(reservation);
CHECK_EQ(reservation_.size(), reservation_size);
base_ = address;
break;
}
}
}
if (base_ == kNullAddress) return false;
// Simplify BoundedPageAllocator's life by configuring it to use same page
// size as the Heap will use (MemoryChunk::kPageSize).
size_t page_size = RoundUp(size_t{1} << kPageSizeBits,
platform_page_allocator->AllocatePageSize());
page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
platform_page_allocator, base_, kPtrComprCageReservationSize, page_size);
return true;
}
void PtrComprCage::InitReservationOrDie() {
if (!InitReservation()) {
V8::FatalProcessOutOfMemory(
nullptr, "Failed to reserve memory for V8 pointer compression cage");
}
}
void PtrComprCage::Free() {
if (IsReserved()) {
base_ = kNullAddress;
page_allocator_.reset();
reservation_.Free();
}
}
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(PtrComprCage, GetSharedProcessWideCage)
} // anonymous namespace
// static
PtrComprCage* PtrComprCage::GetProcessWideCage() {
return GetSharedProcessWideCage();
}
#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
#endif // V8_COMPRESS_POINTERS
} // namespace internal
} // namespace v8
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_INIT_PTR_COMPR_CAGE_H_
#define V8_INIT_PTR_COMPR_CAGE_H_
#include <memory>
#include "src/base/bounded-page-allocator.h"
#include "src/base/page-allocator.h"
#include "src/common/globals.h"
#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
class V8_EXPORT_PRIVATE PtrComprCage final {
public:
PtrComprCage();
~PtrComprCage();
PtrComprCage(const PtrComprCage&) = delete;
PtrComprCage& operator=(PtrComprCage&) = delete;
PtrComprCage(PtrComprCage&& other) V8_NOEXCEPT { *this = std::move(other); }
PtrComprCage& operator=(PtrComprCage&& other) V8_NOEXCEPT {
base_ = other.base_;
other.base_ = kNullAddress;
page_allocator_ = std::move(other.page_allocator_);
reservation_ = std::move(other.reservation_);
return *this;
}
Address base() const { return base_; }
base::BoundedPageAllocator* page_allocator() const {
return page_allocator_.get();
}
const VirtualMemory* reservation() const { return &reservation_; }
bool IsReserved() const {
DCHECK_EQ(base_ != kNullAddress, reservation_.IsReserved());
return base_ != kNullAddress;
}
bool InitReservation();
void InitReservationOrDie();
void Free();
static void InitializeOncePerProcess();
static PtrComprCage* GetProcessWideCage();
private:
friend class IsolateAllocator;
Address base_ = kNullAddress;
std::unique_ptr<base::BoundedPageAllocator> page_allocator_;
VirtualMemory reservation_;
};
} // namespace internal
} // namespace v8
#endif // V8_INIT_PTR_COMPR_CAGE_H_
@@ -151,7 +151,7 @@ void V8::InitializeOncePerProcessImpl() {
#if defined(V8_USE_PERFETTO)
  if (perfetto::Tracing::IsInitialized()) TrackEvent::Register();
#endif
-  PtrComprCage::InitializeOncePerProcess();
+  IsolateAllocator::InitializeOncePerProcess();
  Isolate::InitializeOncePerProcess();

#if defined(USE_SIMULATOR)
@@ -5,7 +5,9 @@
#include "src/utils/allocation.h"

#include <stdlib.h>  // For free, malloc.

#include "src/base/bits.h"
+#include "src/base/bounded-page-allocator.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
#include "src/base/page-allocator.h"
@@ -293,5 +295,137 @@ void VirtualMemory::FreeReadOnly() {
                      RoundUp(region.size(), page_allocator->AllocatePageSize())));
}
VirtualMemoryCage::VirtualMemoryCage() = default;
VirtualMemoryCage::~VirtualMemoryCage() { Free(); }
VirtualMemoryCage::VirtualMemoryCage(VirtualMemoryCage&& other) V8_NOEXCEPT {
*this = std::move(other);
}
VirtualMemoryCage& VirtualMemoryCage::operator=(VirtualMemoryCage&& other)
V8_NOEXCEPT {
page_allocator_ = std::move(other.page_allocator_);
reservation_ = std::move(other.reservation_);
return *this;
}
namespace {
inline Address VirtualMemoryCageStart(
Address reservation_start,
const VirtualMemoryCage::ReservationParams& params) {
return RoundUp(reservation_start + params.base_bias_size,
params.base_alignment) -
params.base_bias_size;
}
} // namespace
bool VirtualMemoryCage::InitReservation(const ReservationParams& params) {
DCHECK(!reservation_.IsReserved());
const size_t allocate_page_size = params.page_allocator->AllocatePageSize();
CHECK(IsAligned(params.reservation_size, allocate_page_size));
CHECK(params.base_alignment == ReservationParams::kAnyBaseAlignment ||
(IsAligned(params.base_alignment, allocate_page_size) &&
IsAligned(params.base_bias_size, allocate_page_size)));
CHECK_LE(params.base_bias_size, params.reservation_size);
Address hint = RoundDown(params.requested_start_hint,
RoundUp(params.base_alignment, allocate_page_size)) -
RoundUp(params.base_bias_size, allocate_page_size);
if (params.base_alignment == ReservationParams::kAnyBaseAlignment) {
// When the base doesn't need to be aligned, the virtual memory reservation
// fails only due to OOM.
VirtualMemory reservation(params.page_allocator, params.reservation_size,
reinterpret_cast<void*>(hint));
if (!reservation.IsReserved()) return false;
reservation_ = std::move(reservation);
base_ = reservation_.address() + params.base_bias_size;
CHECK_EQ(reservation_.size(), params.reservation_size);
} else {
// Otherwise, we need to try harder by first overreserving
// in hopes of finding a correctly aligned address within the larger
// reservation.
const int kMaxAttempts = 4;
for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
// Reserve a region of twice the size so that there is an aligned address
// within it that's usable as the cage base.
VirtualMemory padded_reservation(params.page_allocator,
params.reservation_size * 2,
reinterpret_cast<void*>(hint));
if (!padded_reservation.IsReserved()) return false;
// Find properly aligned sub-region inside the reservation.
Address address =
VirtualMemoryCageStart(padded_reservation.address(), params);
CHECK(padded_reservation.InVM(address, params.reservation_size));
#if defined(V8_OS_FUCHSIA)
// Fuchsia does not respect given hints so as a workaround we will use
// overreserved address space region instead of trying to re-reserve
// a subregion.
bool overreserve = true;
#else
// For the last attempt use the overreserved region to avoid an OOM crash.
// This case can happen if there are many isolates being created in
// parallel that race for reserving the regions.
bool overreserve = (attempt == kMaxAttempts - 1);
#endif
if (overreserve) {
if (padded_reservation.InVM(address, params.reservation_size)) {
reservation_ = std::move(padded_reservation);
base_ = address + params.base_bias_size;
break;
}
} else {
// Now free the padded reservation and immediately try to reserve an
// exact region at aligned address. We have to do this dancing because
// the reservation address requirement is more complex than just a
// certain alignment and not all operating systems support freeing parts
// of reserved address space regions.
padded_reservation.Free();
VirtualMemory reservation(params.page_allocator,
params.reservation_size,
reinterpret_cast<void*>(address));
if (!reservation.IsReserved()) return false;
// The reservation could still be somewhere else but we can accept it
// if it has the required alignment.
Address address = VirtualMemoryCageStart(reservation.address(), params);
if (reservation.address() == address) {
reservation_ = std::move(reservation);
base_ = address + params.base_bias_size;
CHECK_EQ(reservation_.size(), params.reservation_size);
break;
}
}
}
}
CHECK_NE(base_, kNullAddress);
CHECK(IsAligned(base_, params.base_alignment));
const Address allocatable_base = RoundUp(base_, params.page_size);
const size_t allocatable_size =
RoundDown(params.reservation_size - (allocatable_base - base_) -
params.base_bias_size,
params.page_size);
page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
params.page_allocator, allocatable_base, allocatable_size,
params.page_size);
return true;
}
void VirtualMemoryCage::Free() {
if (IsReserved()) {
base_ = kNullAddress;
page_allocator_.reset();
reservation_.Free();
}
}
}  // namespace internal
}  // namespace v8
@@ -13,6 +13,11 @@
#include "src/init/v8.h"

namespace v8 {

+namespace base {
+class BoundedPageAllocator;
+}  // namespace base
+
namespace internal {

class Isolate;
@@ -260,6 +265,102 @@ class VirtualMemory final {
  base::AddressRegion region_;
};
// Represents a VirtualMemory reservation along with a BoundedPageAllocator that
// can be used to allocate within the reservation.
//
// Virtual memory cages are used for both the pointer compression cage and code
// ranges (on platforms that require code ranges) and are configurable via
// ReservationParams.
//
//  +------------+-----------+------------ ~~~ --+
//  |    ...     |    ...    |        ...        |
//  +------------+-----------+------------ ~~~ --+
//  ^            ^           ^
//  start        cage base   allocatable base
//
//  <------------>           <------------------->
//  base bias size              allocatable size
//  <--------------------------------------------->
//                  reservation size
//
// - The reservation is made using ReservationParams::page_allocator.
// - start is the start of the virtual memory reservation.
// - cage base is the base address of the cage.
// - allocatable base is the cage base rounded up to the nearest
// ReservationParams::page_size, and is the start of the allocatable area for
// the BoundedPageAllocator.
//
// - The base bias is configured by ReservationParams::base_bias_size.
// - The reservation size is configured by ReservationParams::reservation_size.
//
// Additionally,
// - The alignment of the cage base is configured by
// ReservationParams::base_alignment.
// - The page size of the BoundedPageAllocator is configured by
// ReservationParams::page_size.
// - A hint for the value of start can be passed by
// ReservationParams::requested_start_hint.
//
// The configuration is subject to the following alignment requirements.
// Below, AllocatePageSize is short for
// ReservationParams::page_allocator->AllocatePageSize().
//
// - The reservation size must be AllocatePageSize-aligned.
// - If the base alignment is not kAnyBaseAlignment, both the base alignment
// and the base bias size must be AllocatePageSize-aligned.
// - The base alignment may be kAnyBaseAlignment to denote any alignment is
// acceptable. In this case the base bias size does not need to be aligned.
class VirtualMemoryCage {
public:
VirtualMemoryCage();
virtual ~VirtualMemoryCage();
VirtualMemoryCage(const VirtualMemoryCage&) = delete;
VirtualMemoryCage& operator=(VirtualMemoryCage&) = delete;
VirtualMemoryCage(VirtualMemoryCage&& other) V8_NOEXCEPT;
VirtualMemoryCage& operator=(VirtualMemoryCage&& other) V8_NOEXCEPT;
Address base() const { return base_; }
base::BoundedPageAllocator* page_allocator() const {
return page_allocator_.get();
}
VirtualMemory* reservation() { return &reservation_; }
const VirtualMemory* reservation() const { return &reservation_; }
bool IsReserved() const {
DCHECK_EQ(base_ != kNullAddress, reservation_.IsReserved());
return reservation_.IsReserved();
}
struct ReservationParams {
// The allocator to use to reserve the virtual memory.
v8::PageAllocator* page_allocator;
// See diagram above.
size_t reservation_size;
size_t base_alignment;
size_t base_bias_size;
size_t page_size;
Address requested_start_hint;
static constexpr size_t kAnyBaseAlignment = 1;
};
// A number of attempts is made to try to reserve a region that satisfies the
// constraints in params, but this may fail. The base address may be different
// than the one requested.
bool InitReservation(const ReservationParams& params);
void Free();
protected:
Address base_ = kNullAddress;
std::unique_ptr<base::BoundedPageAllocator> page_allocator_;
VirtualMemory reservation_;
};
}  // namespace internal
}  // namespace v8
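As a usage illustration, the following sketch (not part of the CL) drives the new VirtualMemoryCage directly, with the same parameter choices as PtrComprCageReservationParams in src/init/isolate-allocator.cc above, in the shared-cage configuration where base_bias_size is zero; error handling is reduced to CHECKs.

// Illustrative only; assumes the v8::internal namespace and a pointer
// compression build so the kPtrComprCage* constants are defined.
VirtualMemoryCage cage;
VirtualMemoryCage::ReservationParams params;
params.page_allocator = GetPlatformPageAllocator();
params.reservation_size = kPtrComprCageReservationSize;
params.base_alignment = kPtrComprCageBaseAlignment;
params.base_bias_size = 0;  // no IsolateRootBiasPage in the shared-cage setup
params.page_size = RoundUp(size_t{1} << kPageSizeBits,
                           params.page_allocator->AllocatePageSize());
params.requested_start_hint =
    reinterpret_cast<Address>(params.page_allocator->GetRandomMmapAddr());
CHECK(cage.InitReservation(params));
// The cage base honours the requested alignment; allocations inside the cage
// go through the BoundedPageAllocator set up by InitReservation().
CHECK(IsAligned(cage.base(), params.base_alignment));
v8::PageAllocator* cage_page_allocator = cage.page_allocator();
USE(cage_page_allocator);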
@@ -5,7 +5,6 @@
#include "src/common/globals.h"
#include "src/execution/isolate-inl.h"
#include "src/heap/heap-inl.h"
-#include "src/init/ptr-compr-cage.h"
#include "test/cctest/cctest.h"

#ifdef V8_COMPRESS_POINTERS
@@ -61,12 +60,6 @@ UNINITIALIZED_TEST(SharedPtrComprCage) {
    CHECK_EQ(GetPtrComprCageBase(*isolate1_object),
             GetPtrComprCageBase(*isolate2_object));
-
-    const PtrComprCage* cage = PtrComprCage::GetProcessWideCage();
-    CHECK(cage->reservation()->InVM(isolate1_object->ptr(),
-                                    isolate1_object->Size()));
-    CHECK(cage->reservation()->InVM(isolate2_object->ptr(),
-                                    isolate2_object->Size()));
  }

  isolate1->Dispose();
@@ -9,7 +9,6 @@
#include "src/heap/heap-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/spaces-inl.h"
-#include "src/init/ptr-compr-cage.h"
#include "src/utils/ostreams.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -243,8 +242,8 @@ class SequentialUnmapperTest : public TestWithIsolate {
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
    // Reinitialize the process-wide pointer cage so it can pick up the
    // TrackingPageAllocator.
-    PtrComprCage::GetProcessWideCage()->Free();
-    PtrComprCage::GetProcessWideCage()->InitReservationOrDie();
+    IsolateAllocator::FreeProcessWidePtrComprCageForTesting();
+    IsolateAllocator::InitializeOncePerProcess();
#endif
    TestWithIsolate::SetUpTestCase();
  }
@@ -254,7 +253,7 @@ class SequentialUnmapperTest : public TestWithIsolate {
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
    // Free the process-wide cage reservation, otherwise the pages won't be
    // freed until process teardown.
-    PtrComprCage::GetProcessWideCage()->Free();
+    IsolateAllocator::FreeProcessWidePtrComprCageForTesting();
#endif
    i::FLAG_concurrent_sweeping = old_flag_;
    CHECK(tracking_page_allocator_->IsEmpty());