Commit 411cd56d authored by Anton Bikineev, committed by V8 LUCI CQ

cppgc: Remove the 2GB split

The split is vestigial and no longer needed at all:
- as part of the shared-cage effort we added a HeapHandle pointer to the
  BasePageHandle class (on the API side);
- for the value-full barrier we get HeapHandle from bitmasking the
  value;
- for the value-less barrier we get it from the callback provided by the
  caller.

The CL entirely removes the split and uses a single
BoundedPageAllocator. A minor note: conservative stack scanning can
become slightly more expensive.
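
To make the two lookup paths concrete, here is a minimal sketch of what the
message describes. It is not the V8 sources; kPageSize, FromPayload() and
heap_handle() mirror the public cppgc BasePageHandle surface and are
reproduced here only as an assumption for illustration.

// Sketch only: how a barrier can obtain the HeapHandle without knowing
// which half of the cage a page lives in.
#include <cstdint>

namespace example {

// cppgc pages are 128 KiB; the exact constant is an assumption here.
constexpr uintptr_t kPageSize = uintptr_t{1} << 17;

class HeapHandle;

// Every page header stores a reference to its HeapHandle, so bitmasking an
// in-object pointer down to the page boundary is enough to reach it.
class BasePageHandle {
 public:
  static BasePageHandle* FromPayload(void* payload) {
    return reinterpret_cast<BasePageHandle*>(
        reinterpret_cast<uintptr_t>(payload) & ~(kPageSize - 1));
  }
  HeapHandle& heap_handle() { return heap_handle_; }

 protected:
  explicit BasePageHandle(HeapHandle& heap_handle)
      : heap_handle_(heap_handle) {}
  HeapHandle& heap_handle_;
};

// Value-full barrier: derive the handle by bitmasking the value itself.
inline HeapHandle& HandleFromValue(void* value) {
  return BasePageHandle::FromPayload(value)->heap_handle();
}

// Value-less barrier: the caller supplies a callback returning the handle
// (mirrors the `HeapHandle& handle = callback();` lines in the diff below).
template <typename HeapHandleCallback>
inline HeapHandle& HandleFromCallback(HeapHandleCallback callback) {
  return callback();
}

}  // namespace example

With both paths independent of where a page sits inside the reservation,
nothing distinguishes the normal-page half from the large-page half anymore,
which is why the 2GB split can be dropped.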

Bug: chromium:1361582, chromium:1325007
Change-Id: I2a8aded3dd12037998f36341c68af8e23b0dcd88
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3899320
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83232}
parent 2cc1f9a3
......@@ -47,10 +47,7 @@ constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(2) * kGB;
constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(4) * kGB;
#endif // !defined(CPPGC_2GB_CAGE)
constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;
constexpr size_t kCagedHeapNormalPageReservationSize =
kCagedHeapReservationSize / 2;
#endif
#endif // defined(CPPGC_CAGED_HEAP)
static constexpr size_t kDefaultAlignment = sizeof(void*);
......
......@@ -25,6 +25,7 @@ namespace cppgc {
namespace internal {
class HeapBase;
class HeapBaseHandle;
#if defined(CPPGC_YOUNG_GENERATION)
......@@ -92,8 +93,6 @@ static_assert(sizeof(AgeTable) == 1 * api_constants::kMB,
#endif // CPPGC_YOUNG_GENERATION
// TODO(v8:12231): Remove this class entirely so that it doesn't occupy space
// when CPPGC_YOUNG_GENERATION is off.
struct CagedHeapLocalData final {
V8_INLINE static CagedHeapLocalData& Get() {
return *reinterpret_cast<CagedHeapLocalData*>(CagedHeapBase::GetBase());
......
......@@ -45,33 +45,11 @@ class V8_EXPORT CagedHeapBase {
kHalfWordShift);
}
V8_INLINE static bool IsWithinNormalPageReservation(void* address) {
return (reinterpret_cast<uintptr_t>(address) - g_heap_base_) <
api_constants::kCagedHeapNormalPageReservationSize;
}
V8_INLINE static bool IsWithinLargePageReservation(const void* ptr) {
CPPGC_DCHECK(g_heap_base_);
auto uptr = reinterpret_cast<uintptr_t>(ptr);
return (uptr >= g_heap_base_ +
api_constants::kCagedHeapNormalPageReservationSize) &&
(uptr < g_heap_base_ + api_constants::kCagedHeapReservationSize);
}
V8_INLINE static uintptr_t GetBase() { return g_heap_base_; }
V8_INLINE static BasePageHandle& LookupPageFromInnerPointer(void* ptr) {
if (V8_LIKELY(IsWithinNormalPageReservation(ptr)))
return *BasePageHandle::FromPayload(ptr);
else
return LookupLargePageFromInnerPointer(ptr);
}
private:
friend class CagedHeap;
static BasePageHandle& LookupLargePageFromInnerPointer(void* address);
static uintptr_t g_heap_base_;
};
......
......@@ -268,8 +268,8 @@ struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
if (V8_LIKELY(!WriteBarrier::IsEnabled()))
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
#if defined(CPPGC_YOUNG_GENERATION)
HeapHandle& handle = callback();
#if defined(CPPGC_YOUNG_GENERATION)
if (V8_LIKELY(!handle.is_incremental_marking_in_progress())) {
if (!handle.is_young_generation_enabled()) {
return WriteBarrier::Type::kNone;
......@@ -283,7 +283,6 @@ struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
return SetAndReturnType<WriteBarrier::Type::kGenerational>(params);
}
#else // !defined(CPPGC_YOUNG_GENERATION)
HeapHandle& handle = callback();
if (V8_UNLIKELY(!subtle::HeapState::IsMarking(handle))) {
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
}
......
......@@ -33,8 +33,6 @@ static_assert(api_constants::kCagedHeapReservationSize ==
kCagedHeapReservationSize);
static_assert(api_constants::kCagedHeapReservationAlignment ==
kCagedHeapReservationAlignment);
static_assert(api_constants::kCagedHeapNormalPageReservationSize ==
kCagedHeapNormalPageReservationSize);
uintptr_t CagedHeapBase::g_heap_base_ = 0u;
......@@ -124,72 +122,14 @@ CagedHeap::CagedHeap(PageAllocator& platform_allocator)
const size_t local_data_size_with_padding =
caged_heap_start - reinterpret_cast<CagedAddress>(cage_start);
normal_page_bounded_allocator_ = std::make_unique<
v8::base::BoundedPageAllocator>(
page_bounded_allocator_ = std::make_unique<v8::base::BoundedPageAllocator>(
&platform_allocator, caged_heap_start,
kCagedHeapNormalPageReservationSize - local_data_size_with_padding,
kPageSize,
v8::base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized,
v8::base::PageFreeingMode::kMakeInaccessible);
large_page_bounded_allocator_ = std::make_unique<
v8::base::BoundedPageAllocator>(
&platform_allocator,
reinterpret_cast<uintptr_t>(cage_start) +
kCagedHeapNormalPageReservationSize,
kCagedHeapNormalPageReservationSize, kPageSize,
kCagedHeapReservationSize - local_data_size_with_padding, kPageSize,
v8::base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized,
v8::base::PageFreeingMode::kMakeInaccessible);
instance_ = this;
}
void CagedHeap::NotifyLargePageCreated(LargePage* page) {
DCHECK(page);
v8::base::MutexGuard guard(&large_pages_mutex_);
auto result = large_pages_.insert(page);
USE(result);
DCHECK(result.second);
}
void CagedHeap::NotifyLargePageDestroyed(LargePage* page) {
DCHECK(page);
v8::base::MutexGuard guard(&large_pages_mutex_);
auto size = large_pages_.erase(page);
USE(size);
DCHECK_EQ(1u, size);
}
BasePage& CagedHeap::LookupPageFromInnerPointer(void* ptr) const {
DCHECK(IsOnHeap(ptr));
if (V8_LIKELY(CagedHeapBase::IsWithinNormalPageReservation(ptr))) {
return *NormalPage::FromPayload(ptr);
} else {
return LookupLargePageFromInnerPointer(ptr);
}
}
LargePage& CagedHeap::LookupLargePageFromInnerPointer(void* ptr) const {
v8::base::MutexGuard guard(&large_pages_mutex_);
auto it = large_pages_.upper_bound(static_cast<LargePage*>(ptr));
DCHECK_NE(large_pages_.begin(), it);
auto* page = *std::next(it, -1);
DCHECK(page);
DCHECK(page->PayloadContains(static_cast<ConstAddress>(ptr)));
return *page;
}
void CagedHeap::ResetForTesting() {
v8::base::MutexGuard guard(&large_pages_mutex_);
// Clear the large pages to support tests within the same process.
large_pages_.clear();
}
// static
BasePageHandle& CagedHeapBase::LookupLargePageFromInnerPointer(void* address) {
auto& page = CagedHeap::Instance().LookupLargePageFromInnerPointer(address);
return page;
}
} // namespace internal
} // namespace cppgc
......@@ -7,7 +7,6 @@
#include <limits>
#include <memory>
#include <set>
#include "include/cppgc/internal/caged-heap.h"
#include "include/cppgc/platform.h"
......@@ -24,10 +23,6 @@ namespace testing {
class TestWithHeap;
}
struct CagedHeapLocalData;
class BasePage;
class LargePage;
class V8_EXPORT_PRIVATE CagedHeap final {
public:
using AllocatorType = v8::base::BoundedPageAllocator;
......@@ -46,10 +41,6 @@ class V8_EXPORT_PRIVATE CagedHeap final {
~(kCagedHeapReservationAlignment - 1);
}
static bool IsWithinNormalPageReservation(const void* address) {
return OffsetFromAddress(address) < kCagedHeapNormalPageReservationSize;
}
static void InitializeIfNeeded(PageAllocator&);
static CagedHeap& Instance();
......@@ -57,26 +48,11 @@ class V8_EXPORT_PRIVATE CagedHeap final {
CagedHeap(const CagedHeap&) = delete;
CagedHeap& operator=(const CagedHeap&) = delete;
AllocatorType& normal_page_allocator() {
return *normal_page_bounded_allocator_;
}
const AllocatorType& normal_page_allocator() const {
return *normal_page_bounded_allocator_;
AllocatorType& page_allocator() { return *page_bounded_allocator_; }
const AllocatorType& page_allocator() const {
return *page_bounded_allocator_;
}
AllocatorType& large_page_allocator() {
return *large_page_bounded_allocator_;
}
const AllocatorType& large_page_allocator() const {
return *large_page_bounded_allocator_;
}
void NotifyLargePageCreated(LargePage* page);
void NotifyLargePageDestroyed(LargePage* page);
BasePage& LookupPageFromInnerPointer(void* ptr) const;
LargePage& LookupLargePageFromInnerPointer(void* ptr) const;
bool IsOnHeap(const void* address) const {
DCHECK_EQ(reserved_area_.address(),
reinterpret_cast<void*>(CagedHeapBase::GetBase()));
......@@ -92,20 +68,12 @@ class V8_EXPORT_PRIVATE CagedHeap final {
explicit CagedHeap(PageAllocator& platform_allocator);
void ResetForTesting();
static CagedHeap* instance_;
const VirtualMemory reserved_area_;
// BoundedPageAllocator is thread-safe, no need to use external
// synchronization.
std::unique_ptr<AllocatorType> normal_page_bounded_allocator_;
std::unique_ptr<AllocatorType> large_page_bounded_allocator_;
std::set<LargePage*> large_pages_;
// TODO(chromium:1325007): Since writes are rare, consider using read-write
// lock to speed up reading.
mutable v8::base::Mutex large_pages_mutex_;
std::unique_ptr<AllocatorType> page_bounded_allocator_;
};
} // namespace internal
......
......@@ -79,11 +79,6 @@ constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(2) * kGB;
constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(4) * kGB;
#endif // !defined(CPPGC_2GB_CAGE)
constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;
// TODO(v8:12231): To reduce OOM probability, instead of the fixed-size
// reservation consider to use a moving needle implementation or simply
// calibrating this 2GB/2GB split.
constexpr size_t kCagedHeapNormalPageReservationSize =
kCagedHeapReservationSize / 2;
} // namespace internal
} // namespace cppgc
......
......@@ -147,9 +147,8 @@ std::unique_ptr<PageBackend> HeapBase::InitializePageBackend(
PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler) {
#if defined(CPPGC_CAGED_HEAP)
auto& caged_heap = CagedHeap::Instance();
return std::make_unique<PageBackend>(caged_heap.normal_page_allocator(),
caged_heap.large_page_allocator(),
oom_handler);
return std::make_unique<PageBackend>(
caged_heap.page_allocator(), caged_heap.page_allocator(), oom_handler);
#else // !CPPGC_CAGED_HEAP
return std::make_unique<PageBackend>(allocator, allocator, oom_handler);
#endif // !CPPGC_CAGED_HEAP
......
......@@ -47,13 +47,8 @@ BasePage* BasePage::FromInnerAddress(const HeapBase* heap, void* address) {
// static
const BasePage* BasePage::FromInnerAddress(const HeapBase* heap,
const void* address) {
#if defined(CPPGC_CAGED_HEAP)
return static_cast<BasePage*>(
&CagedHeapBase::LookupPageFromInnerPointer(const_cast<void*>(address)));
#else // !defined(CPPGC_CAGED_HEAP)
return reinterpret_cast<const BasePage*>(
heap->page_backend()->Lookup(static_cast<ConstAddress>(address)));
#endif // !defined(CPPGC_CAGED_HEAP)
}
// static
......@@ -247,9 +242,6 @@ LargePage* LargePage::TryCreate(PageBackend& page_backend,
LargePage* page = new (memory) LargePage(*heap, space, size);
page->SynchronizedStore();
#if defined(CPPGC_CAGED_HEAP)
CagedHeap::Instance().NotifyLargePageCreated(page);
#endif // defined(CPPGC_CAGED_HEAP)
page->heap().stats_collector()->NotifyAllocatedMemory(allocation_size);
return page;
}
......@@ -271,9 +263,6 @@ void LargePage::Destroy(LargePage* page) {
#endif // DEBUG
page->~LargePage();
PageBackend* backend = heap.page_backend();
#if defined(CPPGC_CAGED_HEAP)
CagedHeap::Instance().NotifyLargePageDestroyed(page);
#endif // defined(CPPGC_CAGED_HEAP)
heap.stats_collector()->NotifyFreedMemory(AllocationSize(payload_size));
backend->FreeLargePageMemory(reinterpret_cast<Address>(page));
}
......
......@@ -46,11 +46,7 @@ TestWithHeap::TestWithHeap()
: heap_(Heap::Create(platform_)),
allocation_handle_(heap_->GetAllocationHandle()) {}
TestWithHeap::~TestWithHeap() {
#if defined(CPPGC_CAGED_HEAP)
CagedHeap::Instance().ResetForTesting();
#endif // defined(CPPGC_CAGED_HEAP)
}
TestWithHeap::~TestWithHeap() = default;
void TestWithHeap::ResetLinearAllocationBuffers() {
Heap::From(GetHeap())->object_allocator().ResetLinearAllocationBuffers();
......