Commit 954c19c4 authored by Michael Lippautz, committed by V8 LUCI CQ

cppgc: Pass PageAllocator as reference when expecting non-null ref

Change-Id: Id807e5e09fff59f4aedfca67461ffe3af3ffbea3
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3114144
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Auto-Submit: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/main@{#76458}
parent 22cd8032
@@ -62,9 +62,9 @@ HeapBase::HeapBase(
 #endif  // LEAK_SANITIZER
 #if defined(CPPGC_CAGED_HEAP)
       caged_heap_(this, page_allocator()),
-      page_backend_(std::make_unique<PageBackend>(&caged_heap_.allocator())),
+      page_backend_(std::make_unique<PageBackend>(caged_heap_.allocator())),
 #else   // !CPPGC_CAGED_HEAP
-      page_backend_(std::make_unique<PageBackend>(page_allocator())),
+      page_backend_(std::make_unique<PageBackend>(*page_allocator())),
 #endif  // !CPPGC_CAGED_HEAP
       stats_collector_(std::make_unique<StatsCollector>(platform_.get())),
       stack_(std::make_unique<heap::base::Stack>(
......
@@ -12,11 +12,11 @@ namespace internal {
 
 namespace {
 
-void Unprotect(PageAllocator* allocator, const PageMemory& page_memory) {
+void Unprotect(PageAllocator& allocator, const PageMemory& page_memory) {
   if (SupportsCommittingGuardPages(allocator)) {
-    CHECK(allocator->SetPermissions(page_memory.writeable_region().base(),
-                                    page_memory.writeable_region().size(),
-                                    PageAllocator::Permission::kReadWrite));
+    CHECK(allocator.SetPermissions(page_memory.writeable_region().base(),
+                                   page_memory.writeable_region().size(),
+                                   PageAllocator::Permission::kReadWrite));
   } else {
     // No protection in case the allocator cannot commit at the required
     // granularity. Only protect if the allocator supports committing at that
@@ -24,51 +24,51 @@ void Unprotect(PageAllocator* allocator, const PageMemory& page_memory) {
     //
     // The allocator needs to support committing the overall range.
     CHECK_EQ(0u,
-             page_memory.overall_region().size() % allocator->CommitPageSize());
-    CHECK(allocator->SetPermissions(page_memory.overall_region().base(),
-                                    page_memory.overall_region().size(),
-                                    PageAllocator::Permission::kReadWrite));
+             page_memory.overall_region().size() % allocator.CommitPageSize());
+    CHECK(allocator.SetPermissions(page_memory.overall_region().base(),
+                                   page_memory.overall_region().size(),
+                                   PageAllocator::Permission::kReadWrite));
   }
 }
 
-void Protect(PageAllocator* allocator, const PageMemory& page_memory) {
+void Protect(PageAllocator& allocator, const PageMemory& page_memory) {
   if (SupportsCommittingGuardPages(allocator)) {
     // Swap the same region, providing the OS with a chance for fast lookup and
     // change.
-    CHECK(allocator->SetPermissions(page_memory.writeable_region().base(),
-                                    page_memory.writeable_region().size(),
-                                    PageAllocator::Permission::kNoAccess));
+    CHECK(allocator.SetPermissions(page_memory.writeable_region().base(),
+                                   page_memory.writeable_region().size(),
+                                   PageAllocator::Permission::kNoAccess));
   } else {
     // See Unprotect().
    CHECK_EQ(0u,
-             page_memory.overall_region().size() % allocator->CommitPageSize());
-    CHECK(allocator->SetPermissions(page_memory.overall_region().base(),
-                                    page_memory.overall_region().size(),
-                                    PageAllocator::Permission::kNoAccess));
+             page_memory.overall_region().size() % allocator.CommitPageSize());
+    CHECK(allocator.SetPermissions(page_memory.overall_region().base(),
+                                   page_memory.overall_region().size(),
+                                   PageAllocator::Permission::kNoAccess));
   }
 }
 
-MemoryRegion ReserveMemoryRegion(PageAllocator* allocator,
+MemoryRegion ReserveMemoryRegion(PageAllocator& allocator,
                                  size_t allocation_size) {
   void* region_memory =
-      allocator->AllocatePages(nullptr, allocation_size, kPageSize,
-                               PageAllocator::Permission::kNoAccess);
+      allocator.AllocatePages(nullptr, allocation_size, kPageSize,
+                              PageAllocator::Permission::kNoAccess);
   const MemoryRegion reserved_region(static_cast<Address>(region_memory),
                                      allocation_size);
   DCHECK_EQ(reserved_region.base() + allocation_size, reserved_region.end());
   return reserved_region;
 }
 
-void FreeMemoryRegion(PageAllocator* allocator,
+void FreeMemoryRegion(PageAllocator& allocator,
                       const MemoryRegion& reserved_region) {
   // Make sure pages returned to OS are unpoisoned.
   ASAN_UNPOISON_MEMORY_REGION(reserved_region.base(), reserved_region.size());
-  allocator->FreePages(reserved_region.base(), reserved_region.size());
+  allocator.FreePages(reserved_region.base(), reserved_region.size());
 }
 
 }  // namespace
 
-PageMemoryRegion::PageMemoryRegion(PageAllocator* allocator,
+PageMemoryRegion::PageMemoryRegion(PageAllocator& allocator,
                                    MemoryRegion reserved_region, bool is_large)
     : allocator_(allocator),
       reserved_region_(reserved_region),
@@ -81,12 +81,12 @@ PageMemoryRegion::~PageMemoryRegion() {
 // static
 constexpr size_t NormalPageMemoryRegion::kNumPageRegions;
 
-NormalPageMemoryRegion::NormalPageMemoryRegion(PageAllocator* allocator)
-    : PageMemoryRegion(allocator,
-                       ReserveMemoryRegion(
-                           allocator, RoundUp(kPageSize * kNumPageRegions,
-                                              allocator->AllocatePageSize())),
-                       false) {
+NormalPageMemoryRegion::NormalPageMemoryRegion(PageAllocator& allocator)
+    : PageMemoryRegion(
+          allocator,
+          ReserveMemoryRegion(allocator, RoundUp(kPageSize * kNumPageRegions,
+                                                 allocator.AllocatePageSize())),
+          false) {
 #ifdef DEBUG
   for (size_t i = 0; i < kNumPageRegions; ++i) {
     DCHECK_EQ(false, page_memories_in_use_[i]);
@@ -114,13 +114,13 @@ void NormalPageMemoryRegion::UnprotectForTesting() {
   }
 }
 
-LargePageMemoryRegion::LargePageMemoryRegion(PageAllocator* allocator,
+LargePageMemoryRegion::LargePageMemoryRegion(PageAllocator& allocator,
                                              size_t length)
-    : PageMemoryRegion(allocator,
-                       ReserveMemoryRegion(
-                           allocator, RoundUp(length + 2 * kGuardPageSize,
-                                              allocator->AllocatePageSize())),
-                       true) {}
+    : PageMemoryRegion(
+          allocator,
+          ReserveMemoryRegion(allocator, RoundUp(length + 2 * kGuardPageSize,
+                                                 allocator.AllocatePageSize())),
+          true) {}
 
 LargePageMemoryRegion::~LargePageMemoryRegion() = default;
@@ -165,7 +165,7 @@ std::pair<NormalPageMemoryRegion*, Address> NormalPageMemoryPool::Take(
   return pair;
 }
 
-PageBackend::PageBackend(PageAllocator* allocator) : allocator_(allocator) {}
+PageBackend::PageBackend(PageAllocator& allocator) : allocator_(allocator) {}
 
 PageBackend::~PageBackend() = default;
......
@@ -79,9 +79,9 @@ class V8_EXPORT_PRIVATE PageMemoryRegion {
   virtual void UnprotectForTesting() = 0;
 
  protected:
-  PageMemoryRegion(PageAllocator*, MemoryRegion, bool);
+  PageMemoryRegion(PageAllocator&, MemoryRegion, bool);
 
-  PageAllocator* const allocator_;
+  PageAllocator& allocator_;
   const MemoryRegion reserved_region_;
   const bool is_large_;
 };
@@ -91,7 +91,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
  public:
   static constexpr size_t kNumPageRegions = 10;
 
-  explicit NormalPageMemoryRegion(PageAllocator*);
+  explicit NormalPageMemoryRegion(PageAllocator&);
   ~NormalPageMemoryRegion() override;
 
   const PageMemory GetPageMemory(size_t index) const {
@@ -133,7 +133,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
 // LargePageMemoryRegion serves a single large PageMemory object.
 class V8_EXPORT_PRIVATE LargePageMemoryRegion final : public PageMemoryRegion {
  public:
-  LargePageMemoryRegion(PageAllocator*, size_t);
+  LargePageMemoryRegion(PageAllocator&, size_t);
   ~LargePageMemoryRegion() override;
 
   const PageMemory GetPageMemory() const {
@@ -193,7 +193,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryPool final {
 // regions alive.
 class V8_EXPORT_PRIVATE PageBackend final {
  public:
-  explicit PageBackend(PageAllocator*);
+  explicit PageBackend(PageAllocator&);
   ~PageBackend();
 
   // Allocates a normal page from the backend.
@@ -223,7 +223,7 @@ class V8_EXPORT_PRIVATE PageBackend final {
   PageBackend& operator=(const PageBackend&) = delete;
 
  private:
-  PageAllocator* allocator_;
+  PageAllocator& allocator_;
   NormalPageMemoryPool page_pool_;
   PageMemoryRegionTree page_memory_region_tree_;
   std::vector<std::unique_ptr<PageMemoryRegion>> normal_page_memory_regions_;
@@ -233,8 +233,8 @@ class V8_EXPORT_PRIVATE PageBackend final {
 
 // Returns true if the provided allocator supports committing at the required
 // granularity.
-inline bool SupportsCommittingGuardPages(PageAllocator* allocator) {
-  return kGuardPageSize % allocator->CommitPageSize() == 0;
+inline bool SupportsCommittingGuardPages(PageAllocator& allocator) {
+  return kGuardPageSize % allocator.CommitPageSize() == 0;
 }
 
 Address NormalPageMemoryRegion::Lookup(ConstAddress address) const {
......
@@ -77,7 +77,7 @@ TEST(PageMemoryDeathTest, ConstructNonContainedRegions) {
 
 TEST(PageMemoryRegionTest, NormalPageMemoryRegion) {
   v8::base::PageAllocator allocator;
-  auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
+  auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator);
   pmr->UnprotectForTesting();
   MemoryRegion prev_overall;
   for (size_t i = 0; i < NormalPageMemoryRegion::kNumPageRegions; ++i) {
@@ -103,7 +103,7 @@ TEST(PageMemoryRegionTest, NormalPageMemoryRegion) {
 
 TEST(PageMemoryRegionTest, LargePageMemoryRegion) {
   v8::base::PageAllocator allocator;
-  auto pmr = std::make_unique<LargePageMemoryRegion>(&allocator, 1024);
+  auto pmr = std::make_unique<LargePageMemoryRegion>(allocator, 1024);
   pmr->UnprotectForTesting();
   const PageMemory pm = pmr->GetPageMemory();
   EXPECT_LE(1024u, pm.writeable_region().size());
@@ -116,16 +116,16 @@ TEST(PageMemoryRegionTest, PlatformUsesGuardPages) {
   // regions.
   v8::base::PageAllocator allocator;
 #if defined(V8_HOST_ARCH_PPC64) && !defined(_AIX)
-  EXPECT_FALSE(SupportsCommittingGuardPages(&allocator));
+  EXPECT_FALSE(SupportsCommittingGuardPages(allocator));
 #elif defined(V8_HOST_ARCH_ARM64)
   if (allocator.CommitPageSize() == 4096) {
-    EXPECT_TRUE(SupportsCommittingGuardPages(&allocator));
+    EXPECT_TRUE(SupportsCommittingGuardPages(allocator));
   } else {
     // Arm64 supports both 16k and 64k OS pages.
-    EXPECT_FALSE(SupportsCommittingGuardPages(&allocator));
+    EXPECT_FALSE(SupportsCommittingGuardPages(allocator));
   }
 #else  // Regular case.
-  EXPECT_TRUE(SupportsCommittingGuardPages(&allocator));
+  EXPECT_TRUE(SupportsCommittingGuardPages(allocator));
 #endif
 }
@@ -141,7 +141,7 @@ TEST(PageMemoryRegionDeathTest, ReservationIsFreed) {
   // and thus not crash.
   EXPECT_DEATH_IF_SUPPORTED(
       v8::base::PageAllocator allocator; Address base; {
-        auto pmr = std::make_unique<LargePageMemoryRegion>(&allocator, 1024);
+        auto pmr = std::make_unique<LargePageMemoryRegion>(allocator, 1024);
         base = pmr->reserved_region().base();
       } access(base[0]);
       , "");
@@ -149,8 +149,8 @@ TEST(PageMemoryRegionDeathTest, ReservationIsFreed) {
 
 TEST(PageMemoryRegionDeathTest, FrontGuardPageAccessCrashes) {
   v8::base::PageAllocator allocator;
-  auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
-  if (SupportsCommittingGuardPages(&allocator)) {
+  auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator);
+  if (SupportsCommittingGuardPages(allocator)) {
     EXPECT_DEATH_IF_SUPPORTED(
         access(pmr->GetPageMemory(0).overall_region().base()[0]), "");
   }
@@ -158,8 +158,8 @@ TEST(PageMemoryRegionDeathTest, FrontGuardPageAccessCrashes) {
 
 TEST(PageMemoryRegionDeathTest, BackGuardPageAccessCrashes) {
   v8::base::PageAllocator allocator;
-  auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
-  if (SupportsCommittingGuardPages(&allocator)) {
+  auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator);
+  if (SupportsCommittingGuardPages(allocator)) {
     EXPECT_DEATH_IF_SUPPORTED(
         access(pmr->GetPageMemory(0).writeable_region().end()[0]), "");
   }
@@ -167,7 +167,7 @@ TEST(PageMemoryRegionDeathTest, BackGuardPageAccessCrashes) {
 
 TEST(PageMemoryRegionTreeTest, AddNormalLookupRemove) {
   v8::base::PageAllocator allocator;
-  auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
+  auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator);
   PageMemoryRegionTree tree;
   tree.Add(pmr.get());
   ASSERT_EQ(pmr.get(), tree.Lookup(pmr->reserved_region().base()));
@@ -182,7 +182,7 @@ TEST(PageMemoryRegionTreeTest, AddNormalLookupRemove) {
 TEST(PageMemoryRegionTreeTest, AddLargeLookupRemove) {
   v8::base::PageAllocator allocator;
   constexpr size_t kLargeSize = 5012;
-  auto pmr = std::make_unique<LargePageMemoryRegion>(&allocator, kLargeSize);
+  auto pmr = std::make_unique<LargePageMemoryRegion>(allocator, kLargeSize);
   PageMemoryRegionTree tree;
   tree.Add(pmr.get());
   ASSERT_EQ(pmr.get(), tree.Lookup(pmr->reserved_region().base()));
@@ -196,9 +196,9 @@ TEST(PageMemoryRegionTreeTest, AddLargeLookupRemove) {
 
 TEST(PageMemoryRegionTreeTest, AddLookupRemoveMultiple) {
   v8::base::PageAllocator allocator;
-  auto pmr1 = std::make_unique<NormalPageMemoryRegion>(&allocator);
+  auto pmr1 = std::make_unique<NormalPageMemoryRegion>(allocator);
   constexpr size_t kLargeSize = 3127;
-  auto pmr2 = std::make_unique<LargePageMemoryRegion>(&allocator, kLargeSize);
+  auto pmr2 = std::make_unique<LargePageMemoryRegion>(allocator, kLargeSize);
   PageMemoryRegionTree tree;
   tree.Add(pmr1.get());
   tree.Add(pmr2.get());
@@ -223,7 +223,7 @@ TEST(NormalPageMemoryPool, ConstructorEmpty) {
 
 TEST(NormalPageMemoryPool, AddTakeSameBucket) {
   v8::base::PageAllocator allocator;
-  auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
+  auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator);
   const PageMemory pm = pmr->GetPageMemory(0);
   NormalPageMemoryPool pool;
   constexpr size_t kBucket = 0;
@@ -235,7 +235,7 @@ TEST(NormalPageMemoryPool, AddTakeSameBucket) {
 
 TEST(NormalPageMemoryPool, AddTakeNotFoundDifferentBucket) {
   v8::base::PageAllocator allocator;
-  auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
+  auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator);
   const PageMemory pm = pmr->GetPageMemory(0);
   NormalPageMemoryPool pool;
   constexpr size_t kFirstBucket = 0;
@@ -250,7 +250,7 @@ TEST(NormalPageMemoryPool, AddTakeNotFoundDifferentBucket) {
 
 TEST(PageBackendTest, AllocateNormalUsesPool) {
   v8::base::PageAllocator allocator;
-  PageBackend backend(&allocator);
+  PageBackend backend(allocator);
   constexpr size_t kBucket = 0;
   Address writeable_base1 = backend.AllocateNormalPageMemory(kBucket);
   EXPECT_NE(nullptr, writeable_base1);
@@ -262,7 +262,7 @@ TEST(PageBackendTest, AllocateNormalUsesPool) {
 
 TEST(PageBackendTest, AllocateLarge) {
   v8::base::PageAllocator allocator;
-  PageBackend backend(&allocator);
+  PageBackend backend(allocator);
   Address writeable_base1 = backend.AllocateLargePageMemory(13731);
   EXPECT_NE(nullptr, writeable_base1);
   Address writeable_base2 = backend.AllocateLargePageMemory(9478);
@@ -274,7 +274,7 @@ TEST(PageBackendTest, AllocateLarge) {
 
 TEST(PageBackendTest, LookupNormal) {
   v8::base::PageAllocator allocator;
-  PageBackend backend(&allocator);
+  PageBackend backend(allocator);
   constexpr size_t kBucket = 0;
   Address writeable_base = backend.AllocateNormalPageMemory(kBucket);
   EXPECT_EQ(nullptr, backend.Lookup(writeable_base - kGuardPageSize));
@@ -290,7 +290,7 @@ TEST(PageBackendTest, LookupNormal) {
 
 TEST(PageBackendTest, LookupLarge) {
   v8::base::PageAllocator allocator;
-  PageBackend backend(&allocator);
+  PageBackend backend(allocator);
   constexpr size_t kSize = 7934;
   Address writeable_base = backend.AllocateLargePageMemory(kSize);
   EXPECT_EQ(nullptr, backend.Lookup(writeable_base - kGuardPageSize));
@@ -303,7 +303,7 @@ TEST(PageBackendDeathTest, DestructingBackendDestroysPageMemory) {
   v8::base::PageAllocator allocator;
   Address base;
   {
-    PageBackend backend(&allocator);
+    PageBackend backend(allocator);
     constexpr size_t kBucket = 0;
     base = backend.AllocateNormalPageMemory(kBucket);
   }
......