Commit d4056c61 authored by Michael Lippautz, committed by Commit Bot

cppgc: Add page memory allocation backend

This is a port of src/components/gc that was added recently.

Differences:
- Added back bucketing to the page pool, as that guarantees that
  arenas used for specific types do not have their pages used by other
  arenas.
- Replaced base::flat_map with std::map. This may cause performance
  regressions when using PageMemoryRegionTree in hot paths. A
  vector-like representation may be used to fix such a regression.
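
A sketch of the vector-like alternative mentioned above (editor's
illustration, not part of this change): regions stay sorted by base address
in a flat vector and are looked up with std::upper_bound, mirroring
PageMemoryRegionTree::Lookup() in the diff below without per-node
allocations. All names here are hypothetical.

  #include <algorithm>
  #include <cstdint>
  #include <iterator>
  #include <vector>

  struct Region {
    uintptr_t base;  // First reserved address.
    uintptr_t end;   // One past the last reserved address.
  };

  // Finds the region containing |addr| in a vector sorted by |base|.
  const Region* FlatLookup(const std::vector<Region>& sorted, uintptr_t addr) {
    // First region whose base is greater than |addr|; only its predecessor
    // can contain |addr|.
    auto it = std::upper_bound(
        sorted.begin(), sorted.end(), addr,
        [](uintptr_t a, const Region& r) { return a < r.base; });
    if (it == sorted.begin()) return nullptr;
    const Region& candidate = *std::prev(it);
    return addr < candidate.end ? &candidate : nullptr;
  }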

Bug: chromium:1056170
Change-Id: I03e3abe55faf7bb50c8011dafc23396889bf66db
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2139586
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67073}
parent e2f66618
@@ -3969,6 +3969,9 @@ v8_source_set("cppgc_base") {
    "src/heap/cppgc/heap-object-header.h",
    "src/heap/cppgc/heap.cc",
    "src/heap/cppgc/heap.h",
    "src/heap/cppgc/page-memory-inl.h",
    "src/heap/cppgc/page-memory.cc",
    "src/heap/cppgc/page-memory.h",
    "src/heap/cppgc/platform.cc",
    "src/heap/cppgc/pointer-policies.cc",
    "src/heap/cppgc/sanitizers.h",
@@ -31,6 +31,10 @@ constexpr size_t kPageSize = 1 << kPageSizeLog2;
constexpr size_t kPageOffsetMask = kPageSize - 1;
constexpr size_t kPageBaseMask = ~kPageOffsetMask;

// Guard pages are always reserved as part of a page's memory. Whether they
// are actually protected depends on the allocator provided to the garbage
// collector.
constexpr size_t kGuardPageSize = 4096;

constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;

}  // namespace internal
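
For orientation (editor's sketch): with these constants, each normal page
slot reserves kPageSize bytes whose first and last kGuardPageSize bytes are
guard pages. kPageSizeLog2 is defined earlier in globals.h; the value 17
(128 KiB pages) is assumed below for the arithmetic.

  #include <cstddef>

  constexpr size_t kPageSizeLog2 = 17;  // Assumed; defined in globals.h.
  constexpr size_t kPageSize = size_t{1} << kPageSizeLog2;  // 131072 bytes.
  constexpr size_t kGuardPageSize = 4096;

  // |<- guard ->|<-------- writeable area -------->|<- guard ->|
  constexpr size_t kWriteableSize = kPageSize - 2 * kGuardPageSize;
  static_assert(kWriteableSize == 122880,
                "usable bytes per normal page slot under these assumptions");
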
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CPPGC_PAGE_MEMORY_INL_H_
#define V8_HEAP_CPPGC_PAGE_MEMORY_INL_H_

#include "src/heap/cppgc/page-memory.h"

namespace cppgc {
namespace internal {

// Returns true if the provided allocator supports committing at the required
// granularity.
inline bool SupportsCommittingGuardPages(PageAllocator* allocator) {
  return kGuardPageSize % allocator->CommitPageSize() == 0;
}

Address NormalPageMemoryRegion::Lookup(Address address) const {
  size_t index = GetIndex(address);
  if (!page_memories_in_use_[index]) return nullptr;
  const MemoryRegion writeable_region = GetPageMemory(index).writeable_region();
  return writeable_region.Contains(address) ? writeable_region.base() : nullptr;
}

Address LargePageMemoryRegion::Lookup(Address address) const {
  const MemoryRegion writeable_region = GetPageMemory().writeable_region();
  return writeable_region.Contains(address) ? writeable_region.base() : nullptr;
}

Address PageMemoryRegion::Lookup(Address address) const {
  DCHECK(reserved_region().Contains(address));
  return is_large()
             ? static_cast<const LargePageMemoryRegion*>(this)->Lookup(address)
             : static_cast<const NormalPageMemoryRegion*>(this)->Lookup(
                   address);
}

PageMemoryRegion* PageMemoryRegionTree::Lookup(Address address) const {
  auto it = set_.upper_bound(address);
  // This check also covers set_.size() > 0, since for an empty map it is
  // guaranteed that begin() == end().
  if (it == set_.begin()) return nullptr;
  auto* result = std::next(it, -1)->second;
  if (address < result->reserved_region().end()) return result;
  return nullptr;
}

Address PageBackend::Lookup(Address address) const {
  PageMemoryRegion* pmr = page_memory_region_tree_.Lookup(address);
  return pmr ? pmr->Lookup(address) : nullptr;
}

}  // namespace internal
}  // namespace cppgc

#endif  // V8_HEAP_CPPGC_PAGE_MEMORY_INL_H_
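
A worked example of the upper_bound-based lookup above (editor's illustration
with made-up addresses): set_ maps each reserved base address to its region,
e.g. {0x10000 -> A reserving [0x10000, 0x30000), 0x50000 -> B reserving
[0x50000, 0x70000)}.

  Lookup(0x20000): upper_bound returns B's entry; stepping back one yields A;
                   0x20000 < A.end() == 0x30000, so A is returned.
  Lookup(0x40000): stepping back from B's entry yields A, but 0x40000 >=
                   A.end(), so nullptr (the address lies in an unreserved gap).
  Lookup(0x08000): upper_bound returns A's entry, which is set_.begin(), so
                   nullptr.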
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/page-memory.h"

#include "src/base/macros.h"
#include "src/heap/cppgc/page-memory-inl.h"

namespace cppgc {
namespace internal {

namespace {

void Unprotect(PageAllocator* allocator, const PageMemory& page_memory) {
  if (SupportsCommittingGuardPages(allocator)) {
    CHECK(allocator->SetPermissions(page_memory.writeable_region().base(),
                                    page_memory.writeable_region().size(),
                                    PageAllocator::Permission::kReadWrite));
  } else {
    // The allocator cannot commit at the guard-page granularity, so the guard
    // pages cannot be protected individually. Instead, the whole overall
    // region (including the guard pages) is made read/write; the allocator
    // still needs to support committing at the overall region's granularity.
    CHECK_EQ(0u,
             page_memory.overall_region().size() % allocator->CommitPageSize());
    CHECK(allocator->SetPermissions(page_memory.overall_region().base(),
                                    page_memory.overall_region().size(),
                                    PageAllocator::Permission::kReadWrite));
  }
}
void Protect(PageAllocator* allocator, const PageMemory& page_memory) {
  if (SupportsCommittingGuardPages(allocator)) {
    // Flip permissions on exactly the region that was unprotected, giving the
    // OS a chance for a fast lookup and in-place change.
    CHECK(allocator->SetPermissions(page_memory.writeable_region().base(),
                                    page_memory.writeable_region().size(),
                                    PageAllocator::Permission::kNoAccess));
  } else {
    // See Unprotect().
    CHECK_EQ(0u,
             page_memory.overall_region().size() % allocator->CommitPageSize());
    CHECK(allocator->SetPermissions(page_memory.overall_region().base(),
                                    page_memory.overall_region().size(),
                                    PageAllocator::Permission::kNoAccess));
  }
}

MemoryRegion ReserveMemoryRegion(PageAllocator* allocator,
                                 size_t allocation_size) {
  void* region_memory =
      allocator->AllocatePages(nullptr, allocation_size, kPageSize,
                               PageAllocator::Permission::kNoAccess);
  const MemoryRegion reserved_region(static_cast<Address>(region_memory),
                                     allocation_size);
  DCHECK_EQ(reserved_region.base() + allocation_size, reserved_region.end());
  return reserved_region;
}

void FreeMemoryRegion(PageAllocator* allocator,
                      const MemoryRegion& reserved_region) {
  allocator->FreePages(reserved_region.base(), reserved_region.size());
}

}  // namespace
PageMemoryRegion::PageMemoryRegion(PageAllocator* allocator,
                                   MemoryRegion reserved_region, bool is_large)
    : allocator_(allocator),
      reserved_region_(reserved_region),
      is_large_(is_large) {}

PageMemoryRegion::~PageMemoryRegion() {
  FreeMemoryRegion(allocator_, reserved_region());
}

// static
constexpr size_t NormalPageMemoryRegion::kNumPageRegions;

NormalPageMemoryRegion::NormalPageMemoryRegion(PageAllocator* allocator)
    : PageMemoryRegion(allocator,
                       ReserveMemoryRegion(
                           allocator, RoundUp(kPageSize * kNumPageRegions,
                                              allocator->AllocatePageSize())),
                       false) {
#ifdef DEBUG
  for (size_t i = 0; i < kNumPageRegions; ++i) {
    DCHECK_EQ(false, page_memories_in_use_[i]);
  }
#endif  // DEBUG
}

NormalPageMemoryRegion::~NormalPageMemoryRegion() = default;

void NormalPageMemoryRegion::Allocate(Address writeable_base) {
  const size_t index = GetIndex(writeable_base);
  ChangeUsed(index, true);
  Unprotect(allocator_, GetPageMemory(index));
}

void NormalPageMemoryRegion::Free(Address writeable_base) {
  const size_t index = GetIndex(writeable_base);
  ChangeUsed(index, false);
  Protect(allocator_, GetPageMemory(index));
}

void NormalPageMemoryRegion::UnprotectForTesting() {
  for (size_t i = 0; i < kNumPageRegions; ++i) {
    Unprotect(allocator_, GetPageMemory(i));
  }
}

LargePageMemoryRegion::LargePageMemoryRegion(PageAllocator* allocator,
                                             size_t length)
    : PageMemoryRegion(allocator,
                       ReserveMemoryRegion(
                           allocator, RoundUp(length + 2 * kGuardPageSize,
                                              allocator->AllocatePageSize())),
                       true) {}

LargePageMemoryRegion::~LargePageMemoryRegion() = default;

void LargePageMemoryRegion::UnprotectForTesting() {
  Unprotect(allocator_, GetPageMemory());
}

PageMemoryRegionTree::PageMemoryRegionTree() = default;

PageMemoryRegionTree::~PageMemoryRegionTree() = default;

void PageMemoryRegionTree::Add(PageMemoryRegion* region) {
  DCHECK(region);
  auto result = set_.emplace(region->reserved_region().base(), region);
  USE(result);
  DCHECK(result.second);
}

void PageMemoryRegionTree::Remove(PageMemoryRegion* region) {
  DCHECK(region);
  auto size = set_.erase(region->reserved_region().base());
  USE(size);
  DCHECK_EQ(1u, size);
}

NormalPageMemoryPool::NormalPageMemoryPool() = default;

NormalPageMemoryPool::~NormalPageMemoryPool() = default;

void NormalPageMemoryPool::Add(size_t bucket, NormalPageMemoryRegion* pmr,
                               Address writeable_base) {
  DCHECK_LT(bucket, kNumPoolBuckets);
  pool_[bucket].push_back(std::make_pair(pmr, writeable_base));
}

std::pair<NormalPageMemoryRegion*, Address> NormalPageMemoryPool::Take(
    size_t bucket) {
  DCHECK_LT(bucket, kNumPoolBuckets);
  if (pool_[bucket].empty()) return {nullptr, nullptr};
  std::pair<NormalPageMemoryRegion*, Address> pair = pool_[bucket].back();
  pool_[bucket].pop_back();
  return pair;
}
PageBackend::PageBackend(PageAllocator* allocator) : allocator_(allocator) {}

PageBackend::~PageBackend() = default;

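// Editor's note on AllocateNormalPageMemory() below (comment added for
// exposition): on a pool miss, a fresh NormalPageMemoryRegion is reserved,
// all of its kNumPageRegions page slots are handed to the pool for |bucket|,
// the region is registered with the lookup tree, and the allocation is
// retried. The retry is then guaranteed to hit the pool.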
Address PageBackend::AllocateNormalPageMemory(size_t bucket) {
  std::pair<NormalPageMemoryRegion*, Address> result = page_pool_.Take(bucket);
  if (!result.first) {
    auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator_);
    for (size_t i = 0; i < NormalPageMemoryRegion::kNumPageRegions; ++i) {
      page_pool_.Add(bucket, pmr.get(),
                     pmr->GetPageMemory(i).writeable_region().base());
    }
    page_memory_region_tree_.Add(pmr.get());
    normal_page_memory_regions_.push_back(std::move(pmr));
    return AllocateNormalPageMemory(bucket);
  }
  result.first->Allocate(result.second);
  return result.second;
}

void PageBackend::FreeNormalPageMemory(size_t bucket, Address writeable_base) {
  auto* pmr = static_cast<NormalPageMemoryRegion*>(
      page_memory_region_tree_.Lookup(writeable_base));
  pmr->Free(writeable_base);
  page_pool_.Add(bucket, pmr, writeable_base);
}

Address PageBackend::AllocateLargePageMemory(size_t size) {
  auto pmr = std::make_unique<LargePageMemoryRegion>(allocator_, size);
  const PageMemory pm = pmr->GetPageMemory();
  Unprotect(allocator_, pm);
  page_memory_region_tree_.Add(pmr.get());
  large_page_memory_regions_.insert({pmr.get(), std::move(pmr)});
  return pm.writeable_region().base();
}

void PageBackend::FreeLargePageMemory(Address writeable_base) {
  PageMemoryRegion* pmr = page_memory_region_tree_.Lookup(writeable_base);
  page_memory_region_tree_.Remove(pmr);
  auto size = large_page_memory_regions_.erase(pmr);
  USE(size);
  DCHECK_EQ(1u, size);
}

}  // namespace internal
}  // namespace cppgc
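
A minimal usage sketch of PageBackend (editor's illustration; |allocator| is
whichever cppgc::PageAllocator implementation the embedder provides):

  #include <cstddef>

  #include "src/heap/cppgc/page-memory-inl.h"

  void ExampleUsage(cppgc::PageAllocator* allocator) {
    cppgc::internal::PageBackend backend(allocator);

    // Normal pages are handed out per bucket; freeing returns the slot to
    // the pool so the next allocation in the same bucket can reuse it.
    constexpr size_t kBucket = 0;
    cppgc::internal::Address writeable =
        backend.AllocateNormalPageMemory(kBucket);
    backend.FreeNormalPageMemory(kBucket, writeable);

    // Large pages bypass the pool; each one gets its own reservation with
    // leading and trailing guard pages.
    cppgc::internal::Address large = backend.AllocateLargePageMemory(1 << 20);
    backend.FreeLargePageMemory(large);
  }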
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CPPGC_PAGE_MEMORY_H_
#define V8_HEAP_CPPGC_PAGE_MEMORY_H_

#include <array>
#include <map>
#include <memory>
#include <unordered_map>
#include <vector>

#include "include/cppgc/platform.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/globals.h"

namespace cppgc {
namespace internal {

class V8_EXPORT_PRIVATE MemoryRegion final {
 public:
  MemoryRegion() = default;
  MemoryRegion(Address base, size_t size) : base_(base), size_(size) {
    DCHECK(base);
    DCHECK_LT(0u, size);
  }

  Address base() const { return base_; }
  size_t size() const { return size_; }
  Address end() const { return base_ + size_; }

  bool Contains(Address addr) const {
    return (reinterpret_cast<uintptr_t>(addr) -
            reinterpret_cast<uintptr_t>(base_)) < size_;
  }

  bool Contains(const MemoryRegion& other) const {
    return base_ <= other.base() && other.end() <= end();
  }

 private:
  Address base_ = nullptr;
  size_t size_ = 0;
};

// PageMemory provides the backing of a single normal or large page.
class V8_EXPORT_PRIVATE PageMemory final {
 public:
  PageMemory(MemoryRegion overall, MemoryRegion writeable)
      : overall_(overall), writable_(writeable) {
    DCHECK(overall.Contains(writeable));
  }

  const MemoryRegion writeable_region() const { return writable_; }
  const MemoryRegion overall_region() const { return overall_; }

 private:
  MemoryRegion overall_;
  MemoryRegion writable_;
};
class V8_EXPORT_PRIVATE PageMemoryRegion {
 public:
  virtual ~PageMemoryRegion();

  const MemoryRegion reserved_region() const { return reserved_region_; }
  bool is_large() const { return is_large_; }

  // Looks up the writeable base for an |address| contained in this
  // PageMemoryRegion. Filters out addresses that fall into non-writeable
  // regions (e.g. guard pages).
  inline Address Lookup(Address address) const;

  // Disallow copy/move.
  PageMemoryRegion(const PageMemoryRegion&) = delete;
  PageMemoryRegion& operator=(const PageMemoryRegion&) = delete;

  virtual void UnprotectForTesting() = 0;

 protected:
  PageMemoryRegion(PageAllocator*, MemoryRegion, bool);

  PageAllocator* const allocator_;
  const MemoryRegion reserved_region_;
  const bool is_large_;
};
// NormalPageMemoryRegion serves kNumPageRegions normal-sized PageMemory
// objects.
class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
 public:
  static constexpr size_t kNumPageRegions = 10;

  explicit NormalPageMemoryRegion(PageAllocator*);
  ~NormalPageMemoryRegion() override;

  const PageMemory GetPageMemory(size_t index) const {
    DCHECK_LT(index, kNumPageRegions);
    return PageMemory(
        MemoryRegion(reserved_region().base() + kPageSize * index, kPageSize),
        MemoryRegion(
            reserved_region().base() + kPageSize * index + kGuardPageSize,
            kPageSize - 2 * kGuardPageSize));
  }
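
  // Layout of one page slot produced by GetPageMemory() (editor's
  // illustration, added for exposition):
  //
  //   reserved_region().base() + kPageSize * index
  //   v
  //   +------------+--------------------------------+------------+
  //   | guard page |        writeable area          | guard page |
  //   | kGuardPage | kPageSize - 2 * kGuardPageSize | kGuardPage |
  //   | Size bytes |                                | Size bytes |
  //   +------------+--------------------------------+------------+
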
  // Allocates a normal page at |writeable_base| address. Changes page
  // protection.
  void Allocate(Address writeable_base);

  // Frees a normal page at |writeable_base| address. Changes page
  // protection.
  void Free(Address);

  inline Address Lookup(Address) const;

  void UnprotectForTesting() final;

 private:
  void ChangeUsed(size_t index, bool value) {
    DCHECK_LT(index, kNumPageRegions);
    DCHECK_EQ(value, !page_memories_in_use_[index]);
    page_memories_in_use_[index] = value;
  }

  size_t GetIndex(Address address) const {
    return static_cast<size_t>(address - reserved_region().base()) >>
           kPageSizeLog2;
  }

  std::array<bool, kNumPageRegions> page_memories_in_use_ = {};
};
// LargePageMemoryRegion serves a single large PageMemory object.
class V8_EXPORT_PRIVATE LargePageMemoryRegion final : public PageMemoryRegion {
 public:
  LargePageMemoryRegion(PageAllocator*, size_t);
  ~LargePageMemoryRegion() override;

  const PageMemory GetPageMemory() const {
    return PageMemory(
        MemoryRegion(reserved_region().base(), reserved_region().size()),
        MemoryRegion(reserved_region().base() + kGuardPageSize,
                     reserved_region().size() - 2 * kGuardPageSize));
  }

  inline Address Lookup(Address) const;

  void UnprotectForTesting() final;
};

// A PageMemoryRegionTree is a binary search tree of PageMemoryRegions sorted
// by reserved base addresses.
//
// The tree does not keep its elements alive but merely provides indexing
// capabilities.
class V8_EXPORT_PRIVATE PageMemoryRegionTree final {
 public:
  PageMemoryRegionTree();
  ~PageMemoryRegionTree();

  void Add(PageMemoryRegion*);
  void Remove(PageMemoryRegion*);

  inline PageMemoryRegion* Lookup(Address) const;

 private:
  std::map<Address, PageMemoryRegion*> set_;
};
// A pool of PageMemory objects represented by the writeable base addresses.
//
// The pool does not keep its elements alive but merely provides pooling
// capabilities.
class V8_EXPORT_PRIVATE NormalPageMemoryPool final {
 public:
  static constexpr size_t kNumPoolBuckets = 16;

  using Result = std::pair<NormalPageMemoryRegion*, Address>;

  NormalPageMemoryPool();
  ~NormalPageMemoryPool();

  void Add(size_t, NormalPageMemoryRegion*, Address);
  Result Take(size_t);

 private:
  std::vector<Result> pool_[kNumPoolBuckets];
};
// A backend that is used for allocating and freeing normal and large pages.
//
// Internally maintains a set of PageMemoryRegions. The backend keeps its used
// regions alive.
class V8_EXPORT_PRIVATE PageBackend final {
 public:
  explicit PageBackend(PageAllocator*);
  ~PageBackend();

  // Allocates a normal page from the backend.
  //
  // Returns the writeable base of the region.
  Address AllocateNormalPageMemory(size_t);

  // Returns normal page memory back to the backend. Expects the
  // |writeable_base| returned by |AllocateNormalPageMemory()|.
  void FreeNormalPageMemory(size_t, Address writeable_base);

  // Allocates a large page from the backend.
  //
  // Returns the writeable base of the region.
  Address AllocateLargePageMemory(size_t size);

  // Returns large page memory back to the backend. Expects the
  // |writeable_base| returned by |AllocateLargePageMemory()|.
  void FreeLargePageMemory(Address writeable_base);

  // Returns the writeable base if |address| is contained in a valid page
  // memory.
  inline Address Lookup(Address) const;

  // Disallow copy/move.
  PageBackend(const PageBackend&) = delete;
  PageBackend& operator=(const PageBackend&) = delete;

 private:
  PageAllocator* allocator_;
  NormalPageMemoryPool page_pool_;
  PageMemoryRegionTree page_memory_region_tree_;
  std::vector<std::unique_ptr<PageMemoryRegion>> normal_page_memory_regions_;
  std::unordered_map<PageMemoryRegion*, std::unique_ptr<PageMemoryRegion>>
      large_page_memory_regions_;
};

}  // namespace internal
}  // namespace cppgc

#endif  // V8_HEAP_CPPGC_PAGE_MEMORY_H_
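
A test-style sketch of the Lookup() contract (editor's illustration;
CHECK_EQ/CHECK_NULL are V8's src/base/logging.h macros, and |writeable_base|
is assumed to come from AllocateNormalPageMemory()):

  #include "src/base/logging.h"
  #include "src/heap/cppgc/page-memory-inl.h"

  void LookupContract(cppgc::internal::PageBackend& backend,
                      cppgc::internal::Address writeable_base) {
    // Addresses inside the writeable area resolve to the writeable base.
    CHECK_EQ(writeable_base, backend.Lookup(writeable_base));
    // Addresses in the preceding guard page are filtered out.
    CHECK_NULL(backend.Lookup(writeable_base - 1));
  }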
@@ -50,6 +50,7 @@ v8_source_set("cppgc_unittests_sources") {
    "heap/cppgc/heap-object-header_unittest.cc",
    "heap/cppgc/heap_unittest.cc",
    "heap/cppgc/member_unittests.cc",
    "heap/cppgc/page-memory_unittest.cc",
    "heap/cppgc/source-location_unittest.cc",
    "heap/cppgc/stack_unittest.cc",
    "heap/cppgc/tests.cc",