Commit 62418750 authored by Michael Lippautz, committed by V8 LUCI CQ

cppgc: Add support for double-word aligned allocations

Adds support for double-word aligned objects in Oilpan, i.e., 8-byte
alignment on 32-bit platforms and 16-byte alignment on 64-bit platforms.

Changes:
- Adds generic alignment APIs and overrides.
- Internal logic to support double-word aligned allocations on LABs.
- Adjusts the natural alignment of large objects to double-word alignment.
- Adds a new static_assert() that suggests users file a bug if higher
  alignment is required.
- Statically checks that no allocations with non-default alignment
  target custom spaces that support compaction.

Bug: v8:12295
Change-Id: I05766ce2349055d5d78b68919be00e7ee91d5505
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3218150
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77348}
parent 9c458346
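
Editor's note: for orientation, a minimal usage sketch of the pattern this change enables. The class and function names here are illustrative assumptions; the alignas-on-GarbageCollected pattern mirrors the unit tests added further down in this diff.

#include <cstddef>

#include "include/cppgc/allocation.h"
#include "include/cppgc/garbage-collected.h"
#include "include/cppgc/visitor.h"

namespace example {

constexpr size_t kDoubleWord = 2 * sizeof(void*);

// Requesting alignof(T) > kDefaultAlignment is now honored, up to
// api_constants::kMaxSupportedAlignment (2 * kDefaultAlignment).
class alignas(kDoubleWord) AlignedGCed final
    : public cppgc::GarbageCollected<AlignedGCed> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

// The returned object is double-word aligned on both 32-bit and 64-bit.
AlignedGCed* AllocateAligned(cppgc::AllocationHandle& handle) {
  return cppgc::MakeGarbageCollected<AlignedGCed>(handle);
}

}  // namespace example
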
@@ -27,6 +27,9 @@ class AllocationHandle;
namespace internal {
// Similar to C++17 std::align_val_t;
enum class AlignVal : size_t {};
class V8_EXPORT MakeGarbageCollectedTraitInternal {
protected:
static inline void MarkObjectAsFullyConstructed(const void* payload) {
@@ -45,32 +48,71 @@ class V8_EXPORT MakeGarbageCollectedTraitInternal {
atomic_mutable_bitfield->store(value, std::memory_order_release);
}
template <typename U, typename CustomSpace>
struct SpacePolicy {
static void* Allocate(AllocationHandle& handle, size_t size) {
// Custom space.
// Dispatch based on compile-time information.
//
// Default implementation is for a custom space with >`kDefaultAlignment` byte
// alignment.
template <typename GCInfoType, typename CustomSpace, size_t alignment>
struct AllocationDispatcher final {
static void* Invoke(AllocationHandle& handle, size_t size) {
static_assert(std::is_base_of<CustomSpaceBase, CustomSpace>::value,
"Custom space must inherit from CustomSpaceBase.");
static_assert(
!CustomSpace::kSupportsCompaction,
"Custom spaces that support compaction do not support allocating "
"objects with non-default (i.e. word-sized) alignment.");
return MakeGarbageCollectedTraitInternal::Allocate(
handle, size, internal::GCInfoTrait<U>::Index(),
CustomSpace::kSpaceIndex);
handle, size, static_cast<AlignVal>(alignment),
internal::GCInfoTrait<GCInfoType>::Index(), CustomSpace::kSpaceIndex);
}
};
// Fast path for regular allocations for the default space with
// `kDefaultAlignment` byte alignment.
template <typename GCInfoType>
struct AllocationDispatcher<GCInfoType, void,
api_constants::kDefaultAlignment>
final {
static void* Invoke(AllocationHandle& handle, size_t size) {
return MakeGarbageCollectedTraitInternal::Allocate(
handle, size, internal::GCInfoTrait<GCInfoType>::Index());
}
};
// Default space with >`kDefaultAlignment` byte alignment.
template <typename GCInfoType, size_t alignment>
struct AllocationDispatcher<GCInfoType, void, alignment> final {
static void* Invoke(AllocationHandle& handle, size_t size) {
return MakeGarbageCollectedTraitInternal::Allocate(
handle, size, static_cast<AlignVal>(alignment),
internal::GCInfoTrait<GCInfoType>::Index());
}
};
template <typename U>
struct SpacePolicy<U, void> {
static void* Allocate(AllocationHandle& handle, size_t size) {
// Default space.
// Custom space with `kDefaultAlignment` byte alignment.
template <typename GCInfoType, typename CustomSpace>
struct AllocationDispatcher<GCInfoType, CustomSpace,
api_constants::kDefaultAlignment>
final {
static void* Invoke(AllocationHandle& handle, size_t size) {
static_assert(std::is_base_of<CustomSpaceBase, CustomSpace>::value,
"Custom space must inherit from CustomSpaceBase.");
return MakeGarbageCollectedTraitInternal::Allocate(
handle, size, internal::GCInfoTrait<U>::Index());
handle, size, internal::GCInfoTrait<GCInfoType>::Index(),
CustomSpace::kSpaceIndex);
}
};
private:
static void* Allocate(cppgc::AllocationHandle& handle, size_t size,
GCInfoIndex index);
static void* Allocate(cppgc::AllocationHandle& handle, size_t size,
AlignVal alignment, GCInfoIndex index);
static void* Allocate(cppgc::AllocationHandle& handle, size_t size,
GCInfoIndex index, CustomSpaceIndex space_index);
static void* Allocate(cppgc::AllocationHandle& handle, size_t size,
AlignVal alignment, GCInfoIndex index,
CustomSpaceIndex space_index);
friend class HeapObjectHeader;
};
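// --- Editor's note (not part of this change) -------------------------------
// The AllocationDispatcher above in miniature: partial specializations pick
// the Allocate() overload from the (custom space, alignment) pair at compile
// time. The names below are illustrative only, not cppgc API.
#include <cstddef>
namespace dispatch_sketch {
constexpr size_t kDefaultAlignment = sizeof(void*);
template <typename Space, size_t alignment>
struct Dispatcher {  // custom space, over-aligned
  static constexpr int kOverload = 4;
};
template <size_t alignment>
struct Dispatcher<void, alignment> {  // default space, over-aligned
  static constexpr int kOverload = 3;
};
template <typename Space>
struct Dispatcher<Space, kDefaultAlignment> {  // custom space, default align
  static constexpr int kOverload = 2;
};
template <>
struct Dispatcher<void, kDefaultAlignment> {  // fast path: all defaults
  static constexpr int kOverload = 1;
};
struct SomeCustomSpace {};
static_assert(Dispatcher<void, kDefaultAlignment>::kOverload == 1,
              "default space and alignment hit the fast path");
static_assert(
    Dispatcher<SomeCustomSpace, 2 * kDefaultAlignment>::kOverload == 4,
    "over-aligned custom-space allocations hit the primary template");
}  // namespace dispatch_sketch
// ----------------------------------------------------------------------------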
@@ -109,10 +151,18 @@ class MakeGarbageCollectedTraitBase
std::is_base_of<typename T::ParentMostGarbageCollectedType, T>::value,
"U of GarbageCollected<U> must be a base of T. Check "
"GarbageCollected<T> base class inheritance.");
return SpacePolicy<
static constexpr size_t kWantedAlignment =
alignof(T) < internal::api_constants::kDefaultAlignment
? internal::api_constants::kDefaultAlignment
: alignof(T);
static_assert(
kWantedAlignment <= internal::api_constants::kMaxSupportedAlignment,
"Requested alignment larger than alignof(std::max_align_t) bytes. "
"Please file a bug to possibly get this restriction lifted.");
return AllocationDispatcher<
typename internal::GCInfoFolding<
T, typename T::ParentMostGarbageCollectedType>::ResultType,
typename SpaceTrait<T>::Space>::Allocate(handle, size);
typename SpaceTrait<T>::Space, kWantedAlignment>::Invoke(handle, size);
}
/**
@@ -39,6 +39,14 @@ constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(4) * kGB;
constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;
#endif
static constexpr size_t kDefaultAlignment = sizeof(void*);
// Maximum supported alignment for a type as in `alignof(T)`.
static constexpr size_t kMaxSupportedAlignment = 2 * kDefaultAlignment;
static_assert(kMaxSupportedAlignment >= alignof(std::max_align_t),
"Maximum support alignment must at least cover "
"alignof(std::max_align_t).");
} // namespace api_constants
} // namespace internal
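// --- Editor's note (not part of this change) -------------------------------
// Concrete values of the two constants above under an assumed LP64 target
// (sizeof(void*) == 8); a standalone sketch only.
#include <cstddef>
namespace constants_sketch {
constexpr size_t kDefaultAlignment = sizeof(void*);               // 8 on 64-bit
constexpr size_t kMaxSupportedAlignment = 2 * kDefaultAlignment;  // 16
static_assert(kMaxSupportedAlignment >= alignof(std::max_align_t),
              "a 16-byte maximum covers std::max_align_t on common 64-bit ABIs");
}  // namespace constants_sketch
// ----------------------------------------------------------------------------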
@@ -29,6 +29,16 @@ CPPGC_FORCE_ALWAYS_INLINE void* MakeGarbageCollectedTraitInternal::Allocate(
return static_cast<ObjectAllocator&>(handle).AllocateObject(size, index);
}
// Using CPPGC_FORCE_ALWAYS_INLINE to guide LTO for inlining the allocation
// fast path.
// static
CPPGC_FORCE_ALWAYS_INLINE void* MakeGarbageCollectedTraitInternal::Allocate(
cppgc::AllocationHandle& handle, size_t size, AlignVal alignment,
GCInfoIndex index) {
return static_cast<ObjectAllocator&>(handle).AllocateObject(size, alignment,
index);
}
// Using CPPGC_FORCE_ALWAYS_INLINE to guide LTO for inlining the allocation
// fast path.
// static
@@ -39,5 +49,15 @@ CPPGC_FORCE_ALWAYS_INLINE void* MakeGarbageCollectedTraitInternal::Allocate(
space_index);
}
// Using CPPGC_FORCE_ALWAYS_INLINE to guide LTO for inlining the allocation
// fast path.
// static
CPPGC_FORCE_ALWAYS_INLINE void* MakeGarbageCollectedTraitInternal::Allocate(
cppgc::AllocationHandle& handle, size_t size, AlignVal alignment,
GCInfoIndex index, CustomSpaceIndex space_index) {
return static_cast<ObjectAllocator&>(handle).AllocateObject(
size, alignment, index, space_index);
}
} // namespace internal
} // namespace cppgc
@@ -24,8 +24,8 @@ uint32_t BucketIndexForSize(uint32_t size) {
class FreeList::Entry : public HeapObjectHeader {
public:
explicit Entry(size_t size) : HeapObjectHeader(size, kFreeListGCInfoIndex) {
static_assert(sizeof(Entry) == kFreeListEntrySize, "Sizes must match");
static Entry& CreateAt(void* memory, size_t size) {
return *new (memory) Entry(size);
}
Entry* Next() const { return next_; }
@@ -41,6 +41,10 @@ class FreeList::Entry : public HeapObjectHeader {
}
private:
explicit Entry(size_t size) : HeapObjectHeader(size, kFreeListGCInfoIndex) {
static_assert(sizeof(Entry) == kFreeListEntrySize, "Sizes must match");
}
Entry* next_ = nullptr;
};
@@ -65,26 +69,28 @@ Address FreeList::Add(FreeList::Block block) {
DCHECK_GT(kPageSize, size);
DCHECK_LE(sizeof(HeapObjectHeader), size);
if (block.size < sizeof(Entry)) {
if (size < sizeof(Entry)) {
// Create wasted entry. This can happen when an almost emptied linear
// allocation buffer is returned to the freelist.
// This could be SET_MEMORY_ACCESSIBLE. Since there's no payload, the next
operation overwrites the memory completely, and we can thus avoid
// zeroing it out.
ASAN_UNPOISON_MEMORY_REGION(block.address, sizeof(HeapObjectHeader));
new (block.address) HeapObjectHeader(size, kFreeListGCInfoIndex);
return reinterpret_cast<Address>(block.address) + block.size;
auto& filler = Filler::CreateAt(block.address, size);
USE(filler);
DCHECK_EQ(reinterpret_cast<Address>(block.address) + size,
filler.ObjectEnd());
return reinterpret_cast<Address>(block.address) + size;
}
// Make sure the freelist header is writable. SET_MEMORY_ACCESSIBLE is not
// needed as we write the whole payload of Entry.
ASAN_UNPOISON_MEMORY_REGION(block.address, sizeof(Entry));
Entry* entry = new (block.address) Entry(size);
Entry& entry = Entry::CreateAt(block.address, size);
const size_t index = BucketIndexForSize(static_cast<uint32_t>(size));
entry->Link(&free_list_heads_[index]);
entry.Link(&free_list_heads_[index]);
biggest_free_list_index_ = std::max(biggest_free_list_index_, index);
if (!entry->Next()) {
free_list_tails_[index] = entry;
if (!entry.Next()) {
free_list_tails_[index] = &entry;
}
return reinterpret_cast<Address>(block.address) + sizeof(Entry);
}
@@ -9,12 +9,26 @@
#include "include/cppgc/heap-statistics.h"
#include "src/base/macros.h"
#include "src/base/sanitizer/asan.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
namespace cppgc {
namespace internal {
class Filler : public HeapObjectHeader {
public:
static Filler& CreateAt(void* memory, size_t size) {
// The memory area only needs to be unpoisoned when running with ASAN. Zapped
// values (DEBUG) or uninitialized values (MSAN) are overwritten below.
ASAN_UNPOISON_MEMORY_REGION(memory, sizeof(Filler));
return *new (memory) Filler(size);
}
protected:
explicit Filler(size_t size) : HeapObjectHeader(size, kFreeListGCInfoIndex) {}
};
class V8_EXPORT_PRIVATE FreeList {
public:
struct Block {
@@ -210,16 +210,20 @@ LargePage::~LargePage() = default;
// static
size_t LargePage::AllocationSize(size_t payload_size) {
const size_t page_header_size =
RoundUp(sizeof(LargePage), kAllocationGranularity);
return page_header_size + payload_size;
return PageHeaderSize() + payload_size;
}
// static
LargePage* LargePage::Create(PageBackend& page_backend, LargePageSpace& space,
size_t size) {
DCHECK_LE(kLargeObjectSizeThreshold, size);
// Ensure that the API-provided alignment guarantee does not violate the
// internally guaranteed alignment of large page allocations.
STATIC_ASSERT(kGuaranteedObjectAlignment <=
api_constants::kMaxSupportedAlignment);
STATIC_ASSERT(
api_constants::kMaxSupportedAlignment % kGuaranteedObjectAlignment == 0);
DCHECK_LE(kLargeObjectSizeThreshold, size);
const size_t allocation_size = AllocationSize(size);
auto* heap = space.raw_heap()->heap();
@@ -253,8 +257,7 @@ const HeapObjectHeader* LargePage::ObjectHeader() const {
}
Address LargePage::PayloadStart() {
return AlignAddress((reinterpret_cast<Address>(this + 1)),
kAllocationGranularity);
return reinterpret_cast<Address>(this) + PageHeaderSize();
}
ConstAddress LargePage::PayloadStart() const {
@@ -202,6 +202,15 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
class V8_EXPORT_PRIVATE LargePage final : public BasePage {
public:
static constexpr size_t PageHeaderSize() {
// The header size is deliberately misaligned with respect to
// `kGuaranteedObjectAlignment` by `sizeof(HeapObjectHeader)` so that adding a
// `HeapObjectHeader` gets the user object aligned to
// `kGuaranteedObjectAlignment`.
return RoundUp<kGuaranteedObjectAlignment>(sizeof(LargePage) +
sizeof(HeapObjectHeader)) -
sizeof(HeapObjectHeader);
}
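// --- Editor's note (not part of this change) -------------------------------
// The header-size math above with assumed sizes (sizeof(LargePage) == 96 and
// sizeof(HeapObjectHeader) == 8 are placeholders, not the real values):
//   RoundUp<16>(96 + 8) - 8 == 112 - 8 == 104,
// so PayloadStart() == page + 104 and the user object begins at page + 112,
// which is kGuaranteedObjectAlignment (16-byte) aligned. A compile-time check:
#include <cstddef>
namespace page_header_sketch {
constexpr size_t kAssumedLargePageSize = 96;    // placeholder
constexpr size_t kAssumedHeaderSize = 8;        // sizeof(HeapObjectHeader)
constexpr size_t kGuaranteedAlignment = 16;     // 2 * kAllocationGranularity
constexpr size_t RoundUpTo(size_t x, size_t align) {
  return (x + align - 1) & ~(align - 1);
}
constexpr size_t kSketchPageHeaderSize =
    RoundUpTo(kAssumedLargePageSize + kAssumedHeaderSize,
              kGuaranteedAlignment) -
    kAssumedHeaderSize;
static_assert(
    (kSketchPageHeaderSize + kAssumedHeaderSize) % kGuaranteedAlignment == 0,
    "the user object starts on a double-word boundary");
}  // namespace page_header_sketch
// ----------------------------------------------------------------------------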
// Returns the allocation size required for a payload of size |size|.
static size_t AllocationSize(size_t size);
// Allocates a new page in the detached state.
@@ -239,6 +248,9 @@ class V8_EXPORT_PRIVATE LargePage final : public BasePage {
}
private:
static constexpr size_t kGuaranteedObjectAlignment =
2 * kAllocationGranularity;
LargePage(HeapBase& heap, BaseSpace& space, size_t);
~LargePage();
@@ -4,6 +4,7 @@
#include "src/heap/cppgc/object-allocator.h"
#include "include/cppgc/allocation.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/free-list.h"
@@ -22,6 +23,7 @@
namespace cppgc {
namespace internal {
namespace {
void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
@@ -115,8 +117,9 @@ ObjectAllocator::ObjectAllocator(RawHeap& heap, PageBackend& page_backend,
prefinalizer_handler_(prefinalizer_handler) {}
void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace& space, size_t size,
AlignVal alignment,
GCInfoIndex gcinfo) {
void* memory = OutOfLineAllocateImpl(space, size, gcinfo);
void* memory = OutOfLineAllocateImpl(space, size, alignment, gcinfo);
stats_collector_.NotifySafePointForConservativeCollection();
if (prefinalizer_handler_.IsInvokingPreFinalizers()) {
// Objects allocated during pre finalizers should be allocated as black
@@ -132,68 +135,79 @@ void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace& space, size_t size,
}
void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
size_t size, GCInfoIndex gcinfo) {
size_t size, AlignVal alignment,
GCInfoIndex gcinfo) {
DCHECK_EQ(0, size & kAllocationMask);
DCHECK_LE(kFreeListEntrySize, size);
// Out-of-line allocation allows for checking this in all situations.
CHECK(!in_disallow_gc_scope());
// 1. If this allocation is big enough, allocate a large object.
// If this allocation is big enough, allocate a large object.
if (size >= kLargeObjectSizeThreshold) {
auto& large_space = LargePageSpace::From(
*raw_heap_.Space(RawHeap::RegularSpaceType::kLarge));
// LargePage has a natural alignment that already satisfies
// `kMaxSupportedAlignment`.
return AllocateLargeObject(page_backend_, large_space, stats_collector_,
size, gcinfo);
}
// 2. Try to allocate from the freelist.
if (void* result = AllocateFromFreeList(space, size, gcinfo)) {
return result;
size_t request_size = size;
// Adjust size to be able to accommodate alignment.
const size_t dynamic_alignment = static_cast<size_t>(alignment);
if (dynamic_alignment != kAllocationGranularity) {
CHECK_EQ(2 * sizeof(HeapObjectHeader), dynamic_alignment);
request_size += kAllocationGranularity;
}
// 3. Lazily sweep pages of this heap until we find a freed area for
// this allocation or we finish sweeping all pages of this heap.
RefillLinearAllocationBuffer(space, request_size);
// The allocation must succeed, as we just refilled the LAB.
void* result = (dynamic_alignment == kAllocationGranularity)
? AllocateObjectOnSpace(space, size, gcinfo)
: AllocateObjectOnSpace(space, size, alignment, gcinfo);
CHECK(result);
return result;
}
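// --- Editor's note (not part of this change) -------------------------------
// Why a single kAllocationGranularity of headroom suffices above: freelist
// blocks and LAB starts are always kAllocationGranularity-aligned, so the
// object payload is either already double-word aligned or off by exactly one
// granularity unit. The numeric values below assume a 64-bit build and are
// illustrative only.
#include <cstdint>
namespace headroom_sketch {
constexpr uintptr_t kGranularity = 8;           // kAllocationGranularity
constexpr uintptr_t kHeaderSize = 8;            // sizeof(HeapObjectHeader)
constexpr uintptr_t kDoubleWordAlignment = 16;  // 2 * kGranularity
constexpr uintptr_t PaddingFor(uintptr_t lab_start) {
  return ((lab_start + kHeaderSize) % kDoubleWordAlignment == 0)
             ? 0
             : kDoubleWordAlignment - kHeaderSize;
}
static_assert(PaddingFor(0x1008) == 0, "payload already double-word aligned");
static_assert(PaddingFor(0x1000) == kGranularity,
              "worst case needs exactly one granularity unit of filler, hence "
              "request_size = size + kAllocationGranularity");
}  // namespace headroom_sketch
// ----------------------------------------------------------------------------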
void ObjectAllocator::RefillLinearAllocationBuffer(NormalPageSpace& space,
size_t size) {
// Try to allocate from the freelist.
if (RefillLinearAllocationBufferFromFreeList(space, size)) return;
// Lazily sweep pages of this heap until we find a freed area for this
// allocation or we finish sweeping all pages of this heap.
Sweeper& sweeper = raw_heap_.heap()->sweeper();
// TODO(chromium:1056170): Investigate whether this should be a loop which
// would result in more aggressive re-use of memory at the expense of
// potentially larger allocation time.
if (sweeper.SweepForAllocationIfRunning(&space, size)) {
// Sweeper found a block of at least `size` bytes. Allocation from the free
// list may still fail as actual buckets are not exhaustively searched for
// a suitable block. Instead, buckets are tested from larger sizes that are
// guaranteed to fit the block to smaller bucket sizes that may only
// potentially fit the block. For the bucket that may exactly fit the
// allocation of `size` bytes (no overallocation), only the first entry is
// checked.
if (void* result = AllocateFromFreeList(space, size, gcinfo)) {
return result;
}
// Sweeper found a block of at least `size` bytes. Allocation from the
// free list may still fail as actual buckets are not exhaustively
// searched for a suitable block. Instead, buckets are tested from larger
// sizes that are guaranteed to fit the block to smaller bucket sizes that
// may only potentially fit the block. For the bucket that may exactly fit
// the allocation of `size` bytes (no overallocation), only the first
// entry is checked.
if (RefillLinearAllocationBufferFromFreeList(space, size)) return;
}
// 4. Complete sweeping.
sweeper.FinishIfRunning();
// TODO(chromium:1056170): Make use of the synchronously freed memory.
// 5. Add a new page to this heap.
auto* new_page = NormalPage::Create(page_backend_, space);
space.AddPage(new_page);
// 6. Set linear allocation buffer to new page.
// Set linear allocation buffer to new page.
ReplaceLinearAllocationBuffer(space, stats_collector_,
new_page->PayloadStart(),
new_page->PayloadSize());
// 7. Allocate from it. The allocation must succeed.
void* result = AllocateObjectOnSpace(space, size, gcinfo);
CHECK(result);
return result;
}
void* ObjectAllocator::AllocateFromFreeList(NormalPageSpace& space, size_t size,
GCInfoIndex gcinfo) {
bool ObjectAllocator::RefillLinearAllocationBufferFromFreeList(
NormalPageSpace& space, size_t size) {
const FreeList::Block entry = space.free_list().Allocate(size);
if (!entry.address) return nullptr;
if (!entry.address) return false;
// Assume discarded memory on that page is now zero.
auto& page = *NormalPage::From(BasePage::FromPayload(entry.address));
@@ -204,8 +218,7 @@ void* ObjectAllocator::AllocateFromFreeList(NormalPageSpace& space, size_t size,
ReplaceLinearAllocationBuffer(
space, stats_collector_, static_cast<Address>(entry.address), entry.size);
return AllocateObjectOnSpace(space, size, gcinfo);
return true;
}
void ObjectAllocator::ResetLinearAllocationBuffers() {
@@ -9,6 +9,7 @@
#include "include/cppgc/internal/gc-info.h"
#include "include/cppgc/macros.h"
#include "src/base/logging.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
@@ -43,8 +44,12 @@ class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
PreFinalizerHandler& prefinalizer_handler);
inline void* AllocateObject(size_t size, GCInfoIndex gcinfo);
inline void* AllocateObject(size_t size, AlignVal alignment,
GCInfoIndex gcinfo);
inline void* AllocateObject(size_t size, GCInfoIndex gcinfo,
CustomSpaceIndex space_index);
inline void* AllocateObject(size_t size, AlignVal alignment,
GCInfoIndex gcinfo, CustomSpaceIndex space_index);
void ResetLinearAllocationBuffers();
@@ -61,9 +66,13 @@ class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
inline void* AllocateObjectOnSpace(NormalPageSpace& space, size_t size,
GCInfoIndex gcinfo);
void* OutOfLineAllocate(NormalPageSpace&, size_t, GCInfoIndex);
void* OutOfLineAllocateImpl(NormalPageSpace&, size_t, GCInfoIndex);
void* AllocateFromFreeList(NormalPageSpace&, size_t, GCInfoIndex);
inline void* AllocateObjectOnSpace(NormalPageSpace& space, size_t size,
AlignVal alignment, GCInfoIndex gcinfo);
void* OutOfLineAllocate(NormalPageSpace&, size_t, AlignVal, GCInfoIndex);
void* OutOfLineAllocateImpl(NormalPageSpace&, size_t, AlignVal, GCInfoIndex);
void RefillLinearAllocationBuffer(NormalPageSpace&, size_t);
bool RefillLinearAllocationBufferFromFreeList(NormalPageSpace&, size_t);
RawHeap& raw_heap_;
PageBackend& page_backend_;
@@ -81,6 +90,17 @@ void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
allocation_size, gcinfo);
}
void* ObjectAllocator::AllocateObject(size_t size, AlignVal alignment,
GCInfoIndex gcinfo) {
DCHECK(!in_disallow_gc_scope());
const size_t allocation_size =
RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
const RawHeap::RegularSpaceType type =
GetInitialSpaceIndexForSize(allocation_size);
return AllocateObjectOnSpace(NormalPageSpace::From(*raw_heap_.Space(type)),
allocation_size, alignment, gcinfo);
}
void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo,
CustomSpaceIndex space_index) {
DCHECK(!in_disallow_gc_scope());
@@ -91,6 +111,17 @@ void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo,
allocation_size, gcinfo);
}
void* ObjectAllocator::AllocateObject(size_t size, AlignVal alignment,
GCInfoIndex gcinfo,
CustomSpaceIndex space_index) {
DCHECK(!in_disallow_gc_scope());
const size_t allocation_size =
RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
return AllocateObjectOnSpace(
NormalPageSpace::From(*raw_heap_.CustomSpace(space_index)),
allocation_size, alignment, gcinfo);
}
// static
RawHeap::RegularSpaceType ObjectAllocator::GetInitialSpaceIndexForSize(
size_t size) {
@@ -104,6 +135,49 @@ RawHeap::RegularSpaceType ObjectAllocator::GetInitialSpaceIndexForSize(
return RawHeap::RegularSpaceType::kNormal4;
}
void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace& space,
size_t size, AlignVal alignment,
GCInfoIndex gcinfo) {
// The APIs are set up to support general alignment. Since we want to keep
// track of the actual usage there, the alignment support currently only
// covers double-word alignment (8 bytes on 32-bit and 16 bytes on 64-bit
// architectures). This is enforced on the public API via static_asserts
// against alignof(T).
STATIC_ASSERT(2 * kAllocationGranularity ==
api_constants::kMaxSupportedAlignment);
STATIC_ASSERT(kAllocationGranularity == sizeof(HeapObjectHeader));
DCHECK_EQ(2 * sizeof(HeapObjectHeader), static_cast<size_t>(alignment));
constexpr size_t kAlignment = 2 * kAllocationGranularity;
constexpr size_t kAlignmentMask = kAlignment - 1;
constexpr size_t kPaddingSize = kAlignment - sizeof(HeapObjectHeader);
NormalPageSpace::LinearAllocationBuffer& current_lab =
space.linear_allocation_buffer();
const size_t current_lab_size = current_lab.size();
// Case 1: The LAB fits the request and the LAB start is already properly
// aligned.
bool lab_allocation_will_succeed =
current_lab_size >= size &&
(reinterpret_cast<uintptr_t>(current_lab.start() +
sizeof(HeapObjectHeader)) &
kAlignmentMask) == 0;
// Case 2: The LAB fits an extended request that uses a filler to manually
// align the subsequent allocation.
if (!lab_allocation_will_succeed &&
(current_lab_size >= (size + kPaddingSize))) {
void* filler_memory = current_lab.Allocate(kPaddingSize);
Filler::CreateAt(filler_memory, kPaddingSize);
lab_allocation_will_succeed = true;
}
if (lab_allocation_will_succeed) {
void* object = AllocateObjectOnSpace(space, size, gcinfo);
DCHECK_NOT_NULL(object);
DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(object) & kAlignmentMask);
return object;
}
return OutOfLineAllocate(space, size, alignment, gcinfo);
}
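// --- Editor's note (not part of this change) -------------------------------
// The two LAB cases above, end to end: either the payload slot after the
// current LAB start is already double-word aligned (case 1), or a
// kPaddingSize filler is carved off first (case 2). Constants assume a
// 64-bit build; the helper is illustrative only, not cppgc API.
#include <cstdint>
namespace lab_sketch {
constexpr uintptr_t kHeaderSize = 8;   // sizeof(HeapObjectHeader) (assumed)
constexpr uintptr_t kAlignment = 16;   // 2 * kAllocationGranularity
constexpr uintptr_t kPaddingSize = kAlignment - kHeaderSize;
// Returns where the object payload would start for a LAB beginning at
// `lab_start`.
constexpr uintptr_t PayloadAddress(uintptr_t lab_start) {
  const bool already_aligned = ((lab_start + kHeaderSize) % kAlignment) == 0;
  // Case 2 consumes kPaddingSize bytes for a Filler before the real header.
  const uintptr_t header =
      already_aligned ? lab_start : lab_start + kPaddingSize;
  return header + kHeaderSize;
}
static_assert(PayloadAddress(0x1008) % kAlignment == 0, "case 1: no filler");
static_assert(PayloadAddress(0x1000) % kAlignment == 0, "case 2: 8-byte filler");
}  // namespace lab_sketch
// ----------------------------------------------------------------------------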
void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace& space,
size_t size, GCInfoIndex gcinfo) {
DCHECK_LT(0u, gcinfo);
@@ -111,7 +185,8 @@ void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace& space,
NormalPageSpace::LinearAllocationBuffer& current_lab =
space.linear_allocation_buffer();
if (current_lab.size() < size) {
return OutOfLineAllocate(space, size, gcinfo);
return OutOfLineAllocate(
space, size, static_cast<AlignVal>(kAllocationGranularity), gcinfo);
}
void* raw = current_lab.Allocate(size);
@@ -5,6 +5,7 @@
#include "include/cppgc/allocation.h"
#include "include/cppgc/visitor.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "test/unittests/heap/cppgc/tests.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -130,5 +131,95 @@ TEST_F(CppgcAllocationTest, LargePagesAreZeroedOut) {
EXPECT_TRUE(reused_page);
}
namespace {
constexpr size_t kDoubleWord = 2 * sizeof(void*);
constexpr size_t kWord = sizeof(void*);
class alignas(kDoubleWord) DoubleWordAligned final
: public GarbageCollected<DoubleWordAligned> {
public:
void Trace(Visitor*) const {}
};
class alignas(kDoubleWord) LargeDoubleWordAligned
: public GarbageCollected<LargeDoubleWordAligned> {
public:
virtual void Trace(cppgc::Visitor*) const {}
char array[kLargeObjectSizeThreshold];
};
template <size_t Size>
class CustomPadding final : public GarbageCollected<CustomPadding<Size>> {
public:
void Trace(cppgc::Visitor* visitor) const {}
char base_size[128]; // Gets allocated using RegularSpaceType::kNormal4.
char padding[Size];
};
template <size_t Size>
class alignas(kDoubleWord) AlignedCustomPadding final
: public GarbageCollected<AlignedCustomPadding<Size>> {
public:
void Trace(cppgc::Visitor* visitor) const {}
char base_size[128]; // Gets allocated using RegularSpaceType::kNormal4.
char padding[Size];
};
} // namespace
TEST_F(CppgcAllocationTest, DoubleWordAlignedAllocation) {
static constexpr size_t kAlignmentMask = kDoubleWord - 1;
auto* gced = MakeGarbageCollected<DoubleWordAligned>(GetAllocationHandle());
EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(gced) & kAlignmentMask);
}
TEST_F(CppgcAllocationTest, LargeDoubleWordAlignedAllocation) {
static constexpr size_t kAlignmentMask = kDoubleWord - 1;
auto* gced =
MakeGarbageCollected<LargeDoubleWordAligned>(GetAllocationHandle());
EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(gced) & kAlignmentMask);
}
TEST_F(CppgcAllocationTest, AlignToDoubleWordFromUnaligned) {
static constexpr size_t kAlignmentMask = kDoubleWord - 1;
auto* padding_object =
MakeGarbageCollected<CustomPadding<16>>(GetAllocationHandle());
// First allocation is not aligned.
ASSERT_EQ(kWord,
reinterpret_cast<uintptr_t>(padding_object) & kAlignmentMask);
// The end should also not be properly aligned.
ASSERT_EQ(kWord, (reinterpret_cast<uintptr_t>(padding_object) +
sizeof(*padding_object)) &
kAlignmentMask);
auto* aligned_object =
MakeGarbageCollected<AlignedCustomPadding<16>>(GetAllocationHandle());
EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(aligned_object) & kAlignmentMask);
// The test only yields a reliable result if objects are adjacent to each other.
ASSERT_EQ(reinterpret_cast<uintptr_t>(padding_object) +
sizeof(*padding_object) + sizeof(HeapObjectHeader),
reinterpret_cast<uintptr_t>(aligned_object));
}
TEST_F(CppgcAllocationTest, AlignToDoubleWordFromAligned) {
static constexpr size_t kAlignmentMask = kDoubleWord - 1;
auto* padding_object =
MakeGarbageCollected<CustomPadding<kWord>>(GetAllocationHandle());
// First allocation is not aligned.
ASSERT_EQ(kWord,
reinterpret_cast<uintptr_t>(padding_object) & kAlignmentMask);
// The end should be properly aligned.
ASSERT_EQ(0u, (reinterpret_cast<uintptr_t>(padding_object) +
sizeof(*padding_object)) &
kAlignmentMask);
auto* aligned_object =
MakeGarbageCollected<AlignedCustomPadding<16>>(GetAllocationHandle());
EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(aligned_object) & kAlignmentMask);
// The test only yields a reliable result if objects are adjacent to each other.
ASSERT_EQ(reinterpret_cast<uintptr_t>(padding_object) +
sizeof(*padding_object) + 2 * sizeof(HeapObjectHeader),
reinterpret_cast<uintptr_t>(aligned_object));
}
} // namespace internal
} // namespace cppgc
@@ -77,6 +77,14 @@ class CustomGCedFinal2 final : public CustomGCedBase {
~CustomGCedFinal2() { g_destructor_callcount++; }
};
constexpr size_t kDoubleWord = 2 * sizeof(void*);
class alignas(kDoubleWord) CustomGCedWithDoubleWordAlignment final
: public GarbageCollected<CustomGCedWithDoubleWordAlignment> {
public:
void Trace(Visitor*) const {}
};
} // namespace
} // namespace internal
@@ -97,6 +105,11 @@ struct SpaceTrait<
using Space = CustomSpace1;
};
template <>
struct SpaceTrait<internal::CustomGCedWithDoubleWordAlignment> {
using Space = CustomSpace1;
};
namespace internal {
TEST_F(TestWithHeapWithCustomSpaces, AllocateOnCustomSpaces) {
@@ -114,6 +127,14 @@ TEST_F(TestWithHeapWithCustomSpaces, AllocateOnCustomSpaces) {
NormalPage::FromPayload(regular)->space().index());
}
TEST_F(TestWithHeapWithCustomSpaces, AllocateDoubleWordAlignedOnCustomSpace) {
static constexpr size_t kAlignmentMask = kDoubleWord - 1;
auto* custom_aligned =
MakeGarbageCollected<CustomGCedWithDoubleWordAlignment>(
GetHeap()->GetAllocationHandle());
EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(custom_aligned) & kAlignmentMask);
}
TEST_F(TestWithHeapWithCustomSpaces, DifferentSpacesUsesDifferentPages) {
auto* regular =
MakeGarbageCollected<RegularGCed>(GetHeap()->GetAllocationHandle());
@@ -104,7 +104,7 @@ TEST_F(HeapStatisticsCollectorTest, NonEmptyLargePage) {
static constexpr size_t used_size = RoundUp<kAllocationGranularity>(
kLargeObjectSizeThreshold + sizeof(HeapObjectHeader));
static constexpr size_t committed_size =
RoundUp<kAllocationGranularity>(used_size + sizeof(LargePage));
RoundUp<kAllocationGranularity>(used_size + LargePage::PageHeaderSize());
HeapStatistics detailed_stats = Heap::From(GetHeap())->CollectStatistics(
HeapStatistics::DetailLevel::kDetailed);
EXPECT_EQ(HeapStatistics::DetailLevel::kDetailed,