Commit 703b0b31 authored by Teodor Dutu, committed by V8 LUCI CQ

[ptr-compr-8gb] Align runtime allocations to 8 bytes

In order to support a larger heap cage (8GB, 16GB), the cage offset
will take up more than 32 bits. As a consequence, for 8GB cages, the
least significant bit of the cage offset will overlap with the most
significant bit of the tagged offset. To avoid this, allocations need
to be aligned to 8 bytes to free up one bit from the offset.
All changes are deactivated behind the build flag
`v8_enable_pointer_compression_8gb`.

Bug: v8:13070
Change-Id: Ibb0bd0177f3e88dcd24fc0ee7526335df0faa987
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3791052
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Auto-Submit: Teo Dutu <teodutu@google.com>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82299}
parent 9cca4e60
......@@ -653,6 +653,10 @@ constexpr int kObjectAlignmentBits = kTaggedSizeLog2;
constexpr intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
constexpr intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
// Object alignment for 8GB pointer compressed heap.
constexpr intptr_t kObjectAlignment8GbHeap = 8;
constexpr intptr_t kObjectAlignment8GbHeapMask = kObjectAlignment8GbHeap - 1;
// Desired alignment for system pointers.
constexpr intptr_t kPointerAlignment = (1 << kSystemPointerSizeLog2);
constexpr intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
......
......@@ -33,7 +33,8 @@ AllocationResult ConcurrentAllocator::AllocateRaw(int size_in_bytes,
}
AllocationResult result;
if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
if (V8_COMPRESS_POINTERS_8GB_BOOL ||
(USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned)) {
result = lab_.AllocateRawAligned(size_in_bytes, alignment);
} else {
result = lab_.AllocateRawUnaligned(size_in_bytes);
......
......@@ -11,6 +11,7 @@
#include "src/ast/ast-source-ranges.h"
#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/builtins/accessors.h"
#include "src/builtins/constants-table-builder.h"
#include "src/codegen/compilation-cache.h"
......@@ -2136,6 +2137,9 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
if (!site.is_null()) {
DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
adjusted_object_size += AllocationMemento::kSize;
if (V8_COMPRESS_POINTERS_8GB_BOOL &&
!IsAligned(object_size, kObjectAlignment8GbHeap))
adjusted_object_size += kObjectAlignment8GbHeap - kTaggedSize;
}
HeapObject raw_clone =
allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
......@@ -2154,8 +2158,14 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
isolate()->heap()->WriteBarrierForRange(raw_clone, start, end);
}
if (!site.is_null()) {
AllocationMemento alloc_memento = AllocationMemento::unchecked_cast(
Object(raw_clone.ptr() + object_size));
if (V8_COMPRESS_POINTERS_8GB_BOOL &&
!IsAligned(object_size, kObjectAlignment8GbHeap)) {
isolate()->heap()->CreateFillerObjectAt(
raw_clone.address() + object_size,
kObjectAlignment8GbHeap - kTaggedSize);
}
AllocationMemento alloc_memento = AllocationMemento::unchecked_cast(Object(
raw_clone.ptr() + adjusted_object_size - AllocationMemento::kSize));
InitializeAllocationMemento(alloc_memento, *site);
}
......
......@@ -16,6 +16,7 @@
#include "src/base/bits.h"
#include "src/base/flags.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/once.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/wrappers.h"
......@@ -3089,6 +3090,9 @@ static_assert(!USE_ALLOCATION_ALIGNMENT_BOOL ||
(HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
if (V8_COMPRESS_POINTERS_8GB_BOOL)
return kObjectAlignment8GbHeap - kTaggedSize;
switch (alignment) {
case kTaggedAligned:
return 0;
......@@ -3102,6 +3106,11 @@ int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
// static
int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
if (V8_COMPRESS_POINTERS_8GB_BOOL) {
return IsAligned(address, kObjectAlignment8GbHeap)
? 0
: kObjectAlignment8GbHeap - kTaggedSize;
}
if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
return kTaggedSize;
if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
......
......@@ -100,7 +100,13 @@ V8_INLINE bool PagedSpaceBase::EnsureAllocation(int size_in_bytes,
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
size_in_bytes += Heap::GetMaximumFillToAlign(alignment);
// TODO(teodutu): remove the need for this special case by ensuring that the
// allocation top stays properly aligned after allocations.
if (V8_COMPRESS_POINTERS_8GB_BOOL && executable_ == EXECUTABLE) {
DCHECK(IsAligned(allocation_info_.top(), kCodeAlignment));
} else {
size_in_bytes += Heap::GetMaximumFillToAlign(alignment);
}
if (out_max_aligned_size) {
*out_max_aligned_size = size_in_bytes;
}
......
......@@ -8,7 +8,9 @@
#include <cstring>
#include "src/base/lazy-instance.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/common/ptr-compr-inl.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap-write-barrier-inl.h"
......@@ -297,6 +299,13 @@ HeapObject ReadOnlyHeapObjectIterator::Next() {
continue;
}
HeapObject object = HeapObject::FromAddress(current_addr_);
if (V8_COMPRESS_POINTERS_8GB_BOOL &&
!IsAligned(current_addr_, kObjectAlignment8GbHeap) &&
!object.IsFreeSpace()) {
current_addr_ = RoundUp<kObjectAlignment8GbHeap>(current_addr_);
continue;
}
const int object_size = object.Size();
current_addr_ += object_size;
......
......@@ -9,6 +9,7 @@
#include "include/v8-internal.h"
#include "include/v8-platform.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/common/ptr-compr-inl.h"
#include "src/execution/isolate.h"
......@@ -17,6 +18,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/read-only-heap.h"
#include "src/objects/heap-object.h"
#include "src/objects/objects-inl.h"
#include "src/snapshot/snapshot-data.h"
#include "src/snapshot/snapshot-utils.h"
......@@ -435,6 +437,16 @@ class ReadOnlySpaceObjectIterator : public ObjectIterator {
continue;
}
HeapObject obj = HeapObject::FromAddress(cur_addr_);
// TODO(teodutu): Simplify checking for one pointer fillers. We cannot
// verify them directly because some of the objects here are initialised
// before the one pointer filler map, which leads to the wrong map being
// written instead.
if (V8_COMPRESS_POINTERS_8GB_BOOL &&
!IsAligned(cur_addr_, kObjectAlignment8GbHeap) &&
!obj.IsFreeSpace()) {
cur_addr_ = RoundUp<kObjectAlignment8GbHeap>(cur_addr_);
continue;
}
const int obj_size = obj.Size();
cur_addr_ += obj_size;
DCHECK_LE(cur_addr_, cur_end_);
......@@ -679,7 +691,8 @@ AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment) {
AllocationResult result =
USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
V8_COMPRESS_POINTERS_8GB_BOOL ||
(USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned)
? AllocateRawAligned(size_in_bytes, alignment)
: AllocateRawUnaligned(size_in_bytes);
HeapObject heap_obj;
......
......@@ -256,7 +256,8 @@ AllocationResult SpaceWithLinearArea::AllocateRaw(int size_in_bytes,
AllocationResult result;
if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
if (V8_COMPRESS_POINTERS_8GB_BOOL ||
(USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned)) {
result = AllocateFastAligned(size_in_bytes, nullptr, alignment, origin);
} else {
result = AllocateFastUnaligned(size_in_bytes, origin);
......@@ -322,7 +323,8 @@ AllocationResult SpaceWithLinearArea::AllocateRawAligned(
AllocationResult SpaceWithLinearArea::AllocateRawSlow(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
AllocationResult result =
USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
V8_COMPRESS_POINTERS_8GB_BOOL ||
(USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned)
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
return result;
......
......@@ -40,7 +40,13 @@ void ReadOnlyRoots::VerifyNameForProtectors() {
// Make sure the objects are adjacent in memory.
CHECK_LT(prev.address(), current.address());
Address computed_address = prev.address() + prev.Size();
CHECK_EQ(computed_address, current.address());
// TODO(teodutu): remove the need for this special case by ensuring that
// all object sizes are properly aligned.
if (V8_COMPRESS_POINTERS_8GB_BOOL) {
CHECK_LE(current.address() - computed_address, 4);
} else {
CHECK_EQ(computed_address, current.address());
}
}
prev = current;
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment