Commit a77183b1 authored by Teodor Dutu, committed by V8 LUCI CQ

Revert "[ptr-compr-8gb] Align runtime allocations to 8 bytes"

This reverts commit 703b0b31.

Reason for revert: a simpler approach will be used instead.

Original change's description:
> [ptr-compr-8gb] Align runtime allocations to 8 bytes
>
> In order to support a larger heap cage (8GB, 16GB), the cage offset
> will take up more than 32 bits. As a consequence, for 8GB cages, the
> least significant bit of the cage offset will overlap with the most
> significant bit of the tagged offset. To avoid this, allocations need
> to be aligned to 8 bytes to free up one bit from the offset.
> All changes are deactivated behind the build flag
> `v8_enable_pointer_compression_8gb`.
>
> Bug: v8:13070
> Change-Id: Ibb0bd0177f3e88dcd24fc0ee7526335df0faa987
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3791052
> Reviewed-by: Igor Sheludko <ishell@chromium.org>
> Auto-Submit: Teo Dutu <teodutu@google.com>
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Commit-Queue: Igor Sheludko <ishell@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#82299}

Bug: v8:13070
Change-Id: I5cb60f8e4500c908bdef5d417393edbe89652c9c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3877146
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Auto-Submit: Teo Dutu <teodutu@google.com>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83030}
parent 569e32af
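The bit overlap described in the original change is easiest to see with concrete numbers: a 4GB cage offset fits in 32 bits, but an 8GB cage needs 33, so the top offset bit would collide with the 32-bit tagged field. The sketch below is illustrative only (it is not V8's actual compression scheme) and uses hypothetical Compress/Decompress helpers to show how 8-byte alignment frees the one bit needed:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical illustration: if every object is 8-byte aligned, bit 0 of a
// cage offset is always zero and can be shifted out, letting a 32-bit
// tagged word span a full 8GB cage.
constexpr uint64_t kCageSize8Gb = uint64_t{8} * 1024 * 1024 * 1024;

uint32_t Compress(uint64_t cage_offset) {
  assert(cage_offset < kCageSize8Gb);
  assert(cage_offset % 8 == 0);  // the alignment the reverted change enforced
  return static_cast<uint32_t>(cage_offset >> 1) | 1;  // bit 0 = heap tag
}

uint64_t Decompress(uint32_t tagged) {
  return uint64_t{tagged & ~1u} << 1;
}

int main() {
  uint64_t offset = kCageSize8Gb - 8;  // highest 8-byte-aligned offset
  assert(Decompress(Compress(offset)) == offset);
  std::printf("offset 0x%llx round-trips\n",
              static_cast<unsigned long long>(offset));
}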
@@ -33,8 +33,7 @@ AllocationResult ConcurrentAllocator::AllocateRaw(int size_in_bytes,
   }
   AllocationResult result;
-  if (V8_COMPRESS_POINTERS_8GB_BOOL ||
-      (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned)) {
+  if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
    result = lab_.AllocateRawAligned(size_in_bytes, alignment);
  } else {
    result = lab_.AllocateRawUnaligned(size_in_bytes);
......
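The restored condition above routes an allocation to the aligned slow path only when the build uses allocation alignment at all and the request is not already tagged-aligned; the reverted 8GB scheme forced every allocation down the aligned path. A minimal standalone sketch of that dispatch, with stand-in constants and stand-in allocation bodies:

#include <cstdio>

// Stand-in; in V8 this comes from the build configuration.
constexpr bool USE_ALLOCATION_ALIGNMENT_BOOL = true;

enum AllocationAlignment { kTaggedAligned, kDoubleAligned, kDoubleUnaligned };

const char* AllocateRaw(AllocationAlignment alignment) {
  // Restored logic: no unconditional V8_COMPRESS_POINTERS_8GB_BOOL branch.
  if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
    return "aligned path";
  }
  return "unaligned fast path";
}

int main() {
  std::printf("%s\n", AllocateRaw(kTaggedAligned));  // unaligned fast path
  std::printf("%s\n", AllocateRaw(kDoubleAligned));  // aligned path
}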
@@ -11,7 +11,6 @@
 #include "src/ast/ast-source-ranges.h"
 #include "src/base/bits.h"
-#include "src/base/macros.h"
 #include "src/builtins/accessors.h"
 #include "src/builtins/constants-table-builder.h"
 #include "src/codegen/compilation-cache.h"
@@ -2124,9 +2123,6 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
   if (!site.is_null()) {
     DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
     adjusted_object_size += AllocationMemento::kSize;
-    if (V8_COMPRESS_POINTERS_8GB_BOOL &&
-        !IsAligned(object_size, kObjectAlignment8GbHeap))
-      adjusted_object_size += kObjectAlignment8GbHeap - kTaggedSize;
   }
   HeapObject raw_clone =
       allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
@@ -2145,14 +2141,8 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
     isolate()->heap()->WriteBarrierForRange(raw_clone, start, end);
   }
   if (!site.is_null()) {
-    if (V8_COMPRESS_POINTERS_8GB_BOOL &&
-        !IsAligned(object_size, kObjectAlignment8GbHeap)) {
-      isolate()->heap()->CreateFillerObjectAt(
-          raw_clone.address() + object_size,
-          kObjectAlignment8GbHeap - kTaggedSize);
-    }
-    AllocationMemento alloc_memento = AllocationMemento::unchecked_cast(Object(
-        raw_clone.ptr() + adjusted_object_size - AllocationMemento::kSize));
+    AllocationMemento alloc_memento = AllocationMemento::unchecked_cast(
+        Object(raw_clone.ptr() + object_size));
     InitializeAllocationMemento(alloc_memento, *site);
   }
......
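The factory change is pure address arithmetic: without the 8GB padding, the AllocationMemento starts exactly at the end of the cloned object, so raw_clone.ptr() + object_size and raw_clone.ptr() + adjusted_object_size - AllocationMemento::kSize denote the same address. A sketch with assumed sizes (kAllocationMementoSize here is illustrative, not V8's actual AllocationMemento::kSize):

#include <cassert>
#include <cstdint>

constexpr int kTaggedSize = 4;
constexpr int kAllocationMementoSize = 3 * kTaggedSize;  // assumed size

int main() {
  uintptr_t raw_clone = 0x4000;
  int object_size = 24;
  // Without 8GB padding, the clone is followed immediately by the memento.
  int adjusted_object_size = object_size + kAllocationMementoSize;
  // The old and new expressions from the diff agree once no filler exists:
  assert(raw_clone + object_size ==
         raw_clone + adjusted_object_size - kAllocationMementoSize);
}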
@@ -16,7 +16,6 @@
 #include "src/base/bits.h"
 #include "src/base/flags.h"
 #include "src/base/logging.h"
-#include "src/base/macros.h"
 #include "src/base/once.h"
 #include "src/base/platform/memory.h"
 #include "src/base/platform/mutex.h"
@@ -3095,9 +3094,6 @@ static_assert(!USE_ALLOCATION_ALIGNMENT_BOOL ||
               (HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
 
 int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
-  if (V8_COMPRESS_POINTERS_8GB_BOOL)
-    return kObjectAlignment8GbHeap - kTaggedSize;
-
   switch (alignment) {
     case kTaggedAligned:
       return 0;
@@ -3111,11 +3107,6 @@ int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
 
 // static
 int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
-  if (V8_COMPRESS_POINTERS_8GB_BOOL) {
-    return IsAligned(address, kObjectAlignment8GbHeap)
-               ? 0
-               : kObjectAlignment8GbHeap - kTaggedSize;
-  }
   if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
     return kTaggedSize;
   if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
......
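The restored GetFillToAlign logic pads by one tagged slot either to reach an 8-byte boundary (kDoubleAligned) or to move a double field off one (kDoubleUnaligned). A self-contained sketch, simplified directly from the restored lines above:

#include <cassert>
#include <cstdint>

constexpr int kTaggedSize = 4;
constexpr uintptr_t kDoubleAlignmentMask = 8 - 1;

enum AllocationAlignment { kTaggedAligned, kDoubleAligned, kDoubleUnaligned };

int GetFillToAlign(uintptr_t address, AllocationAlignment alignment) {
  if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
    return kTaggedSize;  // pad up to the next 8-byte boundary
  if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
    return kTaggedSize;  // pad so the double lands off the boundary
  return 0;
}

int main() {
  assert(GetFillToAlign(0x1000, kTaggedAligned) == 0);
  assert(GetFillToAlign(0x1004, kDoubleAligned) == kTaggedSize);
  assert(GetFillToAlign(0x1000, kDoubleAligned) == 0);
  assert(GetFillToAlign(0x1000, kDoubleUnaligned) == kTaggedSize);
}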
@@ -100,13 +100,7 @@ V8_INLINE bool PagedSpaceBase::EnsureAllocation(int size_in_bytes,
   // We don't know exactly how much filler we need to align until space is
   // allocated, so assume the worst case.
-  // TODO(teodutu): remove the need for this special case by ensuring that the
-  // allocation top stays properly aligned after allocations.
-  if (V8_COMPRESS_POINTERS_8GB_BOOL && executable_ == EXECUTABLE) {
-    DCHECK(IsAligned(allocation_info_.top(), kCodeAlignment));
-  } else {
-    size_in_bytes += Heap::GetMaximumFillToAlign(alignment);
-  }
+  size_in_bytes += Heap::GetMaximumFillToAlign(alignment);
   if (out_max_aligned_size) {
     *out_max_aligned_size = size_in_bytes;
   }
......
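The EnsureAllocation hunk restores the worst-case reservation: the final start address is unknown until the space is set aside, so the request is grown by the maximum possible fill and any unused padding becomes filler later. A sketch of that pattern, with a simplified stand-in for Heap::GetMaximumFillToAlign:

#include <cassert>

constexpr int kTaggedSize = 4;

enum AllocationAlignment { kTaggedAligned, kDoubleAligned, kDoubleUnaligned };

// Simplified from the switch shown in the diff: tagged-aligned requests
// never need fill; double-(un)aligned ones may need one tagged slot.
int GetMaximumFillToAlign(AllocationAlignment alignment) {
  return alignment == kTaggedAligned ? 0 : kTaggedSize;
}

int main() {
  int size_in_bytes = 32;
  AllocationAlignment alignment = kDoubleAligned;
  // Restored behaviour: always reserve the worst case up front.
  size_in_bytes += GetMaximumFillToAlign(alignment);
  assert(size_in_bytes == 36);
}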
@@ -8,9 +8,7 @@
 #include <cstring>
 
 #include "src/base/lazy-instance.h"
-#include "src/base/macros.h"
 #include "src/base/platform/mutex.h"
 #include "src/common/globals.h"
-#include "src/common/ptr-compr-inl.h"
 #include "src/heap/basic-memory-chunk.h"
 #include "src/heap/heap-write-barrier-inl.h"
@@ -299,13 +297,6 @@ HeapObject ReadOnlyHeapObjectIterator::Next() {
       continue;
     }
     HeapObject object = HeapObject::FromAddress(current_addr_);
-    if (V8_COMPRESS_POINTERS_8GB_BOOL &&
-        !IsAligned(current_addr_, kObjectAlignment8GbHeap) &&
-        !object.IsFreeSpace()) {
-      current_addr_ = RoundUp<kObjectAlignment8GbHeap>(current_addr_);
-      continue;
-    }
     const int object_size = object.Size();
     current_addr_ += object_size;
......
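Both read-only iterators removed the same pattern: when the cursor was not 8-byte aligned and did not point at a filler, it was rounded up to the next kObjectAlignment8GbHeap boundary. A minimal sketch of the rounding helper involved (V8's real RoundUp lives in src/base/macros.h; this reimplementation is illustrative):

#include <cassert>
#include <cstdint>

// kObjectAlignment8GbHeap is 8 under the 8GB scheme.
constexpr uintptr_t kObjectAlignment8GbHeap = 8;

template <uintptr_t alignment>
uintptr_t RoundUp(uintptr_t value) {
  static_assert((alignment & (alignment - 1)) == 0, "power of two");
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  assert(RoundUp<kObjectAlignment8GbHeap>(0x1001) == 0x1008);
  assert(RoundUp<kObjectAlignment8GbHeap>(0x1008) == 0x1008);  // already aligned
}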
@@ -9,7 +9,6 @@
 #include "include/v8-internal.h"
 #include "include/v8-platform.h"
 #include "src/base/logging.h"
-#include "src/base/macros.h"
 #include "src/common/globals.h"
 #include "src/common/ptr-compr-inl.h"
 #include "src/execution/isolate.h"
@@ -18,7 +17,6 @@
 #include "src/heap/heap-inl.h"
 #include "src/heap/memory-allocator.h"
 #include "src/heap/read-only-heap.h"
-#include "src/objects/heap-object.h"
 #include "src/objects/objects-inl.h"
 #include "src/snapshot/snapshot-data.h"
 #include "src/snapshot/snapshot-utils.h"
@@ -437,16 +435,6 @@ class ReadOnlySpaceObjectIterator : public ObjectIterator {
       continue;
     }
     HeapObject obj = HeapObject::FromAddress(cur_addr_);
-    // TODO(teodutu): Simplify checking for one pointer fillers. We cannot
-    // verify them directly because some of the objects here are initialised
-    // before the one pointer filler map, which leads to the wrong map being
-    // written instead.
-    if (V8_COMPRESS_POINTERS_8GB_BOOL &&
-        !IsAligned(cur_addr_, kObjectAlignment8GbHeap) &&
-        !obj.IsFreeSpace()) {
-      cur_addr_ = RoundUp<kObjectAlignment8GbHeap>(cur_addr_);
-      continue;
-    }
     const int obj_size = obj.Size();
     cur_addr_ += obj_size;
     DCHECK_LE(cur_addr_, cur_end_);
@@ -691,8 +679,7 @@ AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
 AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
                                             AllocationAlignment alignment) {
   AllocationResult result =
-      V8_COMPRESS_POINTERS_8GB_BOOL ||
-              (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned)
+      USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
           ? AllocateRawAligned(size_in_bytes, alignment)
           : AllocateRawUnaligned(size_in_bytes);
   HeapObject heap_obj;
......
@@ -256,8 +256,7 @@ AllocationResult SpaceWithLinearArea::AllocateRaw(int size_in_bytes,
   AllocationResult result;
-  if (V8_COMPRESS_POINTERS_8GB_BOOL ||
-      (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned)) {
+  if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
     result = AllocateFastAligned(size_in_bytes, nullptr, alignment, origin);
   } else {
     result = AllocateFastUnaligned(size_in_bytes, origin);
@@ -323,8 +322,7 @@ AllocationResult SpaceWithLinearArea::AllocateRawAligned(
 AllocationResult SpaceWithLinearArea::AllocateRawSlow(
     int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
   AllocationResult result =
-      V8_COMPRESS_POINTERS_8GB_BOOL ||
-              (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned)
+      USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
          ? AllocateRawAligned(size_in_bytes, alignment, origin)
          : AllocateRawUnaligned(size_in_bytes, origin);
   return result;
......
@@ -40,13 +40,7 @@ void ReadOnlyRoots::VerifyNameForProtectors() {
     // Make sure the objects are adjacent in memory.
     CHECK_LT(prev.address(), current.address());
     Address computed_address = prev.address() + prev.Size();
-    // TODO(teodutu): remove the need for this special case by ensuring that
-    // all object sizes are properly aligned.
-    if (V8_COMPRESS_POINTERS_8GB_BOOL) {
-      CHECK_LE(current.address() - computed_address, 4);
-    } else {
-      CHECK_EQ(computed_address, current.address());
-    }
+    CHECK_EQ(computed_address, current.address());
   }
   prev = current;
......
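Finally, the roots verification goes back to demanding exact adjacency: each read-only object must begin precisely where its predecessor ends, whereas the 8GB scheme tolerated up to 4 bytes (kObjectAlignment8GbHeap - kTaggedSize) of alignment filler. A worked example of the two invariants, with made-up addresses:

#include <cassert>
#include <cstdint>

constexpr int kTaggedSize = 4;
constexpr int kObjectAlignment8GbHeap = 8;

int main() {
  uintptr_t prev_address = 0x8000;
  int prev_size = 20;  // not a multiple of 8
  uintptr_t computed_address = prev_address + prev_size;  // 0x8014

  // Old, padded layout: the next object starts at the next 8-byte boundary.
  uintptr_t current_padded = 0x8018;
  assert(current_padded - computed_address <=
         kObjectAlignment8GbHeap - kTaggedSize);

  // Restored invariant: objects are exactly adjacent.
  uintptr_t current_exact = computed_address;
  assert(current_exact == computed_address);
}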