Commit 579c3b0f authored by Igor Sheludko, committed by V8 LUCI CQ

[heap][cleanup] Rename kWordAligned to kTaggedAligned

Also introduce a USE_ALLOCATION_ALIGNMENT_BOOL constant, which is true
only for configurations that require aligned allocations, and use it to
statically fall back to unaligned allocations on configurations that do
not require them.

This is a prerequisite for introducing the real kWordAligned mode for
kSystemPointerSize aligned allocations.

Bug: v8:8875
Change-Id: I155d12435f344324bc1bf19da88ee823c8f2ca6c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3283064
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77918}
parent 55cd8648
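For context, the pattern this change introduces can be illustrated with a small standalone sketch (not V8 source; the bump-pointer "heap" and the `AllocateRaw`/`AllocateAligned`/`AllocateUnaligned` helpers below are invented for illustration, while `AllocationAlignment` and `USE_ALLOCATION_ALIGNMENT_BOOL` mirror the definitions added in the diff): because the constant is known at compile time, call sites written as `if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned)` have their aligned branch compiled out on configurations that do not need it.

```cpp
// Standalone sketch of the "statically fall back to unaligned" pattern.
#include <cstdint>
#include <cstdio>

enum AllocationAlignment { kTaggedAligned, kDoubleAligned, kDoubleUnaligned };

#ifdef V8_HOST_ARCH_32_BIT
#define USE_ALLOCATION_ALIGNMENT_BOOL true
#else
// On 64-bit configurations (including pointer compression) unaligned access
// to doubles and full words is allowed, so the aligned path is compiled out.
#define USE_ALLOCATION_ALIGNMENT_BOOL false
#endif

namespace {
uint8_t g_heap[1 << 12];  // toy bump-pointer arena, illustration only
size_t g_top = 0;

uintptr_t AllocateUnaligned(size_t size) {
  uintptr_t result = reinterpret_cast<uintptr_t>(&g_heap[g_top]);
  g_top += size;
  return result;
}

uintptr_t AllocateAligned(size_t size, AllocationAlignment alignment) {
  // Pad the bump pointer so double-aligned requests land on an 8-byte offset.
  size_t align = (alignment == kTaggedAligned) ? sizeof(void*) : sizeof(double);
  g_top = (g_top + align - 1) & ~(align - 1);
  return AllocateUnaligned(size);
}

uintptr_t AllocateRaw(size_t size, AllocationAlignment alignment) {
  // When USE_ALLOCATION_ALIGNMENT_BOOL is false, this condition folds to
  // false at compile time and the aligned branch becomes dead code.
  if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
    return AllocateAligned(size, alignment);
  }
  return AllocateUnaligned(size);
}
}  // namespace

int main() {
  uintptr_t a = AllocateRaw(12, kTaggedAligned);
  uintptr_t b = AllocateRaw(16, kDoubleAligned);
  std::printf("a=%zx b=%zx\n", static_cast<size_t>(a), static_cast<size_t>(b));
  return 0;
}
```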
@@ -868,8 +868,27 @@ inline constexpr bool IsSharedAllocationType(AllocationType kind) {
          kind == AllocationType::kSharedMap;
 }
-// TODO(ishell): review and rename kWordAligned to kTaggedAligned.
-enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
+enum AllocationAlignment {
+  // The allocated address is kTaggedSize aligned (this is default for most of
+  // the allocations).
+  kTaggedAligned,
+  // The allocated address is kDoubleSize aligned.
+  kDoubleAligned,
+  // The (allocated address + kTaggedSize) is kDoubleSize aligned.
+  kDoubleUnaligned
+};
+
+#ifdef V8_HOST_ARCH_32_BIT
+#define USE_ALLOCATION_ALIGNMENT_BOOL true
+#else
+#ifdef V8_COMPRESS_POINTERS
+// TODO(ishell, v8:8875): Consider using aligned allocations once the
+// allocation alignment inconsistency is fixed. For now we keep using
+// unaligned access since both x64 and arm64 architectures (where pointer
+// compression is supported) allow unaligned access to doubles and full words.
+#endif  // V8_COMPRESS_POINTERS
+#define USE_ALLOCATION_ALIGNMENT_BOOL false
+#endif  // V8_HOST_ARCH_32_BIT
 enum class AccessMode { ATOMIC, NON_ATOMIC };
......
@@ -36,7 +36,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
     AllocationResult result = local_heap.AllocateRaw(
         kSmallObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
-        AllocationAlignment::kWordAligned);
+        AllocationAlignment::kTaggedAligned);
     if (!result.IsRetry()) {
       heap->CreateFillerObjectAtBackground(
           result.ToAddress(), kSmallObjectSize,
@@ -47,7 +47,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
     result = local_heap.AllocateRaw(kMediumObjectSize, AllocationType::kOld,
                                     AllocationOrigin::kRuntime,
-                                    AllocationAlignment::kWordAligned);
+                                    AllocationAlignment::kTaggedAligned);
     if (!result.IsRetry()) {
       heap->CreateFillerObjectAtBackground(
           result.ToAddress(), kMediumObjectSize,
@@ -58,7 +58,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
     result = local_heap.AllocateRaw(kLargeObjectSize, AllocationType::kOld,
                                     AllocationOrigin::kRuntime,
-                                    AllocationAlignment::kWordAligned);
+                                    AllocationAlignment::kTaggedAligned);
     if (!result.IsRetry()) {
       heap->CreateFillerObjectAtBackground(
           result.ToAddress(), kLargeObjectSize,
@@ -133,7 +133,7 @@ AllocationResult ConcurrentAllocator::AllocateInLabSlow(
 bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
   auto result = space_->RawRefillLabBackground(
-      local_heap_, kLabSize, kMaxLabSize, kWordAligned, origin);
+      local_heap_, kLabSize, kMaxLabSize, kTaggedAligned, origin);
   if (!result) return false;
   if (local_heap_->heap()->incremental_marking()->black_allocation()) {
......
@@ -269,7 +269,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
   HeapObject AllocateRawWithImmortalMap(
       int size, AllocationType allocation, Map map,
-      AllocationAlignment alignment = kWordAligned);
+      AllocationAlignment alignment = kTaggedAligned);
   HeapObject NewWithImmortalMap(Map map, AllocationType allocation);
   Handle<FixedArray> NewFixedArrayWithFiller(Handle<Map> map, int length,
@@ -295,7 +295,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
   ReadOnlyRoots read_only_roots() { return impl()->read_only_roots(); }
   HeapObject AllocateRaw(int size, AllocationType allocation,
-                         AllocationAlignment alignment = kWordAligned);
+                         AllocationAlignment alignment = kTaggedAligned);
   friend TorqueGeneratedFactory<Impl>;
 };
......
@@ -378,10 +378,10 @@ HeapObject Factory::New(Handle<Map> map, AllocationType allocation) {
   return result;
 }
-Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
+Handle<HeapObject> Factory::NewFillerObject(int size,
+                                            AllocationAlignment alignment,
                                             AllocationType allocation,
                                             AllocationOrigin origin) {
-  AllocationAlignment alignment = double_align ? kDoubleAligned : kWordAligned;
   Heap* heap = isolate()->heap();
   HeapObject result = heap->AllocateRawWith<Heap::kRetryOrFail>(
       size, allocation, origin, alignment);
......
@@ -444,7 +444,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
   // Allocate a block of memory of the given AllocationType (filled with a
   // filler). Used as a fall-back for generated code when the space is full.
   Handle<HeapObject> NewFillerObject(
-      int size, bool double_align, AllocationType allocation,
+      int size, AllocationAlignment alignment, AllocationType allocation,
       AllocationOrigin origin = AllocationOrigin::kRuntime);
   Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
@@ -989,7 +989,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
   // ------
   // Customization points for FactoryBase
   HeapObject AllocateRaw(int size, AllocationType allocation,
-                         AllocationAlignment alignment = kWordAligned);
+                         AllocationAlignment alignment = kTaggedAligned);
   Isolate* isolate() const {
     // Downcast to the privately inherited sub-class using c-style casts to
......
@@ -207,7 +207,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
   DCHECK(AllowHandleAllocation::IsAllowed());
   DCHECK(AllowHeapAllocation::IsAllowed());
   DCHECK_IMPLIES(type == AllocationType::kCode || type == AllocationType::kMap,
-                 alignment == AllocationAlignment::kWordAligned);
+                 alignment == AllocationAlignment::kTaggedAligned);
   DCHECK_EQ(gc_state(), NOT_IN_GC);
 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
   if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
@@ -321,7 +321,7 @@ HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
   DCHECK_EQ(gc_state(), NOT_IN_GC);
   Heap* heap = isolate()->heap();
   if (allocation == AllocationType::kYoung &&
-      alignment == AllocationAlignment::kWordAligned &&
+      alignment == AllocationAlignment::kTaggedAligned &&
       size <= MaxRegularHeapObjectSize(allocation) &&
       V8_LIKELY(!FLAG_single_generation && FLAG_inline_new &&
                 FLAG_gc_interval == -1)) {
......
@@ -3005,13 +3005,12 @@ STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kTaggedSize));
 STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kDoubleAlignment));
 #endif
-#ifdef V8_HOST_ARCH_32_BIT
-STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
-#endif
+STATIC_ASSERT(!USE_ALLOCATION_ALIGNMENT_BOOL ||
+              (HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
 int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
   switch (alignment) {
-    case kWordAligned:
+    case kTaggedAligned:
       return 0;
     case kDoubleAligned:
     case kDoubleUnaligned:
......
@@ -2078,7 +2078,7 @@ class Heap {
   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
       int size_in_bytes, AllocationType allocation,
       AllocationOrigin origin = AllocationOrigin::kRuntime,
-      AllocationAlignment alignment = kWordAligned);
+      AllocationAlignment alignment = kTaggedAligned);
   // This method will try to allocate objects quickly (AllocationType::kYoung)
   // otherwise it falls back to a slower path indicated by the mode.
@@ -2087,13 +2087,13 @@ class Heap {
   V8_WARN_UNUSED_RESULT inline HeapObject AllocateRawWith(
       int size, AllocationType allocation,
       AllocationOrigin origin = AllocationOrigin::kRuntime,
-      AllocationAlignment alignment = kWordAligned);
+      AllocationAlignment alignment = kTaggedAligned);
   // Call AllocateRawWith with kRetryOrFail. Matches the method in LocalHeap.
   V8_WARN_UNUSED_RESULT inline Address AllocateRawOrFail(
       int size, AllocationType allocation,
       AllocationOrigin origin = AllocationOrigin::kRuntime,
-      AllocationAlignment alignment = kWordAligned);
+      AllocationAlignment alignment = kTaggedAligned);
   // This method will try to perform an allocation of a given size of a given
   // AllocationType. If the allocation fails, a regular full garbage collection
@@ -2102,7 +2102,7 @@ class Heap {
   // returned.
   V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithLightRetrySlowPath(
       int size, AllocationType allocation, AllocationOrigin origin,
-      AllocationAlignment alignment = kWordAligned);
+      AllocationAlignment alignment = kTaggedAligned);
   // This method will try to perform an allocation of a given size of a given
   // AllocationType. If the allocation fails, a regular full garbage collection
@@ -2112,7 +2112,7 @@ class Heap {
   // If the allocation still fails after that a fatal error is thrown.
   V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithRetryOrFailSlowPath(
       int size, AllocationType allocation, AllocationOrigin origin,
-      AllocationAlignment alignment = kWordAligned);
+      AllocationAlignment alignment = kTaggedAligned);
   // Allocates a heap object based on the map.
   V8_WARN_UNUSED_RESULT AllocationResult Allocate(Handle<Map> map,
......
@@ -85,7 +85,7 @@ AllocationResult EvacuationAllocator::AllocateInLAB(
 bool EvacuationAllocator::NewLocalAllocationBuffer() {
   if (lab_allocation_will_fail_) return false;
   AllocationResult result =
-      new_space_->AllocateRawSynchronized(kLabSize, kWordAligned);
+      new_space_->AllocateRawSynchronized(kLabSize, kTaggedAligned);
   if (result.IsRetry()) {
     lab_allocation_will_fail_ = true;
     return false;
......
@@ -57,7 +57,7 @@ class V8_EXPORT_PRIVATE LocalFactory : public FactoryBase<LocalFactory> {
   // ------
   // Customization points for FactoryBase.
   HeapObject AllocateRaw(int size, AllocationType allocation,
-                         AllocationAlignment alignment = kWordAligned);
+                         AllocationAlignment alignment = kTaggedAligned);
   LocalIsolate* isolate() {
     // Downcast to the privately inherited sub-class using c-style casts to
......
@@ -25,7 +25,7 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
   DCHECK(AllowHandleAllocation::IsAllowed());
   DCHECK(AllowHeapAllocation::IsAllowed());
   DCHECK_IMPLIES(type == AllocationType::kCode || type == AllocationType::kMap,
-                 alignment == AllocationAlignment::kWordAligned);
+                 alignment == AllocationAlignment::kTaggedAligned);
   Heap::HeapState state = heap()->gc_state();
   DCHECK(state == Heap::TEAR_DOWN || state == Heap::NOT_IN_GC);
   DCHECK(IsRunning());
......
@@ -130,14 +130,14 @@ class V8_EXPORT_PRIVATE LocalHeap {
   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
       int size_in_bytes, AllocationType allocation,
       AllocationOrigin origin = AllocationOrigin::kRuntime,
-      AllocationAlignment alignment = kWordAligned);
+      AllocationAlignment alignment = kTaggedAligned);
   // Allocates an uninitialized object and crashes when object
   // cannot be allocated.
   V8_WARN_UNUSED_RESULT inline Address AllocateRawOrFail(
       int size_in_bytes, AllocationType allocation,
       AllocationOrigin origin = AllocationOrigin::kRuntime,
-      AllocationAlignment alignment = kWordAligned);
+      AllocationAlignment alignment = kTaggedAligned);
   inline void CreateFillerObjectAt(Address addr, int size,
                                    ClearRecordedSlots clear_slots_mode);
......
@@ -96,7 +96,7 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
   AllocationResult result;
-  if (alignment != kWordAligned) {
+  if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
     result = AllocateFastAligned(size_in_bytes, nullptr, alignment, origin);
   } else {
     result = AllocateFastUnaligned(size_in_bytes, origin);
......
@@ -611,25 +611,15 @@ std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
 AllocationResult NewSpace::AllocateRawSlow(int size_in_bytes,
                                            AllocationAlignment alignment,
                                            AllocationOrigin origin) {
-#ifdef V8_HOST_ARCH_32_BIT
-  return alignment != kWordAligned
+  return USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
              ? AllocateRawAligned(size_in_bytes, alignment, origin)
              : AllocateRawUnaligned(size_in_bytes, origin);
-#else
-#ifdef V8_COMPRESS_POINTERS
-  // TODO(ishell, v8:8875): Consider using aligned allocations once the
-  // allocation alignment inconsistency is fixed. For now we keep using
-  // unaligned access since both x64 and arm64 architectures (where pointer
-  // compression is supported) allow unaligned access to doubles and full words.
-#endif  // V8_COMPRESS_POINTERS
-  return AllocateRawUnaligned(size_in_bytes, origin);
-#endif
 }
 AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
                                                 AllocationOrigin origin) {
   DCHECK(!FLAG_enable_third_party_heap);
-  if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
+  if (!EnsureAllocation(size_in_bytes, kTaggedAligned)) {
     return AllocationResult::Retry(NEW_SPACE);
   }
......
@@ -393,6 +393,10 @@ class V8_EXPORT_PRIVATE NewSpace
       int size_in_bytes, AllocationAlignment alignment,
       AllocationOrigin origin = AllocationOrigin::kRuntime);
+  V8_WARN_UNUSED_RESULT AllocationResult
+  AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
+                     AllocationOrigin origin = AllocationOrigin::kRuntime);
   // Reset the allocation pointer to the beginning of the active semispace.
   void ResetLinearAllocationArea();
@@ -511,10 +515,6 @@ class V8_EXPORT_PRIVATE NewSpace
   AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
                   AllocationOrigin origin);
-  V8_WARN_UNUSED_RESULT AllocationResult
-  AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
-                     AllocationOrigin origin = AllocationOrigin::kRuntime);
   V8_WARN_UNUSED_RESULT AllocationResult AllocateRawUnaligned(
       int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
......
@@ -176,7 +176,7 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
   DCHECK(!FLAG_enable_third_party_heap);
   AllocationResult result;
-  if (alignment != kWordAligned) {
+  if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
     result = AllocateFastAligned(size_in_bytes, nullptr, alignment);
   } else {
     result = AllocateFastUnaligned(size_in_bytes);
......
@@ -993,14 +993,10 @@ AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
                  kGCCallbackScheduleIdleGarbageCollection);
   }
-#ifdef V8_HOST_ARCH_32_BIT
   AllocationResult result =
-      alignment != kWordAligned
+      USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
           ? AllocateRawAligned(size_in_bytes, alignment, origin)
           : AllocateRawUnaligned(size_in_bytes, origin);
-#else
-  AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
-#endif
   return result;
 }
......
@@ -692,13 +692,10 @@ AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
 AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
                                             AllocationAlignment alignment) {
-#ifdef V8_HOST_ARCH_32_BIT
-  AllocationResult result = alignment != kWordAligned
+  AllocationResult result =
+      USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
           ? AllocateRawAligned(size_in_bytes, alignment)
           : AllocateRawUnaligned(size_in_bytes);
-#else
-  AllocationResult result = AllocateRawUnaligned(size_in_bytes);
-#endif
   HeapObject heap_obj;
   if (!result.IsRetry() && result.To(&heap_obj)) {
     DCHECK(heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
......
@@ -967,21 +967,16 @@ WriteBarrierMode HeapObject::GetWriteBarrierMode(
 // static
 AllocationAlignment HeapObject::RequiredAlignment(Map map) {
-  // TODO(bmeurer, v8:4153): We should think about requiring double alignment
+  // TODO(v8:4153): We should think about requiring double alignment
   // in general for ByteArray, since they are used as backing store for typed
   // arrays now.
-#ifdef V8_COMPRESS_POINTERS
-  // TODO(ishell, v8:8875): Consider using aligned allocations once the
-  // allocation alignment inconsistency is fixed. For now we keep using
-  // unaligned access since both x64 and arm64 architectures (where pointer
-  // compression is supported) allow unaligned access to doubles and full words.
-#endif  // V8_COMPRESS_POINTERS
-#ifdef V8_HOST_ARCH_32_BIT
-  int instance_type = map.instance_type();
-  if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) return kDoubleAligned;
-  if (instance_type == HEAP_NUMBER_TYPE) return kDoubleUnaligned;
-#endif  // V8_HOST_ARCH_32_BIT
-  return kWordAligned;
+  // TODO(ishell, v8:8875): Consider using aligned allocations for BigInt.
+  if (USE_ALLOCATION_ALIGNMENT_BOOL) {
+    int instance_type = map.instance_type();
+    if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) return kDoubleAligned;
+    if (instance_type == HEAP_NUMBER_TYPE) return kDoubleUnaligned;
+  }
+  return kTaggedAligned;
 }
 Address HeapObject::GetFieldAddress(int field_offset) const {
......
@@ -458,7 +458,8 @@ RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
   DCHECK_EQ(2, args.length());
   CONVERT_SMI_ARG_CHECKED(size, 0);
   CONVERT_SMI_ARG_CHECKED(flags, 1);
-  bool double_align = AllocateDoubleAlignFlag::decode(flags);
+  AllocationAlignment alignment =
+      AllocateDoubleAlignFlag::decode(flags) ? kDoubleAligned : kTaggedAligned;
   bool allow_large_object_allocation =
       AllowLargeObjectAllocationFlag::decode(flags);
   CHECK(IsAligned(size, kTaggedSize));
@@ -479,9 +480,9 @@ RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
   // TODO(v8:9472): Until double-aligned allocation is fixed for new-space
   // allocations, don't request it.
-  double_align = false;
+  alignment = kTaggedAligned;
-  return *isolate->factory()->NewFillerObject(size, double_align,
+  return *isolate->factory()->NewFillerObject(size, alignment,
                                               AllocationType::kYoung,
                                               AllocationOrigin::kGeneratedCode);
 }
@@ -491,7 +492,8 @@ RUNTIME_FUNCTION(Runtime_AllocateInOldGeneration) {
   DCHECK_EQ(2, args.length());
   CONVERT_SMI_ARG_CHECKED(size, 0);
   CONVERT_SMI_ARG_CHECKED(flags, 1);
-  bool double_align = AllocateDoubleAlignFlag::decode(flags);
+  AllocationAlignment alignment =
+      AllocateDoubleAlignFlag::decode(flags) ? kDoubleAligned : kTaggedAligned;
   bool allow_large_object_allocation =
       AllowLargeObjectAllocationFlag::decode(flags);
   CHECK(IsAligned(size, kTaggedSize));
@@ -499,9 +501,8 @@ RUNTIME_FUNCTION(Runtime_AllocateInOldGeneration) {
   if (!allow_large_object_allocation) {
     CHECK(size <= kMaxRegularHeapObjectSize);
   }
-  return *isolate->factory()->NewFillerObject(size, double_align,
-                                              AllocationType::kOld,
-                                              AllocationOrigin::kGeneratedCode);
+  return *isolate->factory()->NewFillerObject(
+      size, alignment, AllocationType::kOld, AllocationOrigin::kGeneratedCode);
 }
 RUNTIME_FUNCTION(Runtime_AllocateByteArray) {
......
@@ -697,7 +697,7 @@ Handle<HeapObject> Deserializer<IsolateT>::ReadMetaMap() {
   const int size_in_tagged = size_in_bytes / kTaggedSize;
   HeapObject raw_obj =
-      Allocate(SpaceToAllocation(space), size_in_bytes, kWordAligned);
+      Allocate(SpaceToAllocation(space), size_in_bytes, kTaggedAligned);
   raw_obj.set_map_after_allocation(Map::unchecked_cast(raw_obj));
   MemsetTagged(raw_obj.RawField(kTaggedSize),
                Smi::uninitialized_deserialization_value(), size_in_tagged - 1);
......
@@ -114,7 +114,7 @@ bool ReadOnlySerializer::MustBeDeferred(HeapObject object) {
   }
   // Defer objects with special alignment requirements until the filler roots
   // are serialized.
-  return HeapObject::RequiredAlignment(object.map()) != kWordAligned;
+  return HeapObject::RequiredAlignment(object.map()) != kTaggedAligned;
 }
 bool ReadOnlySerializer::SerializeUsingReadOnlyObjectCache(
......
@@ -50,11 +50,11 @@ void AllocateSomeObjects(LocalHeap* local_heap) {
   for (int i = 0; i < kNumIterations; i++) {
     Address address = local_heap->AllocateRawOrFail(
         kSmallObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
-        AllocationAlignment::kWordAligned);
+        AllocationAlignment::kTaggedAligned);
     CreateFixedArray(local_heap->heap(), address, kSmallObjectSize);
     address = local_heap->AllocateRawOrFail(
         kMediumObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
-        AllocationAlignment::kWordAligned);
+        AllocationAlignment::kTaggedAligned);
     CreateFixedArray(local_heap->heap(), address, kMediumObjectSize);
     if (i % 10 == 0) {
       local_heap->Safepoint();
@@ -247,7 +247,7 @@ class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
     for (int i = 0; i < kNumIterations; i++) {
       AllocationResult result = local_heap.AllocateRaw(
           kLargeObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
-          AllocationAlignment::kWordAligned);
+          AllocationAlignment::kTaggedAligned);
       if (result.IsRetry()) {
         local_heap.TryPerformCollection();
       } else {
@@ -322,12 +322,12 @@ class ConcurrentBlackAllocationThread final : public v8::base::Thread {
       }
       Address address = local_heap.AllocateRawOrFail(
           kSmallObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
-          AllocationAlignment::kWordAligned);
+          AllocationAlignment::kTaggedAligned);
       objects_->push_back(address);
       CreateFixedArray(heap_, address, kSmallObjectSize);
       address = local_heap.AllocateRawOrFail(
           kMediumObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
-          AllocationAlignment::kWordAligned);
+          AllocationAlignment::kTaggedAligned);
       objects_->push_back(address);
       CreateFixedArray(heap_, address, kMediumObjectSize);
     }
......
@@ -1757,7 +1757,7 @@ HEAP_TEST(TestSizeOfObjects) {
 TEST(TestAlignmentCalculations) {
   // Maximum fill amounts are consistent.
   int maximum_double_misalignment = kDoubleSize - kTaggedSize;
-  int max_word_fill = Heap::GetMaximumFillToAlign(kWordAligned);
+  int max_word_fill = Heap::GetMaximumFillToAlign(kTaggedAligned);
   CHECK_EQ(0, max_word_fill);
   int max_double_fill = Heap::GetMaximumFillToAlign(kDoubleAligned);
   CHECK_EQ(maximum_double_misalignment, max_double_fill);
@@ -1768,9 +1768,9 @@ TEST(TestAlignmentCalculations) {
   int fill = 0;
   // Word alignment never requires fill.
-  fill = Heap::GetFillToAlign(base, kWordAligned);
+  fill = Heap::GetFillToAlign(base, kTaggedAligned);
   CHECK_EQ(0, fill);
-  fill = Heap::GetFillToAlign(base + kTaggedSize, kWordAligned);
+  fill = Heap::GetFillToAlign(base + kTaggedSize, kTaggedAligned);
   CHECK_EQ(0, fill);
   // No fill is required when address is double aligned.
@@ -1789,7 +1789,8 @@ TEST(TestAlignmentCalculations) {
 static HeapObject NewSpaceAllocateAligned(int size,
                                           AllocationAlignment alignment) {
   Heap* heap = CcTest::heap();
-  AllocationResult allocation = heap->new_space()->AllocateRaw(size, alignment);
+  AllocationResult allocation =
+      heap->new_space()->AllocateRawAligned(size, alignment);
   HeapObject obj;
   allocation.To(&obj);
   heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
@@ -1802,7 +1803,7 @@ static Address AlignNewSpace(AllocationAlignment alignment, int offset) {
   int fill = Heap::GetFillToAlign(*top_addr, alignment);
   int allocation = fill + offset;
   if (allocation) {
-    NewSpaceAllocateAligned(allocation, kWordAligned);
+    NewSpaceAllocateAligned(allocation, kTaggedAligned);
   }
   return *top_addr;
 }
@@ -1870,7 +1871,7 @@ static Address AlignOldSpace(AllocationAlignment alignment, int offset) {
   int fill = Heap::GetFillToAlign(*top_addr, alignment);
   int allocation = fill + offset;
   if (allocation) {
-    OldSpaceAllocateAligned(allocation, kWordAligned);
+    OldSpaceAllocateAligned(allocation, kTaggedAligned);
   }
   Address top = *top_addr;
   // Now force the remaining allocation onto the free list.
@@ -3721,8 +3722,7 @@ TEST(Regress169928) {
   // fill pointer value.
   HeapObject obj;
   AllocationResult allocation = CcTest::heap()->new_space()->AllocateRaw(
-      AllocationMemento::kSize + kTaggedSize,
-      AllocationAlignment::kWordAligned);
+      AllocationMemento::kSize + kTaggedSize, kTaggedAligned);
   CHECK(allocation.To(&obj));
   Address addr_obj = obj.address();
   CcTest::heap()->CreateFillerObjectAt(addr_obj,
......
@@ -15,7 +15,7 @@ namespace internal {
 namespace heap {
 static Address AllocateLabBackingStore(Heap* heap, intptr_t size_in_bytes) {
-  AllocationResult result = heap->old_space()->AllocateRaw(
+  AllocationResult result = heap->old_space()->AllocateRawAligned(
       static_cast<int>(size_in_bytes), kDoubleAligned);
   Address adr = result.ToObjectChecked().address();
   return adr;
@@ -38,10 +38,9 @@ static void VerifyIterable(v8::internal::Address base,
   }
 }
 static bool AllocateFromLab(Heap* heap, LocalAllocationBuffer* lab,
                             intptr_t size_in_bytes,
-                            AllocationAlignment alignment = kWordAligned) {
+                            AllocationAlignment alignment = kTaggedAligned) {
   HeapObject obj;
   AllocationResult result =
       lab->AllocateRawAligned(static_cast<int>(size_in_bytes), alignment);
@@ -53,7 +52,6 @@ static bool AllocateFromLab(Heap* heap, LocalAllocationBuffer* lab,
   return false;
 }
 TEST(InvalidLab) {
   LocalAllocationBuffer lab = LocalAllocationBuffer::InvalidBuffer();
   CHECK(!lab.IsValid());
@@ -246,16 +244,16 @@ TEST(MergeFailed) {
   }
 }
-#ifdef V8_HOST_ARCH_32_BIT
 TEST(AllocateAligned) {
+  // The test works only for configurations with 32-bit tagged values.
+  if (kTaggedSize != kUInt32Size) return;
   CcTest::InitializeVM();
   Heap* heap = CcTest::heap();
   const int kLabSize = 2 * KB;
   Address base = AllocateLabBackingStore(heap, kLabSize);
   Address limit = base + kLabSize;
   std::pair<intptr_t, AllocationAlignment> sizes_raw[2] = {
-      std::make_pair(116, kWordAligned), std::make_pair(64, kDoubleAligned)};
+      std::make_pair(116, kTaggedAligned), std::make_pair(64, kDoubleAligned)};
   std::vector<std::pair<intptr_t, AllocationAlignment>> sizes(sizes_raw,
                                                               sizes_raw + 2);
   intptr_t expected_sizes_raw[4] = {116, 4, 64, 1864};
@@ -275,7 +273,6 @@ TEST(AllocateAligned) {
   }
   VerifyIterable(base, limit, expected_sizes);
 }
-#endif  // V8_HOST_ARCH_32_BIT
 }  // namespace heap
 }  // namespace internal
......
@@ -282,10 +282,9 @@ TEST(NewSpace) {
   CHECK(new_space.MaximumCapacity());
   while (new_space.Available() >= kMaxRegularHeapObjectSize) {
-    CHECK(new_space.Contains(new_space
-                                 .AllocateRaw(kMaxRegularHeapObjectSize,
-                                              AllocationAlignment::kWordAligned)
-                                 .ToObjectChecked()));
+    CHECK(new_space.Contains(
+        new_space.AllocateRaw(kMaxRegularHeapObjectSize, kTaggedAligned)
+            .ToObjectChecked()));
   }
   new_space.TearDown();
@@ -329,7 +328,7 @@ TEST(OldLargeObjectSpace) {
   CHECK(lo->Contains(ho));
-  CHECK_EQ(0, Heap::GetFillToAlign(ho.address(), kWordAligned));
+  CHECK_EQ(0, Heap::GetFillToAlign(ho.address(), kTaggedAligned));
   // All large objects have the same alignment because they start at the
   // same offset within a page. Fixed double arrays have the most strict
   // alignment requirements.
@@ -409,7 +408,7 @@ TEST(SizeOfInitialHeap) {
 #endif  // DEBUG
 static HeapObject AllocateUnaligned(NewSpace* space, int size) {
-  AllocationResult allocation = space->AllocateRaw(size, kWordAligned);
+  AllocationResult allocation = space->AllocateRaw(size, kTaggedAligned);
   CHECK(!allocation.IsRetry());
   HeapObject filler;
   CHECK(allocation.To(&filler));
@@ -419,7 +418,7 @@ static HeapObject AllocateUnaligned(NewSpace* space, int size) {
 }
 static HeapObject AllocateUnaligned(PagedSpace* space, int size) {
-  AllocationResult allocation = space->AllocateRaw(size, kWordAligned);
+  AllocationResult allocation = space->AllocateRaw(size, kTaggedAligned);
   CHECK(!allocation.IsRetry());
   HeapObject filler;
   CHECK(allocation.To(&filler));
@@ -595,7 +594,8 @@ HEAP_TEST(Regress777177) {
   // Ensure a new linear allocation area on a fresh page.
   AlwaysAllocateScopeForTesting always_allocate(heap);
   heap::SimulateFullSpace(old_space);
-  AllocationResult result = old_space->AllocateRaw(filler_size, kWordAligned);
+  AllocationResult result =
+      old_space->AllocateRaw(filler_size, kTaggedAligned);
   HeapObject obj = result.ToObjectChecked();
   heap->CreateFillerObjectAt(obj.address(), filler_size,
                              ClearRecordedSlots::kNo);
@@ -605,7 +605,7 @@ HEAP_TEST(Regress777177) {
   // Allocate all bytes of the linear allocation area. This moves top_ and
   // top_on_previous_step_ to the next page.
   AllocationResult result =
-      old_space->AllocateRaw(max_object_size, kWordAligned);
+      old_space->AllocateRaw(max_object_size, kTaggedAligned);
   HeapObject obj = result.ToObjectChecked();
   // Simulate allocation folding moving the top pointer back.
   old_space->SetTopAndLimit(obj.address(), old_space->limit());
@@ -613,7 +613,8 @@ HEAP_TEST(Regress777177) {
   {
     // This triggers assert in crbug.com/777177.
-    AllocationResult result = old_space->AllocateRaw(filler_size, kWordAligned);
+    AllocationResult result =
+        old_space->AllocateRaw(filler_size, kTaggedAligned);
     HeapObject obj = result.ToObjectChecked();
     heap->CreateFillerObjectAt(obj.address(), filler_size,
                                ClearRecordedSlots::kNo);
@@ -644,7 +645,7 @@ HEAP_TEST(Regress791582) {
   {
     AllocationResult result =
-        new_space->AllocateRaw(until_page_end, kWordAligned);
+        new_space->AllocateRaw(until_page_end, kTaggedAligned);
     HeapObject obj = result.ToObjectChecked();
     heap->CreateFillerObjectAt(obj.address(), until_page_end,
                                ClearRecordedSlots::kNo);
@@ -654,7 +655,7 @@ HEAP_TEST(Regress791582) {
   {
     // This triggers assert in crbug.com/791582
-    AllocationResult result = new_space->AllocateRaw(256, kWordAligned);
+    AllocationResult result = new_space->AllocateRaw(256, kTaggedAligned);
     HeapObject obj = result.ToObjectChecked();
     heap->CreateFillerObjectAt(obj.address(), 256, ClearRecordedSlots::kNo);
   }
@@ -845,7 +846,7 @@ TEST(ReadOnlySpaceMetrics_OnePage) {
   CHECK_EQ(faked_space->CommittedMemory(), 0);
   CHECK_EQ(faked_space->CommittedPhysicalMemory(), 0);
-  faked_space->AllocateRaw(16, kWordAligned);
+  faked_space->AllocateRaw(16, kTaggedAligned);
   faked_space->ShrinkPages();
   faked_space->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
@@ -955,10 +956,10 @@ TEST(ReadOnlySpaceMetrics_TwoPages) {
                kTaggedSize);
   CHECK_GT(object_size * 2,
            MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE));
-  faked_space->AllocateRaw(object_size, kWordAligned);
+  faked_space->AllocateRaw(object_size, kTaggedAligned);
   // Then allocate another so it expands the space to two pages.
-  faked_space->AllocateRaw(object_size, kWordAligned);
+  faked_space->AllocateRaw(object_size, kTaggedAligned);
   faked_space->ShrinkPages();
   faked_space->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
......