Commit 579c3b0f authored by Igor Sheludko, committed by V8 LUCI CQ

[heap][cleanup] Rename kWordAligned to kTaggedAligned

Also introduce a USE_ALLOCATION_ALIGNMENT_BOOL constant, which is true
only for configurations that require aligned allocations, and use it
to statically fall back to unaligned allocations on configurations
that do not.

This is a prerequisite for introducing the real kWordAligned mode for
kSystemPointerSize-aligned allocations.

Bug: v8:8875
Change-Id: I155d12435f344324bc1bf19da88ee823c8f2ca6c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3283064
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77918}
parent 55cd8648
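
The pattern this CL applies at the touched allocation sites is visible throughout the diff below: if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) selects the aligned path, otherwise the unaligned fast path. What follows is a minimal standalone sketch of that idea, not V8 code: only the enum and the macro mirror the CL, while the bump allocator, the kTaggedSize/kDoubleSize constants and the AllocateFast* helpers are hypothetical scaffolding.

// Minimal sketch (not V8 source), assuming 32-bit tagged values: a
// compile-time USE_ALLOCATION_ALIGNMENT_BOOL lets an allocation site fall
// back to the unaligned fast path statically.
#include <cstddef>
#include <cstdint>
#include <cstdio>

enum AllocationAlignment { kTaggedAligned, kDoubleAligned, kDoubleUnaligned };

#ifdef V8_HOST_ARCH_32_BIT
#define USE_ALLOCATION_ALIGNMENT_BOOL true
#else
#define USE_ALLOCATION_ALIGNMENT_BOOL false
#endif

namespace {
constexpr size_t kTaggedSize = 4;  // sketch assumes 32-bit tagged values
constexpr size_t kDoubleSize = 8;

alignas(8) char g_buffer[1024];
size_t g_top = 0;

char* AllocateFastUnaligned(size_t size) {
  char* result = g_buffer + g_top;
  g_top += size;
  return result;
}

char* AllocateFastAligned(size_t size, AllocationAlignment alignment) {
  // Pad so the returned address (kDoubleAligned) or the returned address +
  // kTaggedSize (kDoubleUnaligned) lands on a kDoubleSize boundary.
  uintptr_t addr = reinterpret_cast<uintptr_t>(g_buffer) + g_top;
  uintptr_t target = (alignment == kDoubleUnaligned) ? addr + kTaggedSize : addr;
  g_top += (kDoubleSize - (target & (kDoubleSize - 1))) & (kDoubleSize - 1);
  return AllocateFastUnaligned(size);
}
}  // namespace

char* AllocateRaw(size_t size, AllocationAlignment alignment) {
  // Where USE_ALLOCATION_ALIGNMENT_BOOL is false the left operand is a
  // compile-time constant, so the aligned branch is dead code and the
  // compiler can drop it entirely.
  if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
    return AllocateFastAligned(size, alignment);
  }
  return AllocateFastUnaligned(size);
}

int main() {
  std::printf("%p\n", static_cast<void*>(AllocateRaw(16, kDoubleAligned)));
  std::printf("%p\n", static_cast<void*>(AllocateRaw(16, kTaggedAligned)));
  return 0;
}
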
......@@ -868,8 +868,27 @@ inline constexpr bool IsSharedAllocationType(AllocationType kind) {
kind == AllocationType::kSharedMap;
}
// TODO(ishell): review and rename kWordAligned to kTaggedAligned.
enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
enum AllocationAlignment {
// The allocated address is kTaggedSize aligned (this is default for most of
// the allocations).
kTaggedAligned,
// The allocated address is kDoubleSize aligned.
kDoubleAligned,
// The (allocated address + kTaggedSize) is kDoubleSize aligned.
kDoubleUnaligned
};
#ifdef V8_HOST_ARCH_32_BIT
#define USE_ALLOCATION_ALIGNMENT_BOOL true
#else
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): Consider using aligned allocations once the
// allocation alignment inconsistency is fixed. For now we keep using
// unaligned access since both x64 and arm64 architectures (where pointer
// compression is supported) allow unaligned access to doubles and full words.
#endif // V8_COMPRESS_POINTERS
#define USE_ALLOCATION_ALIGNMENT_BOOL false
#endif // V8_HOST_ARCH_32_BIT
enum class AccessMode { ATOMIC, NON_ATOMIC };
......
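
For reference, the three alignment modes introduced above differ only in which address must land on a kDoubleSize boundary. Below is a minimal sketch assuming kTaggedSize = 4 and kDoubleSize = 8 (the 32-bit configuration where alignment fills actually matter); GetFillToAlign here is an illustrative re-derivation, not the Heap method, and its expected values match the TestAlignmentCalculations test further down in the diff.

// Minimal sketch of the alignment semantics, assuming kTaggedSize = 4 and
// kDoubleSize = 8. GetFillToAlign is a re-derivation for illustration only.
#include <cassert>
#include <cstdint>

enum AllocationAlignment { kTaggedAligned, kDoubleAligned, kDoubleUnaligned };

constexpr uintptr_t kTaggedSize = 4;
constexpr uintptr_t kDoubleSize = 8;
constexpr uintptr_t kDoubleAlignmentMask = kDoubleSize - 1;

int GetFillToAlign(uintptr_t address, AllocationAlignment alignment) {
  switch (alignment) {
    case kTaggedAligned:
      // Allocation addresses are always kTaggedSize aligned already.
      return 0;
    case kDoubleAligned:
      // The allocated address itself must be kDoubleSize aligned.
      return (address & kDoubleAlignmentMask) != 0 ? kDoubleSize - kTaggedSize
                                                   : 0;
    case kDoubleUnaligned:
      // (allocated address + kTaggedSize) must be kDoubleSize aligned, e.g.
      // so a HeapNumber's value field lands on a double boundary.
      return ((address + kTaggedSize) & kDoubleAlignmentMask) != 0
                 ? kDoubleSize - kTaggedSize
                 : 0;
  }
  return 0;
}

int main() {
  uintptr_t base = 0x1000;  // double aligned
  assert(GetFillToAlign(base, kTaggedAligned) == 0);
  assert(GetFillToAlign(base + kTaggedSize, kTaggedAligned) == 0);
  assert(GetFillToAlign(base, kDoubleAligned) == 0);
  assert(GetFillToAlign(base + kTaggedSize, kDoubleAligned) == 4);
  assert(GetFillToAlign(base, kDoubleUnaligned) == 4);
  assert(GetFillToAlign(base + kTaggedSize, kDoubleUnaligned) == 0);
  return 0;
}
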
......@@ -36,7 +36,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
AllocationResult result = local_heap.AllocateRaw(
kSmallObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned);
AllocationAlignment::kTaggedAligned);
if (!result.IsRetry()) {
heap->CreateFillerObjectAtBackground(
result.ToAddress(), kSmallObjectSize,
......@@ -47,7 +47,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
result = local_heap.AllocateRaw(kMediumObjectSize, AllocationType::kOld,
AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned);
AllocationAlignment::kTaggedAligned);
if (!result.IsRetry()) {
heap->CreateFillerObjectAtBackground(
result.ToAddress(), kMediumObjectSize,
......@@ -58,7 +58,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
result = local_heap.AllocateRaw(kLargeObjectSize, AllocationType::kOld,
AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned);
AllocationAlignment::kTaggedAligned);
if (!result.IsRetry()) {
heap->CreateFillerObjectAtBackground(
result.ToAddress(), kLargeObjectSize,
......@@ -133,7 +133,7 @@ AllocationResult ConcurrentAllocator::AllocateInLabSlow(
bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
auto result = space_->RawRefillLabBackground(
local_heap_, kLabSize, kMaxLabSize, kWordAligned, origin);
local_heap_, kLabSize, kMaxLabSize, kTaggedAligned, origin);
if (!result) return false;
if (local_heap_->heap()->incremental_marking()->black_allocation()) {
......
......@@ -269,7 +269,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
HeapObject AllocateRawWithImmortalMap(
int size, AllocationType allocation, Map map,
AllocationAlignment alignment = kWordAligned);
AllocationAlignment alignment = kTaggedAligned);
HeapObject NewWithImmortalMap(Map map, AllocationType allocation);
Handle<FixedArray> NewFixedArrayWithFiller(Handle<Map> map, int length,
......@@ -295,7 +295,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
ReadOnlyRoots read_only_roots() { return impl()->read_only_roots(); }
HeapObject AllocateRaw(int size, AllocationType allocation,
AllocationAlignment alignment = kWordAligned);
AllocationAlignment alignment = kTaggedAligned);
friend TorqueGeneratedFactory<Impl>;
};
......
......@@ -378,10 +378,10 @@ HeapObject Factory::New(Handle<Map> map, AllocationType allocation) {
return result;
}
Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
Handle<HeapObject> Factory::NewFillerObject(int size,
AllocationAlignment alignment,
AllocationType allocation,
AllocationOrigin origin) {
AllocationAlignment alignment = double_align ? kDoubleAligned : kWordAligned;
Heap* heap = isolate()->heap();
HeapObject result = heap->AllocateRawWith<Heap::kRetryOrFail>(
size, allocation, origin, alignment);
......
......@@ -444,7 +444,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Allocate a block of memory of the given AllocationType (filled with a
// filler). Used as a fall-back for generated code when the space is full.
Handle<HeapObject> NewFillerObject(
int size, bool double_align, AllocationType allocation,
int size, AllocationAlignment alignment, AllocationType allocation,
AllocationOrigin origin = AllocationOrigin::kRuntime);
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
......@@ -989,7 +989,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// ------
// Customization points for FactoryBase
HeapObject AllocateRaw(int size, AllocationType allocation,
AllocationAlignment alignment = kWordAligned);
AllocationAlignment alignment = kTaggedAligned);
Isolate* isolate() const {
// Downcast to the privately inherited sub-class using c-style casts to
......
......@@ -207,7 +207,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK_IMPLIES(type == AllocationType::kCode || type == AllocationType::kMap,
alignment == AllocationAlignment::kWordAligned);
alignment == AllocationAlignment::kTaggedAligned);
DCHECK_EQ(gc_state(), NOT_IN_GC);
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
......@@ -321,7 +321,7 @@ HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
DCHECK_EQ(gc_state(), NOT_IN_GC);
Heap* heap = isolate()->heap();
if (allocation == AllocationType::kYoung &&
alignment == AllocationAlignment::kWordAligned &&
alignment == AllocationAlignment::kTaggedAligned &&
size <= MaxRegularHeapObjectSize(allocation) &&
V8_LIKELY(!FLAG_single_generation && FLAG_inline_new &&
FLAG_gc_interval == -1)) {
......
......@@ -3005,13 +3005,12 @@ STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kTaggedSize));
STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kDoubleAlignment));
#endif
#ifdef V8_HOST_ARCH_32_BIT
STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
#endif
STATIC_ASSERT(!USE_ALLOCATION_ALIGNMENT_BOOL ||
(HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
switch (alignment) {
case kWordAligned:
case kTaggedAligned:
return 0;
case kDoubleAligned:
case kDoubleUnaligned:
......
......@@ -2078,7 +2078,7 @@ class Heap {
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationType allocation,
AllocationOrigin origin = AllocationOrigin::kRuntime,
AllocationAlignment alignment = kWordAligned);
AllocationAlignment alignment = kTaggedAligned);
// This method will try to allocate objects quickly (AllocationType::kYoung)
// otherwise it falls back to a slower path indicated by the mode.
......@@ -2087,13 +2087,13 @@ class Heap {
V8_WARN_UNUSED_RESULT inline HeapObject AllocateRawWith(
int size, AllocationType allocation,
AllocationOrigin origin = AllocationOrigin::kRuntime,
AllocationAlignment alignment = kWordAligned);
AllocationAlignment alignment = kTaggedAligned);
// Call AllocateRawWith with kRetryOrFail. Matches the method in LocalHeap.
V8_WARN_UNUSED_RESULT inline Address AllocateRawOrFail(
int size, AllocationType allocation,
AllocationOrigin origin = AllocationOrigin::kRuntime,
AllocationAlignment alignment = kWordAligned);
AllocationAlignment alignment = kTaggedAligned);
// This method will try to perform an allocation of a given size of a given
// AllocationType. If the allocation fails, a regular full garbage collection
......@@ -2102,7 +2102,7 @@ class Heap {
// returned.
V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithLightRetrySlowPath(
int size, AllocationType allocation, AllocationOrigin origin,
AllocationAlignment alignment = kWordAligned);
AllocationAlignment alignment = kTaggedAligned);
// This method will try to perform an allocation of a given size of a given
// AllocationType. If the allocation fails, a regular full garbage collection
......@@ -2112,7 +2112,7 @@ class Heap {
// If the allocation still fails after that a fatal error is thrown.
V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithRetryOrFailSlowPath(
int size, AllocationType allocation, AllocationOrigin origin,
AllocationAlignment alignment = kWordAligned);
AllocationAlignment alignment = kTaggedAligned);
// Allocates a heap object based on the map.
V8_WARN_UNUSED_RESULT AllocationResult Allocate(Handle<Map> map,
......
......@@ -85,7 +85,7 @@ AllocationResult EvacuationAllocator::AllocateInLAB(
bool EvacuationAllocator::NewLocalAllocationBuffer() {
if (lab_allocation_will_fail_) return false;
AllocationResult result =
new_space_->AllocateRawSynchronized(kLabSize, kWordAligned);
new_space_->AllocateRawSynchronized(kLabSize, kTaggedAligned);
if (result.IsRetry()) {
lab_allocation_will_fail_ = true;
return false;
......
......@@ -57,7 +57,7 @@ class V8_EXPORT_PRIVATE LocalFactory : public FactoryBase<LocalFactory> {
// ------
// Customization points for FactoryBase.
HeapObject AllocateRaw(int size, AllocationType allocation,
AllocationAlignment alignment = kWordAligned);
AllocationAlignment alignment = kTaggedAligned);
LocalIsolate* isolate() {
// Downcast to the privately inherited sub-class using c-style casts to
......
......@@ -25,7 +25,7 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK_IMPLIES(type == AllocationType::kCode || type == AllocationType::kMap,
alignment == AllocationAlignment::kWordAligned);
alignment == AllocationAlignment::kTaggedAligned);
Heap::HeapState state = heap()->gc_state();
DCHECK(state == Heap::TEAR_DOWN || state == Heap::NOT_IN_GC);
DCHECK(IsRunning());
......
......@@ -130,14 +130,14 @@ class V8_EXPORT_PRIVATE LocalHeap {
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationType allocation,
AllocationOrigin origin = AllocationOrigin::kRuntime,
AllocationAlignment alignment = kWordAligned);
AllocationAlignment alignment = kTaggedAligned);
// Allocates an uninitialized object and crashes when object
// cannot be allocated.
V8_WARN_UNUSED_RESULT inline Address AllocateRawOrFail(
int size_in_bytes, AllocationType allocation,
AllocationOrigin origin = AllocationOrigin::kRuntime,
AllocationAlignment alignment = kWordAligned);
AllocationAlignment alignment = kTaggedAligned);
inline void CreateFillerObjectAt(Address addr, int size,
ClearRecordedSlots clear_slots_mode);
......
......@@ -96,7 +96,7 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
AllocationResult result;
if (alignment != kWordAligned) {
if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
result = AllocateFastAligned(size_in_bytes, nullptr, alignment, origin);
} else {
result = AllocateFastUnaligned(size_in_bytes, origin);
......
......@@ -611,25 +611,15 @@ std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
AllocationResult NewSpace::AllocateRawSlow(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
#ifdef V8_HOST_ARCH_32_BIT
return alignment != kWordAligned
return USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
#else
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): Consider using aligned allocations once the
// allocation alignment inconsistency is fixed. For now we keep using
// unaligned access since both x64 and arm64 architectures (where pointer
// compression is supported) allow unaligned access to doubles and full words.
#endif // V8_COMPRESS_POINTERS
return AllocateRawUnaligned(size_in_bytes, origin);
#endif
}
AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
if (!EnsureAllocation(size_in_bytes, kTaggedAligned)) {
return AllocationResult::Retry(NEW_SPACE);
}
......
......@@ -393,6 +393,10 @@ class V8_EXPORT_PRIVATE NewSpace
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
V8_WARN_UNUSED_RESULT AllocationResult
AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
// Reset the allocation pointer to the beginning of the active semispace.
void ResetLinearAllocationArea();
......@@ -511,10 +515,6 @@ class V8_EXPORT_PRIVATE NewSpace
AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin);
V8_WARN_UNUSED_RESULT AllocationResult
AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
V8_WARN_UNUSED_RESULT AllocationResult AllocateRawUnaligned(
int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
......
......@@ -176,7 +176,7 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
DCHECK(!FLAG_enable_third_party_heap);
AllocationResult result;
if (alignment != kWordAligned) {
if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
result = AllocateFastAligned(size_in_bytes, nullptr, alignment);
} else {
result = AllocateFastUnaligned(size_in_bytes);
......
......@@ -993,14 +993,10 @@ AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
kGCCallbackScheduleIdleGarbageCollection);
}
#ifdef V8_HOST_ARCH_32_BIT
AllocationResult result =
alignment != kWordAligned
USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
#else
AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
#endif
return result;
}
......
......@@ -692,13 +692,10 @@ AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
AllocationResult result = alignment != kWordAligned
? AllocateRawAligned(size_in_bytes, alignment)
: AllocateRawUnaligned(size_in_bytes);
#else
AllocationResult result = AllocateRawUnaligned(size_in_bytes);
#endif
AllocationResult result =
USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
? AllocateRawAligned(size_in_bytes, alignment)
: AllocateRawUnaligned(size_in_bytes);
HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj)) {
DCHECK(heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
......
......@@ -967,21 +967,16 @@ WriteBarrierMode HeapObject::GetWriteBarrierMode(
// static
AllocationAlignment HeapObject::RequiredAlignment(Map map) {
// TODO(bmeurer, v8:4153): We should think about requiring double alignment
// TODO(v8:4153): We should think about requiring double alignment
// in general for ByteArray, since they are used as backing store for typed
// arrays now.
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): Consider using aligned allocations once the
// allocation alignment inconsistency is fixed. For now we keep using
// unaligned access since both x64 and arm64 architectures (where pointer
// compression is supported) allow unaligned access to doubles and full words.
#endif // V8_COMPRESS_POINTERS
#ifdef V8_HOST_ARCH_32_BIT
int instance_type = map.instance_type();
if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) return kDoubleAligned;
if (instance_type == HEAP_NUMBER_TYPE) return kDoubleUnaligned;
#endif // V8_HOST_ARCH_32_BIT
return kWordAligned;
// TODO(ishell, v8:8875): Consider using aligned allocations for BigInt.
if (USE_ALLOCATION_ALIGNMENT_BOOL) {
int instance_type = map.instance_type();
if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) return kDoubleAligned;
if (instance_type == HEAP_NUMBER_TYPE) return kDoubleUnaligned;
}
return kTaggedAligned;
}
Address HeapObject::GetFieldAddress(int field_offset) const {
......
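
The RequiredAlignment change above folds the old #ifdef into a branch on USE_ALLOCATION_ALIGNMENT_BOOL, so kTaggedAligned now uniformly means "no special requirement", which is exactly how the serializer check further down in the diff (RequiredAlignment(map) != kTaggedAligned) consumes it. A simplified standalone sketch of that consumer pattern follows; the InstanceType values and the constexpr flag are stand-ins for the real map-based dispatch and macro.

// Simplified sketch (not V8 source): kTaggedAligned as the "no special
// requirement" default, consumed by a deferral check similar to
// ReadOnlySerializer::MustBeDeferred.
#include <cstdio>

enum AllocationAlignment { kTaggedAligned, kDoubleAligned, kDoubleUnaligned };
enum InstanceType { FIXED_ARRAY_TYPE, FIXED_DOUBLE_ARRAY_TYPE, HEAP_NUMBER_TYPE };

#ifdef V8_HOST_ARCH_32_BIT
constexpr bool kUseAllocationAlignment = true;
#else
constexpr bool kUseAllocationAlignment = false;
#endif

AllocationAlignment RequiredAlignment(InstanceType type) {
  if (kUseAllocationAlignment) {
    if (type == FIXED_DOUBLE_ARRAY_TYPE) return kDoubleAligned;
    if (type == HEAP_NUMBER_TYPE) return kDoubleUnaligned;
  }
  return kTaggedAligned;
}

// Mirrors the MustBeDeferred idea: only objects with a non-default alignment
// need to be deferred until the filler roots are serialized.
bool HasSpecialAlignment(InstanceType type) {
  return RequiredAlignment(type) != kTaggedAligned;
}

int main() {
  std::printf("double array needs deferral: %d\n",
              HasSpecialAlignment(FIXED_DOUBLE_ARRAY_TYPE));  // 1 only on 32-bit
  std::printf("plain fixed array needs deferral: %d\n",
              HasSpecialAlignment(FIXED_ARRAY_TYPE));  // always 0
  return 0;
}
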
......@@ -458,7 +458,8 @@ RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
DCHECK_EQ(2, args.length());
CONVERT_SMI_ARG_CHECKED(size, 0);
CONVERT_SMI_ARG_CHECKED(flags, 1);
bool double_align = AllocateDoubleAlignFlag::decode(flags);
AllocationAlignment alignment =
AllocateDoubleAlignFlag::decode(flags) ? kDoubleAligned : kTaggedAligned;
bool allow_large_object_allocation =
AllowLargeObjectAllocationFlag::decode(flags);
CHECK(IsAligned(size, kTaggedSize));
......@@ -479,9 +480,9 @@ RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
// TODO(v8:9472): Until double-aligned allocation is fixed for new-space
// allocations, don't request it.
double_align = false;
alignment = kTaggedAligned;
return *isolate->factory()->NewFillerObject(size, double_align,
return *isolate->factory()->NewFillerObject(size, alignment,
AllocationType::kYoung,
AllocationOrigin::kGeneratedCode);
}
......@@ -491,7 +492,8 @@ RUNTIME_FUNCTION(Runtime_AllocateInOldGeneration) {
DCHECK_EQ(2, args.length());
CONVERT_SMI_ARG_CHECKED(size, 0);
CONVERT_SMI_ARG_CHECKED(flags, 1);
bool double_align = AllocateDoubleAlignFlag::decode(flags);
AllocationAlignment alignment =
AllocateDoubleAlignFlag::decode(flags) ? kDoubleAligned : kTaggedAligned;
bool allow_large_object_allocation =
AllowLargeObjectAllocationFlag::decode(flags);
CHECK(IsAligned(size, kTaggedSize));
......@@ -499,9 +501,8 @@ RUNTIME_FUNCTION(Runtime_AllocateInOldGeneration) {
if (!allow_large_object_allocation) {
CHECK(size <= kMaxRegularHeapObjectSize);
}
return *isolate->factory()->NewFillerObject(size, double_align,
AllocationType::kOld,
AllocationOrigin::kGeneratedCode);
return *isolate->factory()->NewFillerObject(
size, alignment, AllocationType::kOld, AllocationOrigin::kGeneratedCode);
}
RUNTIME_FUNCTION(Runtime_AllocateByteArray) {
......
......@@ -697,7 +697,7 @@ Handle<HeapObject> Deserializer<IsolateT>::ReadMetaMap() {
const int size_in_tagged = size_in_bytes / kTaggedSize;
HeapObject raw_obj =
Allocate(SpaceToAllocation(space), size_in_bytes, kWordAligned);
Allocate(SpaceToAllocation(space), size_in_bytes, kTaggedAligned);
raw_obj.set_map_after_allocation(Map::unchecked_cast(raw_obj));
MemsetTagged(raw_obj.RawField(kTaggedSize),
Smi::uninitialized_deserialization_value(), size_in_tagged - 1);
......
......@@ -114,7 +114,7 @@ bool ReadOnlySerializer::MustBeDeferred(HeapObject object) {
}
// Defer objects with special alignment requirements until the filler roots
// are serialized.
return HeapObject::RequiredAlignment(object.map()) != kWordAligned;
return HeapObject::RequiredAlignment(object.map()) != kTaggedAligned;
}
bool ReadOnlySerializer::SerializeUsingReadOnlyObjectCache(
......
......@@ -50,11 +50,11 @@ void AllocateSomeObjects(LocalHeap* local_heap) {
for (int i = 0; i < kNumIterations; i++) {
Address address = local_heap->AllocateRawOrFail(
kSmallObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned);
AllocationAlignment::kTaggedAligned);
CreateFixedArray(local_heap->heap(), address, kSmallObjectSize);
address = local_heap->AllocateRawOrFail(
kMediumObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned);
AllocationAlignment::kTaggedAligned);
CreateFixedArray(local_heap->heap(), address, kMediumObjectSize);
if (i % 10 == 0) {
local_heap->Safepoint();
......@@ -247,7 +247,7 @@ class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
for (int i = 0; i < kNumIterations; i++) {
AllocationResult result = local_heap.AllocateRaw(
kLargeObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned);
AllocationAlignment::kTaggedAligned);
if (result.IsRetry()) {
local_heap.TryPerformCollection();
} else {
......@@ -322,12 +322,12 @@ class ConcurrentBlackAllocationThread final : public v8::base::Thread {
}
Address address = local_heap.AllocateRawOrFail(
kSmallObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned);
AllocationAlignment::kTaggedAligned);
objects_->push_back(address);
CreateFixedArray(heap_, address, kSmallObjectSize);
address = local_heap.AllocateRawOrFail(
kMediumObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
AllocationAlignment::kWordAligned);
AllocationAlignment::kTaggedAligned);
objects_->push_back(address);
CreateFixedArray(heap_, address, kMediumObjectSize);
}
......
......@@ -1757,7 +1757,7 @@ HEAP_TEST(TestSizeOfObjects) {
TEST(TestAlignmentCalculations) {
// Maximum fill amounts are consistent.
int maximum_double_misalignment = kDoubleSize - kTaggedSize;
int max_word_fill = Heap::GetMaximumFillToAlign(kWordAligned);
int max_word_fill = Heap::GetMaximumFillToAlign(kTaggedAligned);
CHECK_EQ(0, max_word_fill);
int max_double_fill = Heap::GetMaximumFillToAlign(kDoubleAligned);
CHECK_EQ(maximum_double_misalignment, max_double_fill);
......@@ -1768,9 +1768,9 @@ TEST(TestAlignmentCalculations) {
int fill = 0;
// Word alignment never requires fill.
fill = Heap::GetFillToAlign(base, kWordAligned);
fill = Heap::GetFillToAlign(base, kTaggedAligned);
CHECK_EQ(0, fill);
fill = Heap::GetFillToAlign(base + kTaggedSize, kWordAligned);
fill = Heap::GetFillToAlign(base + kTaggedSize, kTaggedAligned);
CHECK_EQ(0, fill);
// No fill is required when address is double aligned.
......@@ -1789,7 +1789,8 @@ TEST(TestAlignmentCalculations) {
static HeapObject NewSpaceAllocateAligned(int size,
AllocationAlignment alignment) {
Heap* heap = CcTest::heap();
AllocationResult allocation = heap->new_space()->AllocateRaw(size, alignment);
AllocationResult allocation =
heap->new_space()->AllocateRawAligned(size, alignment);
HeapObject obj;
allocation.To(&obj);
heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
......@@ -1802,7 +1803,7 @@ static Address AlignNewSpace(AllocationAlignment alignment, int offset) {
int fill = Heap::GetFillToAlign(*top_addr, alignment);
int allocation = fill + offset;
if (allocation) {
NewSpaceAllocateAligned(allocation, kWordAligned);
NewSpaceAllocateAligned(allocation, kTaggedAligned);
}
return *top_addr;
}
......@@ -1870,7 +1871,7 @@ static Address AlignOldSpace(AllocationAlignment alignment, int offset) {
int fill = Heap::GetFillToAlign(*top_addr, alignment);
int allocation = fill + offset;
if (allocation) {
OldSpaceAllocateAligned(allocation, kWordAligned);
OldSpaceAllocateAligned(allocation, kTaggedAligned);
}
Address top = *top_addr;
// Now force the remaining allocation onto the free list.
......@@ -3721,8 +3722,7 @@ TEST(Regress169928) {
// fill pointer value.
HeapObject obj;
AllocationResult allocation = CcTest::heap()->new_space()->AllocateRaw(
AllocationMemento::kSize + kTaggedSize,
AllocationAlignment::kWordAligned);
AllocationMemento::kSize + kTaggedSize, kTaggedAligned);
CHECK(allocation.To(&obj));
Address addr_obj = obj.address();
CcTest::heap()->CreateFillerObjectAt(addr_obj,
......
......@@ -15,7 +15,7 @@ namespace internal {
namespace heap {
static Address AllocateLabBackingStore(Heap* heap, intptr_t size_in_bytes) {
AllocationResult result = heap->old_space()->AllocateRaw(
AllocationResult result = heap->old_space()->AllocateRawAligned(
static_cast<int>(size_in_bytes), kDoubleAligned);
Address adr = result.ToObjectChecked().address();
return adr;
......@@ -38,10 +38,9 @@ static void VerifyIterable(v8::internal::Address base,
}
}
static bool AllocateFromLab(Heap* heap, LocalAllocationBuffer* lab,
intptr_t size_in_bytes,
AllocationAlignment alignment = kWordAligned) {
AllocationAlignment alignment = kTaggedAligned) {
HeapObject obj;
AllocationResult result =
lab->AllocateRawAligned(static_cast<int>(size_in_bytes), alignment);
......@@ -53,7 +52,6 @@ static bool AllocateFromLab(Heap* heap, LocalAllocationBuffer* lab,
return false;
}
TEST(InvalidLab) {
LocalAllocationBuffer lab = LocalAllocationBuffer::InvalidBuffer();
CHECK(!lab.IsValid());
......@@ -246,16 +244,16 @@ TEST(MergeFailed) {
}
}
#ifdef V8_HOST_ARCH_32_BIT
TEST(AllocateAligned) {
// The test works only for configurations with 32-bit tagged values.
if (kTaggedSize != kUInt32Size) return;
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
const int kLabSize = 2 * KB;
Address base = AllocateLabBackingStore(heap, kLabSize);
Address limit = base + kLabSize;
std::pair<intptr_t, AllocationAlignment> sizes_raw[2] = {
std::make_pair(116, kWordAligned), std::make_pair(64, kDoubleAligned)};
std::make_pair(116, kTaggedAligned), std::make_pair(64, kDoubleAligned)};
std::vector<std::pair<intptr_t, AllocationAlignment>> sizes(sizes_raw,
sizes_raw + 2);
intptr_t expected_sizes_raw[4] = {116, 4, 64, 1864};
......@@ -275,7 +273,6 @@ TEST(AllocateAligned) {
}
VerifyIterable(base, limit, expected_sizes);
}
#endif // V8_HOST_ARCH_32_BIT
} // namespace heap
} // namespace internal
......
......@@ -282,10 +282,9 @@ TEST(NewSpace) {
CHECK(new_space.MaximumCapacity());
while (new_space.Available() >= kMaxRegularHeapObjectSize) {
CHECK(new_space.Contains(new_space
.AllocateRaw(kMaxRegularHeapObjectSize,
AllocationAlignment::kWordAligned)
.ToObjectChecked()));
CHECK(new_space.Contains(
new_space.AllocateRaw(kMaxRegularHeapObjectSize, kTaggedAligned)
.ToObjectChecked()));
}
new_space.TearDown();
......@@ -329,7 +328,7 @@ TEST(OldLargeObjectSpace) {
CHECK(lo->Contains(ho));
CHECK_EQ(0, Heap::GetFillToAlign(ho.address(), kWordAligned));
CHECK_EQ(0, Heap::GetFillToAlign(ho.address(), kTaggedAligned));
// All large objects have the same alignment because they start at the
// same offset within a page. Fixed double arrays have the most strict
// alignment requirements.
......@@ -409,7 +408,7 @@ TEST(SizeOfInitialHeap) {
#endif // DEBUG
static HeapObject AllocateUnaligned(NewSpace* space, int size) {
AllocationResult allocation = space->AllocateRaw(size, kWordAligned);
AllocationResult allocation = space->AllocateRaw(size, kTaggedAligned);
CHECK(!allocation.IsRetry());
HeapObject filler;
CHECK(allocation.To(&filler));
......@@ -419,7 +418,7 @@ static HeapObject AllocateUnaligned(NewSpace* space, int size) {
}
static HeapObject AllocateUnaligned(PagedSpace* space, int size) {
AllocationResult allocation = space->AllocateRaw(size, kWordAligned);
AllocationResult allocation = space->AllocateRaw(size, kTaggedAligned);
CHECK(!allocation.IsRetry());
HeapObject filler;
CHECK(allocation.To(&filler));
......@@ -595,7 +594,8 @@ HEAP_TEST(Regress777177) {
// Ensure a new linear allocation area on a fresh page.
AlwaysAllocateScopeForTesting always_allocate(heap);
heap::SimulateFullSpace(old_space);
AllocationResult result = old_space->AllocateRaw(filler_size, kWordAligned);
AllocationResult result =
old_space->AllocateRaw(filler_size, kTaggedAligned);
HeapObject obj = result.ToObjectChecked();
heap->CreateFillerObjectAt(obj.address(), filler_size,
ClearRecordedSlots::kNo);
......@@ -605,7 +605,7 @@ HEAP_TEST(Regress777177) {
// Allocate all bytes of the linear allocation area. This moves top_ and
// top_on_previous_step_ to the next page.
AllocationResult result =
old_space->AllocateRaw(max_object_size, kWordAligned);
old_space->AllocateRaw(max_object_size, kTaggedAligned);
HeapObject obj = result.ToObjectChecked();
// Simulate allocation folding moving the top pointer back.
old_space->SetTopAndLimit(obj.address(), old_space->limit());
......@@ -613,7 +613,8 @@ HEAP_TEST(Regress777177) {
{
// This triggers assert in crbug.com/777177.
AllocationResult result = old_space->AllocateRaw(filler_size, kWordAligned);
AllocationResult result =
old_space->AllocateRaw(filler_size, kTaggedAligned);
HeapObject obj = result.ToObjectChecked();
heap->CreateFillerObjectAt(obj.address(), filler_size,
ClearRecordedSlots::kNo);
......@@ -644,7 +645,7 @@ HEAP_TEST(Regress791582) {
{
AllocationResult result =
new_space->AllocateRaw(until_page_end, kWordAligned);
new_space->AllocateRaw(until_page_end, kTaggedAligned);
HeapObject obj = result.ToObjectChecked();
heap->CreateFillerObjectAt(obj.address(), until_page_end,
ClearRecordedSlots::kNo);
......@@ -654,7 +655,7 @@ HEAP_TEST(Regress791582) {
{
// This triggers assert in crbug.com/791582
AllocationResult result = new_space->AllocateRaw(256, kWordAligned);
AllocationResult result = new_space->AllocateRaw(256, kTaggedAligned);
HeapObject obj = result.ToObjectChecked();
heap->CreateFillerObjectAt(obj.address(), 256, ClearRecordedSlots::kNo);
}
......@@ -845,7 +846,7 @@ TEST(ReadOnlySpaceMetrics_OnePage) {
CHECK_EQ(faked_space->CommittedMemory(), 0);
CHECK_EQ(faked_space->CommittedPhysicalMemory(), 0);
faked_space->AllocateRaw(16, kWordAligned);
faked_space->AllocateRaw(16, kTaggedAligned);
faked_space->ShrinkPages();
faked_space->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
......@@ -955,10 +956,10 @@ TEST(ReadOnlySpaceMetrics_TwoPages) {
kTaggedSize);
CHECK_GT(object_size * 2,
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE));
faked_space->AllocateRaw(object_size, kWordAligned);
faked_space->AllocateRaw(object_size, kTaggedAligned);
// Then allocate another so it expands the space to two pages.
faked_space->AllocateRaw(object_size, kWordAligned);
faked_space->AllocateRaw(object_size, kTaggedAligned);
faked_space->ShrinkPages();
faked_space->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
......