Commit 39f419f0 authored by Omer Katz, committed by V8 LUCI CQ

[heap] Move allocation logic to SpaceWithLinearArea

NewSpace and PagedSpace both inherit from SpaceWithLinearArea and
implement their allocation logic on top of it. The parts of the
allocation path that deal specifically with the linear allocation area
are equivalent, differing only in minor syntactic details.

This CL moves the logic for allocating from a linear allocation area
out of NewSpace and PagedSpace and into SpaceWithLinearArea. This
eliminates the code duplication while keeping the behavior the same.

This is done as part of an effort to create a stable NewSpace interface
to allow introducing an alternative paged new space.

Bug: v8:12612
Change-Id: Ie24345a2d51f6e67ebe8a1d67e586038f7aec8de
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3578547
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79955}
parent 7415740d
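
For orientation before the diffs, the shape of the refactoring is roughly the following. This is a simplified, standalone C++ sketch, not the real V8 declarations: the names mirror the classes and methods touched by this CL, but the types, the ToySpace subclass, and the buffer sizes are illustrative assumptions only.

#include <cstdint>
#include <optional>

using Address = uintptr_t;

// Minimal stand-in for V8's LinearAllocationArea (LAB): a bump pointer
// bounded by a limit.
struct LinearAllocationArea {
  Address top = 0;
  Address limit = 0;
  bool CanIncrementTop(int bytes) const {
    return top + static_cast<Address>(bytes) <= limit;
  }
  Address IncrementTop(int bytes) {
    Address old_top = top;
    top += static_cast<Address>(bytes);
    return old_top;
  }
};

class SpaceWithLinearArea {
 public:
  virtual ~SpaceWithLinearArea() = default;

  // Shared entry point: try the LAB bump-pointer fast path, fall back to the
  // slow path on failure (mirrors AllocateRaw / AllocateRawSlow in the CL).
  std::optional<Address> AllocateRaw(int size_in_bytes) {
    if (auto fast = AllocateFastUnaligned(size_in_bytes)) return fast;
    return AllocateRawSlow(size_in_bytes);
  }

 protected:
  // Fast path, now shared by all spaces: just bump the LAB top.
  std::optional<Address> AllocateFastUnaligned(int size_in_bytes) {
    if (!lab_.CanIncrementTop(size_in_bytes)) return std::nullopt;
    return lab_.IncrementTop(size_in_bytes);
  }

  // Slow path, also shared: ask the concrete space to make room, then retry
  // the fast path, which must now succeed.
  std::optional<Address> AllocateRawSlow(int size_in_bytes) {
    if (!EnsureAllocation(size_in_bytes)) return std::nullopt;
    return AllocateFastUnaligned(size_in_bytes);
  }

  // The only space-specific piece that stays virtual: grow the space and
  // refill the LAB (NewSpace adds fresh or parked pages, PagedSpace refills
  // from its free list).
  virtual bool EnsureAllocation(int size_in_bytes) = 0;

  LinearAllocationArea lab_;
};

// Toy subclass standing in for NewSpace/PagedSpace: refills the LAB from a
// fixed buffer, just enough to make the sketch runnable.
class ToySpace final : public SpaceWithLinearArea {
 public:
  bool EnsureAllocation(int size_in_bytes) override {
    if (used_ + size_in_bytes > kCapacity) return false;
    lab_.top = reinterpret_cast<Address>(buffer_) + used_;
    lab_.limit = lab_.top + static_cast<Address>(size_in_bytes);
    used_ += size_in_bytes;
    return true;
  }

 private:
  static constexpr int kCapacity = 64;
  alignas(8) char buffer_[kCapacity] = {};
  int used_ = 0;
};

int main() {
  ToySpace space;
  auto a = space.AllocateRaw(16);  // slow path refills the LAB, then bumps
  auto b = space.AllocateRaw(16);  // LAB exhausted again, slow path refills
  return (a && b) ? 0 : 1;
}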
......@@ -4156,10 +4156,12 @@ double Heap::MonotonicallyIncreasingTimeInMs() const {
static_cast<double>(base::Time::kMillisecondsPerSecond);
}
#if DEBUG
void Heap::VerifyNewSpaceTop() {
if (!new_space()) return;
new_space()->VerifyTop();
}
#endif // DEBUG
bool Heap::IdleNotification(int idle_time_in_ms) {
return IdleNotification(
......
......@@ -697,7 +697,9 @@ class Heap {
V8_EXPORT_PRIVATE double MonotonicallyIncreasingTimeInMs() const;
#if DEBUG
void VerifyNewSpaceTop();
#endif // DEBUG
void RecordStats(HeapStats* stats, bool take_snapshot = false);
......
......@@ -85,74 +85,6 @@ HeapObject SemiSpaceObjectIterator::Next() {
// -----------------------------------------------------------------------------
// NewSpace
AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(!FLAG_single_generation);
DCHECK(!FLAG_enable_third_party_heap);
#if DEBUG
VerifyTop();
#endif
AllocationResult result;
if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
result = AllocateFastAligned(size_in_bytes, nullptr, alignment, origin);
} else {
result = AllocateFastUnaligned(size_in_bytes, origin);
}
return result.IsFailure() ? AllocateRawSlow(size_in_bytes, alignment, origin)
: result;
}
AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
AllocationOrigin origin) {
if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
return AllocationResult::Failure();
}
HeapObject obj =
HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes));
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return AllocationResult::FromObject(obj);
}
AllocationResult NewSpace::AllocateFastAligned(
int size_in_bytes, int* result_aligned_size_in_bytes,
AllocationAlignment alignment, AllocationOrigin origin) {
Address top = allocation_info_->top();
int filler_size = Heap::GetFillToAlign(top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
if (!allocation_info_->CanIncrementTop(aligned_size_in_bytes)) {
return AllocationResult::Failure();
}
HeapObject obj = HeapObject::FromAddress(
allocation_info_->IncrementTop(aligned_size_in_bytes));
if (result_aligned_size_in_bytes)
*result_aligned_size_in_bytes = aligned_size_in_bytes;
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
if (filler_size > 0) {
obj = heap()->PrecedeWithFiller(obj, filler_size);
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return AllocationResult::FromObject(obj);
}
V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
base::MutexGuard guard(&mutex_);
......
......@@ -628,7 +628,14 @@ bool NewSpace::AddParkedAllocationBuffer(int size_in_bytes,
}
bool NewSpace::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment) {
AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) {
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
#if DEBUG
VerifyTop();
#endif // DEBUG
AdvanceAllocationObservers();
Address old_top = allocation_info_->top();
......@@ -636,26 +643,29 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
int filler_size = Heap::GetFillToAlign(old_top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
if (old_top + aligned_size_in_bytes <= high) {
UpdateInlineAllocationLimit(aligned_size_in_bytes);
return true;
}
if (old_top + aligned_size_in_bytes > high) {
// Not enough room in the page, try to allocate a new one.
if (!AddFreshPage()) {
// When we cannot grow NewSpace anymore we query for parked allocations.
if (!FLAG_allocation_buffer_parking ||
!AddParkedAllocationBuffer(size_in_bytes, alignment))
return false;
}
// Not enough room in the page, try to allocate a new one.
if (!AddFreshPage()) {
// When we cannot grow NewSpace anymore we query for parked allocations.
if (!FLAG_allocation_buffer_parking ||
!AddParkedAllocationBuffer(size_in_bytes, alignment))
return false;
old_top = allocation_info_->top();
high = to_space_.page_high();
filler_size = Heap::GetFillToAlign(old_top, alignment);
aligned_size_in_bytes = size_in_bytes + filler_size;
}
old_top = allocation_info_->top();
high = to_space_.page_high();
filler_size = Heap::GetFillToAlign(old_top, alignment);
aligned_size_in_bytes = size_in_bytes + filler_size;
if (out_max_aligned_size) {
*out_max_aligned_size = aligned_size_in_bytes;
}
DCHECK(old_top + aligned_size_in_bytes <= high);
UpdateInlineAllocationLimit(aligned_size_in_bytes);
DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
return true;
}
......@@ -673,54 +683,6 @@ std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
}
AllocationResult NewSpace::AllocateRawSlow(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
return USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
}
AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureAllocation(size_in_bytes, kTaggedAligned)) {
return AllocationResult::Failure();
}
DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
AllocationResult result = AllocateFastUnaligned(size_in_bytes, origin);
DCHECK(!result.IsFailure());
InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
size_in_bytes);
return result;
}
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureAllocation(size_in_bytes, alignment)) {
return AllocationResult::Failure();
}
DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
int aligned_size_in_bytes;
AllocationResult result = AllocateFastAligned(
size_in_bytes, &aligned_size_in_bytes, alignment, origin);
DCHECK(!result.IsFailure());
InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
aligned_size_in_bytes, aligned_size_in_bytes);
return result;
}
void NewSpace::MakeLinearAllocationAreaIterable() {
Address to_top = top();
Page* page = Page::FromAddress(to_top - kTaggedSize);
......@@ -736,10 +698,9 @@ void NewSpace::FreeLinearAllocationArea() {
UpdateInlineAllocationLimit(0);
}
#if DEBUG
void NewSpace::VerifyTop() const {
// Ensure validity of LAB: start <= top <= limit
DCHECK_LE(allocation_info_->start(), allocation_info_->top());
DCHECK_LE(allocation_info_->top(), allocation_info_->limit());
SpaceWithLinearArea::VerifyTop();
// Ensure that original_top_ always >= LAB start. The delta between start_
// and top_ is still to be processed by allocation observers.
......@@ -750,6 +711,7 @@ void NewSpace::VerifyTop() const {
DCHECK_LE(allocation_info_->limit(), original_limit_);
DCHECK_EQ(original_limit_, to_space_.page_high());
}
#endif // DEBUG
#ifdef VERIFY_HEAP
// We do not use the SemiSpaceObjectIterator because verification doesn't assume
......
......@@ -379,7 +379,9 @@ class V8_EXPORT_PRIVATE NewSpace final
return to_space_.minimum_capacity();
}
#if DEBUG
void VerifyTop() const;
#endif // DEBUG
Address original_top_acquire() const {
return original_top_.load(std::memory_order_acquire);
......@@ -397,18 +399,10 @@ class V8_EXPORT_PRIVATE NewSpace final
// Set the age mark in the active semispace.
void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
V8_WARN_UNUSED_RESULT AllocationResult
AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
// Reset the allocation pointer to the beginning of the active semispace.
void ResetLinearAllocationArea();
......@@ -518,22 +512,9 @@ class V8_EXPORT_PRIVATE NewSpace final
ParkedAllocationBuffersVector parked_allocation_buffers_;
// Internal allocation methods.
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
AllocateFastAligned(int size_in_bytes, int* aligned_size_in_bytes,
AllocationAlignment alignment, AllocationOrigin origin);
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
AllocateFastUnaligned(int size_in_bytes, AllocationOrigin origin);
V8_WARN_UNUSED_RESULT AllocationResult
AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin);
V8_WARN_UNUSED_RESULT AllocationResult AllocateRawUnaligned(
int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) final;
bool SupportsAllocationObserver() const override { return true; }
friend class SemiSpaceObjectIterator;
......
......@@ -86,107 +86,6 @@ bool PagedSpace::TryFreeLast(Address object_address, int object_size) {
return false;
}
bool PagedSpace::EnsureLabMain(int size_in_bytes, AllocationOrigin origin) {
if (allocation_info_->top() + size_in_bytes <= allocation_info_->limit()) {
return true;
}
return RefillLabMain(size_in_bytes, origin);
}
AllocationResult PagedSpace::AllocateFastUnaligned(int size_in_bytes) {
if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
return AllocationResult::Failure();
}
return AllocationResult::FromObject(
HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes)));
}
AllocationResult PagedSpace::AllocateFastAligned(
int size_in_bytes, int* aligned_size_in_bytes,
AllocationAlignment alignment) {
Address current_top = allocation_info_->top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
int aligned_size = filler_size + size_in_bytes;
if (!allocation_info_->CanIncrementTop(aligned_size)) {
return AllocationResult::Failure();
}
HeapObject obj =
HeapObject::FromAddress(allocation_info_->IncrementTop(aligned_size));
if (aligned_size_in_bytes) *aligned_size_in_bytes = aligned_size;
if (filler_size > 0) {
obj = heap()->PrecedeWithFiller(obj, filler_size);
}
return AllocationResult::FromObject(obj);
}
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureLabMain(size_in_bytes, origin)) {
return AllocationResult::Failure();
}
AllocationResult result = AllocateFastUnaligned(size_in_bytes);
DCHECK(!result.IsFailure());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
size_in_bytes);
return result;
}
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
DCHECK_EQ(identity(), OLD_SPACE);
int allocation_size = size_in_bytes;
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
if (!EnsureLabMain(allocation_size, origin)) {
return AllocationResult::Failure();
}
int aligned_size_in_bytes;
AllocationResult result =
AllocateFastAligned(size_in_bytes, &aligned_size_in_bytes, alignment);
DCHECK(!result.IsFailure());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
aligned_size_in_bytes, allocation_size);
return result;
}
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
AllocationResult result;
if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
result = AllocateFastAligned(size_in_bytes, nullptr, alignment);
} else {
result = AllocateFastUnaligned(size_in_bytes);
}
return result.IsFailure() ? AllocateRawSlow(size_in_bytes, alignment, origin)
: result;
}
} // namespace internal
} // namespace v8
......
......@@ -1008,25 +1008,6 @@ bool PagedSpace::ContributeToSweepingMain(int required_freed_bytes,
return false;
}
AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
if (!is_compaction_space()) {
// Start incremental marking before the actual allocation, this allows the
// allocation function to mark the object black when incremental marking is
// running.
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
heap()->GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
}
AllocationResult result =
USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
return result;
}
void PagedSpace::AddRangeToActiveSystemPages(Page* page, Address start,
Address end) {
DCHECK_LE(page->address(), start);
......@@ -1049,6 +1030,31 @@ void PagedSpace::ReduceActiveSystemPages(
MemoryAllocator::GetCommitPageSize());
}
bool PagedSpace::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) {
if (!is_compaction_space()) {
// Start incremental marking before the actual allocation, this allows the
// allocation function to mark the object black when incremental marking is
// running.
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
heap()->GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
}
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
size_in_bytes += Heap::GetMaximumFillToAlign(alignment);
if (out_max_aligned_size) {
*out_max_aligned_size = size_in_bytes;
}
if (allocation_info_->top() + size_in_bytes <= allocation_info_->limit()) {
return true;
}
return RefillLabMain(size_in_bytes, origin);
}
// -----------------------------------------------------------------------------
// MapSpace implementation
......
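The PagedSpace::EnsureAllocation hunk above pads the request by the worst-case alignment filler before checking the LAB. A standalone model of that check follows, with assumed constants and simplified types rather than the real V8 helpers (Heap::GetMaximumFillToAlign, RefillLabMain):

#include <cstdint>
#include <iostream>

constexpr int kTaggedSize = 4;  // assumption: pointer-compressed build
constexpr int kDoubleSize = 8;

struct Lab {
  uintptr_t top;
  uintptr_t limit;
};

// Worst-case filler a given alignment could ever require.
int MaximumFillToAlign(bool double_aligned) {
  return double_aligned ? kDoubleSize - kTaggedSize : 0;
}

// Mirrors the shape of the new PagedSpace::EnsureAllocation: pad the request,
// report the padded size, and only fall back to a LAB refill when the padded
// request no longer fits.
bool EnsureAllocation(const Lab& lab, int size_in_bytes, bool double_aligned,
                      int* out_max_aligned_size) {
  size_in_bytes += MaximumFillToAlign(double_aligned);  // assume the worst case
  if (out_max_aligned_size) *out_max_aligned_size = size_in_bytes;
  if (lab.top + size_in_bytes <= lab.limit) return true;  // current LAB suffices
  return false;  // the real code would now try RefillLabMain(size_in_bytes, origin)
}

int main() {
  Lab lab{0x1000, 0x1010};  // 16 free bytes in the LAB
  int max_size = 0;
  // An unaligned request for 16 bytes fits exactly: prints "1 max=16".
  std::cout << EnsureAllocation(lab, 16, /*double_aligned=*/false, &max_size)
            << " max=" << max_size << "\n";
  // A double-aligned request is padded to 20 bytes and no longer fits: "0 max=20".
  std::cout << EnsureAllocation(lab, 16, /*double_aligned=*/true, &max_size)
            << " max=" << max_size << "\n";
  return 0;
}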
......@@ -143,23 +143,6 @@ class V8_EXPORT_PRIVATE PagedSpace
// due to being too small to use for allocation.
virtual size_t Waste() const { return free_list_->wasted_bytes(); }
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space double aligned if
// possible, return a failure object if not.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space and consider allocation
// alignment if needed.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space from a background
// thread.
V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
......@@ -364,11 +347,6 @@ class V8_EXPORT_PRIVATE PagedSpace
return !is_compaction_space();
}
// Slow path of allocation function
V8_WARN_UNUSED_RESULT AllocationResult
AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin);
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
......@@ -395,20 +373,9 @@ class V8_EXPORT_PRIVATE PagedSpace
base::Optional<std::pair<Address, size_t>> ExpandBackground(
size_t size_in_bytes);
// Sets up a linear allocation area that fits the given number of bytes.
// Returns false if there is not enough space and the caller has to retry
// after collecting garbage.
inline bool EnsureLabMain(int size_in_bytes, AllocationOrigin origin);
// Allocates an object from the linear allocation area. Assumes that the
// linear allocation area is large enought to fit the object.
inline AllocationResult AllocateFastUnaligned(int size_in_bytes);
// Tries to allocate an aligned object from the linear allocation area.
// Returns nullptr if the linear allocation area does not fit the object.
// Otherwise, returns the object pointer and writes the allocation size
// (object size + alignment filler size) to the size_in_bytes.
inline AllocationResult AllocateFastAligned(int size_in_bytes,
int* aligned_size_in_bytes,
AllocationAlignment alignment);
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) final;
V8_WARN_UNUSED_RESULT bool TryAllocationFromFreeListMain(
size_t size_in_bytes, AllocationOrigin origin);
......
......@@ -192,6 +192,122 @@ MemoryChunk* MemoryChunkIterator::Next() {
return chunk;
}
AllocationResult SpaceWithLinearArea::AllocateFastUnaligned(
int size_in_bytes, AllocationOrigin origin) {
if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
return AllocationResult::Failure();
}
HeapObject obj =
HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes));
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return AllocationResult::FromObject(obj);
}
AllocationResult SpaceWithLinearArea::AllocateFastAligned(
int size_in_bytes, int* result_aligned_size_in_bytes,
AllocationAlignment alignment, AllocationOrigin origin) {
Address top = allocation_info_->top();
int filler_size = Heap::GetFillToAlign(top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
if (!allocation_info_->CanIncrementTop(aligned_size_in_bytes)) {
return AllocationResult::Failure();
}
HeapObject obj = HeapObject::FromAddress(
allocation_info_->IncrementTop(aligned_size_in_bytes));
if (result_aligned_size_in_bytes)
*result_aligned_size_in_bytes = aligned_size_in_bytes;
if (filler_size > 0) {
obj = heap()->PrecedeWithFiller(obj, filler_size);
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
if (FLAG_trace_allocations_origins) {
UpdateAllocationOrigins(origin);
}
return AllocationResult::FromObject(obj);
}
AllocationResult SpaceWithLinearArea::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
AllocationResult result;
if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
result = AllocateFastAligned(size_in_bytes, nullptr, alignment, origin);
} else {
result = AllocateFastUnaligned(size_in_bytes, origin);
}
return result.IsFailure() ? AllocateRawSlow(size_in_bytes, alignment, origin)
: result;
}
AllocationResult SpaceWithLinearArea::AllocateRawUnaligned(
int size_in_bytes, AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
int max_aligned_size;
if (!EnsureAllocation(size_in_bytes, kTaggedAligned, origin,
&max_aligned_size)) {
return AllocationResult::Failure();
}
DCHECK_EQ(max_aligned_size, size_in_bytes);
DCHECK_LE(allocation_info_->start(), allocation_info_->top());
AllocationResult result = AllocateFastUnaligned(size_in_bytes, origin);
DCHECK(!result.IsFailure());
InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
size_in_bytes);
return result;
}
AllocationResult SpaceWithLinearArea::AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
int max_aligned_size;
if (!EnsureAllocation(size_in_bytes, alignment, origin, &max_aligned_size)) {
return AllocationResult::Failure();
}
DCHECK_GE(max_aligned_size, size_in_bytes);
DCHECK_LE(allocation_info_->start(), allocation_info_->top());
int aligned_size_in_bytes;
AllocationResult result = AllocateFastAligned(
size_in_bytes, &aligned_size_in_bytes, alignment, origin);
DCHECK_GE(max_aligned_size, aligned_size_in_bytes);
DCHECK(!result.IsFailure());
InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
aligned_size_in_bytes, max_aligned_size);
return result;
}
AllocationResult SpaceWithLinearArea::AllocateRawSlow(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
AllocationResult result =
USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
return result;
}
} // namespace internal
} // namespace v8
......
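The shared AllocateFastAligned above bumps the LAB top by object size plus filler and returns the address just past the filler (the real code additionally installs a filler object via heap()->PrecedeWithFiller and computes the filler with Heap::GetFillToAlign). A standalone sketch of that fast path, assuming 8-byte alignment, a generic modulo-based filler computation, and simplified types rather than the real V8 API:

#include <cassert>
#include <cstdint>
#include <optional>

constexpr uintptr_t kAlignment = 8;  // assumption: double alignment

struct Lab {
  uintptr_t top;
  uintptr_t limit;
};

// Compute the filler needed to align the current top, bump top by object size
// plus filler in a single step, and return the aligned object address.
std::optional<uintptr_t> AllocateFastAligned(Lab& lab, int size_in_bytes,
                                             int* aligned_size_in_bytes) {
  int filler =
      static_cast<int>((kAlignment - lab.top % kAlignment) % kAlignment);
  int aligned_size = size_in_bytes + filler;
  if (lab.top + aligned_size > lab.limit) return std::nullopt;  // LAB too small
  uintptr_t object = lab.top + filler;  // the filler precedes the object
  lab.top += aligned_size;
  if (aligned_size_in_bytes) *aligned_size_in_bytes = aligned_size;
  return object;
}

int main() {
  Lab lab{0x1004, 0x1020};  // top is misaligned by 4 bytes
  int aligned_size = 0;
  auto obj = AllocateFastAligned(lab, 16, &aligned_size);
  assert(obj && *obj % kAlignment == 0);  // object lands on an 8-byte boundary
  assert(aligned_size == 20);             // 16 bytes requested + 4 bytes filler
  return 0;
}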
......@@ -443,6 +443,14 @@ void SpaceWithLinearArea::InvokeAllocationObservers(
allocation_counter_.NextBytes());
}
#if DEBUG
void SpaceWithLinearArea::VerifyTop() const {
// Ensure validity of LAB: start <= top <= limit
DCHECK_LE(allocation_info_->start(), allocation_info_->top());
DCHECK_LE(allocation_info_->top(), allocation_info_->limit());
}
#endif // DEBUG
int MemoryChunk::FreeListsLength() {
int length = 0;
for (int cat = kFirstCategory; cat <= owner()->free_list()->last_category();
......
......@@ -514,9 +514,55 @@ class SpaceWithLinearArea : public Space {
void PrintAllocationsOrigins() const;
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRawUnaligned(
int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space double aligned if
// possible, return a failure object if not.
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
protected:
V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);
// Allocates an object from the linear allocation area. Assumes that the
// linear allocation area is large enough to fit the object.
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
AllocateFastUnaligned(int size_in_bytes, AllocationOrigin origin);
// Tries to allocate an aligned object from the linear allocation area.
// Returns a failure result if the linear allocation area does not fit the
// object. Otherwise, returns the object and writes the allocation size
// (object size + alignment filler size) to aligned_size_in_bytes.
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
AllocateFastAligned(int size_in_bytes, int* aligned_size_in_bytes,
AllocationAlignment alignment, AllocationOrigin origin);
// Slow path of allocation function
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin);
// Sets up a linear allocation area that fits the given number of bytes.
// Returns false if there is not enough space and the caller has to retry
// after collecting garbage.
// Writes to `out_max_aligned_size` the actual number of bytes used for
// checking that there is enough space.
virtual bool EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) = 0;
#if DEBUG
V8_EXPORT_PRIVATE void VerifyTop() const;
#endif // DEBUG
LinearAllocationArea* const allocation_info_;
bool use_lab_ = true;
......
......@@ -5452,8 +5452,9 @@ AllocationResult HeapTester::AllocateByteArrayForTest(
}
bool HeapTester::CodeEnsureLinearAllocationArea(Heap* heap, int size_in_bytes) {
bool result = heap->code_space()->EnsureLabMain(size_in_bytes,
AllocationOrigin::kRuntime);
bool result = heap->code_space()->EnsureAllocation(
size_in_bytes, AllocationAlignment::kTaggedAligned,
AllocationOrigin::kRuntime, nullptr);
heap->code_space()->UpdateInlineAllocationLimit(0);
return result;
}
......