Commit 3a05e3cb authored by Omer Katz, committed by V8 LUCI CQ

[heap] Allow inlining EnsureAllocation

Bug: chromium:1316121, v8:12612
Change-Id: I5b32211f01a97a6aaee52f0285d62a516381f43c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3592954
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80079}
parent cb92ed09
@@ -1076,7 +1076,7 @@ class Heap {
       int gc_flags, GarbageCollectionReason gc_reason,
       GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
 
-  void StartIncrementalMarkingIfAllocationLimitIsReached(
+  V8_EXPORT_PRIVATE void StartIncrementalMarkingIfAllocationLimitIsReached(
       int gc_flags,
       GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
   void StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
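Note: since EnsureAllocation is inlined into callers below, its call to
StartIncrementalMarkingIfAllocationLimitIsReached can now originate from
translation units outside the v8 component (tests, for example), which is why
the declaration gains V8_EXPORT_PRIVATE. A minimal sketch of the export-macro
pattern, using a hypothetical EXPORT_PRIVATE rather than V8's actual
v8config.h definition:

#if defined(_WIN32)
#define EXPORT_PRIVATE __declspec(dllexport)
#else
#define EXPORT_PRIVATE __attribute__((visibility("default")))
#endif

class Heap {
 public:
  // Called from header-inlined code, so the symbol must stay visible to
  // every component that includes the header.
  EXPORT_PRIVATE void StartIncrementalMarkingIfAllocationLimitIsReached(
      int gc_flags);
};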
@@ -72,6 +72,48 @@ bool NewSpace::FromSpaceContains(Object o) const {
   return from_space_.Contains(o);
 }
 
+V8_INLINE bool NewSpace::EnsureAllocation(int size_in_bytes,
+                                          AllocationAlignment alignment,
+                                          AllocationOrigin origin,
+                                          int* out_max_aligned_size) {
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+#if DEBUG
+  VerifyTop();
+#endif  // DEBUG
+
+  AdvanceAllocationObservers();
+
+  Address old_top = allocation_info_->top();
+  Address high = to_space_.page_high();
+  int filler_size = Heap::GetFillToAlign(old_top, alignment);
+  int aligned_size_in_bytes = size_in_bytes + filler_size;
+
+  if (old_top + aligned_size_in_bytes > high) {
+    // Not enough room in the page, try to allocate a new one.
+    if (!AddFreshPage()) {
+      // When we cannot grow NewSpace anymore we query for parked allocations.
+      if (!FLAG_allocation_buffer_parking ||
+          !AddParkedAllocationBuffer(size_in_bytes, alignment))
+        return false;
+    }
+
+    old_top = allocation_info_->top();
+    high = to_space_.page_high();
+    filler_size = Heap::GetFillToAlign(old_top, alignment);
+    aligned_size_in_bytes = size_in_bytes + filler_size;
+  }
+
+  if (out_max_aligned_size) {
+    *out_max_aligned_size = aligned_size_in_bytes;
+  }
+
+  DCHECK(old_top + aligned_size_in_bytes <= high);
+
+  UpdateInlineAllocationLimit(aligned_size_in_bytes);
+  DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+  return true;
+}
+
 // -----------------------------------------------------------------------------
 // SemiSpaceObjectIterator
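The retry path above recomputes the filler after AddFreshPage, since the new
top may sit at a different alignment offset. A standalone sketch of the
fill-to-align arithmetic (hypothetical GetFillToAlign; V8's actual helper also
distinguishes alignment modes such as kDoubleAligned):

#include <cassert>
#include <cstdint>

// Number of filler bytes needed so that top + filler is a multiple of a
// power-of-two alignment.
int GetFillToAlign(uintptr_t top, uintptr_t alignment) {
  uintptr_t offset = top & (alignment - 1);
  return offset == 0 ? 0 : static_cast<int>(alignment - offset);
}

int main() {
  assert(GetFillToAlign(0x1004, 8) == 4);  // 4 filler bytes to reach 0x1008
  assert(GetFillToAlign(0x1008, 8) == 0);  // already aligned
  return 0;
}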
@@ -675,48 +675,6 @@ bool NewSpace::AddParkedAllocationBuffer(int size_in_bytes,
   return false;
 }
 
-bool NewSpace::EnsureAllocation(int size_in_bytes,
-                                AllocationAlignment alignment,
-                                AllocationOrigin origin,
-                                int* out_max_aligned_size) {
-  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-#if DEBUG
-  VerifyTop();
-#endif  // DEBUG
-
-  AdvanceAllocationObservers();
-
-  Address old_top = allocation_info_->top();
-  Address high = to_space_.page_high();
-  int filler_size = Heap::GetFillToAlign(old_top, alignment);
-  int aligned_size_in_bytes = size_in_bytes + filler_size;
-
-  if (old_top + aligned_size_in_bytes > high) {
-    // Not enough room in the page, try to allocate a new one.
-    if (!AddFreshPage()) {
-      // When we cannot grow NewSpace anymore we query for parked allocations.
-      if (!FLAG_allocation_buffer_parking ||
-          !AddParkedAllocationBuffer(size_in_bytes, alignment))
-        return false;
-    }
-
-    old_top = allocation_info_->top();
-    high = to_space_.page_high();
-    filler_size = Heap::GetFillToAlign(old_top, alignment);
-    aligned_size_in_bytes = size_in_bytes + filler_size;
-  }
-
-  if (out_max_aligned_size) {
-    *out_max_aligned_size = aligned_size_in_bytes;
-  }
-
-  DCHECK(old_top + aligned_size_in_bytes <= high);
-
-  UpdateInlineAllocationLimit(aligned_size_in_bytes);
-  DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
-  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-  return true;
-}
-
 #if DEBUG
 void NewSpace::VerifyTop() const {
   NewSpaceBase::VerifyTop();
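Together with the previous hunk, this is the usual V8 "-inl.h" move: the body
leaves the .cc file and reappears, marked V8_INLINE, in the corresponding
inline header so that call sites which include it can inline the function. A
sketch of the pattern with a hypothetical class Foo:

// foo.h - declaration only, cheap to include broadly.
class Foo {
 public:
  bool EnsureSomething(int size_in_bytes);
};

// foo-inl.h - the definition is visible at call sites, so the compiler can
// inline it (V8_INLINE additionally hints force-inlining on supported
// compilers).
inline bool Foo::EnsureSomething(int size_in_bytes) {
  return size_in_bytes >= 0;  // placeholder body
}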
@@ -86,6 +86,31 @@ bool PagedSpace::TryFreeLast(Address object_address, int object_size) {
   return false;
 }
 
+V8_INLINE bool PagedSpace::EnsureAllocation(int size_in_bytes,
+                                            AllocationAlignment alignment,
+                                            AllocationOrigin origin,
+                                            int* out_max_aligned_size) {
+  if (!is_compaction_space()) {
+    // Start incremental marking before the actual allocation, this allows the
+    // allocation function to mark the object black when incremental marking is
+    // running.
+    heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
+        heap()->GCFlagsForIncrementalMarking(),
+        kGCCallbackScheduleIdleGarbageCollection);
+  }
+
+  // We don't know exactly how much filler we need to align until space is
+  // allocated, so assume the worst case.
+  size_in_bytes += Heap::GetMaximumFillToAlign(alignment);
+  if (out_max_aligned_size) {
+    *out_max_aligned_size = size_in_bytes;
+  }
+  if (allocation_info_->top() + size_in_bytes <= allocation_info_->limit()) {
+    return true;
+  }
+  return RefillLabMain(size_in_bytes, origin);
+}
+
 }  // namespace internal
 }  // namespace v8
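The fast path above is a plain bump-pointer bounds check against the linear
allocation buffer, padded by the worst-case alignment filler since the exact
filler is unknown until the final top is chosen. A standalone sketch under
those assumptions (hypothetical names; V8's real allocation area carries more
state):

#include <cassert>
#include <cstdint>

struct LinearAllocationArea {
  uintptr_t top;    // next free address
  uintptr_t limit;  // end of the buffer
};

// Worst-case filler for a power-of-two alignment, assuming 8-byte words.
int GetMaximumFillToAlign(uintptr_t alignment) {
  return alignment <= 8 ? 0 : static_cast<int>(alignment - 8);
}

bool CanAllocateInline(const LinearAllocationArea& lab, int size_in_bytes,
                       uintptr_t alignment) {
  size_in_bytes += GetMaximumFillToAlign(alignment);
  return lab.top + size_in_bytes <= lab.limit;
}

int main() {
  LinearAllocationArea lab{0x1000, 0x1100};   // 256-byte buffer
  assert(CanAllocateInline(lab, 0x80, 8));    // fits on the fast path
  assert(!CanAllocateInline(lab, 0x120, 8));  // would overrun the limit
  return 0;
}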
@@ -1030,31 +1030,6 @@ void PagedSpace::ReduceActiveSystemPages(
       MemoryAllocator::GetCommitPageSize());
 }
 
-bool PagedSpace::EnsureAllocation(int size_in_bytes,
-                                  AllocationAlignment alignment,
-                                  AllocationOrigin origin,
-                                  int* out_max_aligned_size) {
-  if (!is_compaction_space()) {
-    // Start incremental marking before the actual allocation, this allows the
-    // allocation function to mark the object black when incremental marking is
-    // running.
-    heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
-        heap()->GCFlagsForIncrementalMarking(),
-        kGCCallbackScheduleIdleGarbageCollection);
-  }
-
-  // We don't know exactly how much filler we need to align until space is
-  // allocated, so assume the worst case.
-  size_in_bytes += Heap::GetMaximumFillToAlign(alignment);
-  if (out_max_aligned_size) {
-    *out_max_aligned_size = size_in_bytes;
-  }
-  if (allocation_info_->top() + size_in_bytes <= allocation_info_->limit()) {
-    return true;
-  }
-  return RefillLabMain(size_in_bytes, origin);
-}
-
 // -----------------------------------------------------------------------------
 // MapSpace implementation
@@ -441,7 +441,7 @@ class V8_EXPORT_PRIVATE PagedSpace
 // -----------------------------------------------------------------------------
 // Compaction space that is used temporarily during compaction.
 
-class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
+class V8_EXPORT_PRIVATE CompactionSpace final : public PagedSpace {
  public:
   CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
                   CompactionSpaceKind compaction_space_kind)
@@ -502,7 +502,7 @@ class CompactionSpaceCollection : public Malloced {
 // -----------------------------------------------------------------------------
 // Old generation regular object space.
 
-class OldSpace : public PagedSpace {
+class OldSpace final : public PagedSpace {
  public:
   // Creates an old space object. The constructor does not allocate pages
   // from OS.
@@ -525,7 +525,7 @@ class OldSpace : public PagedSpace {
 // -----------------------------------------------------------------------------
 // Old generation code object space.
 
-class CodeSpace : public PagedSpace {
+class CodeSpace final : public PagedSpace {
  public:
   // Creates an old space object. The constructor does not allocate pages
   // from OS.
@@ -540,7 +540,7 @@ class CodeSpace : public PagedSpace {
 // -----------------------------------------------------------------------------
 // Old space for all map objects
 
-class MapSpace : public PagedSpace {
+class MapSpace final : public PagedSpace {
  public:
   // Creates a map space object.
   explicit MapSpace(Heap* heap)
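Marking these leaf space classes final lets the compiler devirtualize calls
made through pointers or references of the concrete type, which matters now
that EnsureAllocation sits on an inlined allocation fast path. A small
illustration with hypothetical classes (not V8's actual hierarchy):

struct Space {
  virtual ~Space() = default;
  virtual bool EnsureAllocation(int size) = 0;
};

struct OldSpaceLike final : Space {
  bool EnsureAllocation(int size) override { return size >= 0; }
};

bool Allocate(OldSpaceLike* space, int size) {
  // Because OldSpaceLike is final, no subclass can override
  // EnsureAllocation, so the compiler may bypass the vtable and call
  // (or inline) the method directly.
  return space->EnsureAllocation(size);
}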