Commit 5d8ad53a authored by Michael Lippautz, committed by V8 LUCI CQ

heap: Remove retry space from AllocationResult

There is only a single callsite that retries failed allocations, and it
can already determine the proper GC to invoke without threading the
space backwards through AllocationResult.

Bug: v8:12615
Change-Id: I5d5d886162b3eca33eb2fe7bde1e113cd08a094c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3468905
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79143}
parent 11960b13
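For orientation, a minimal sketch of the retry pattern this change enables. The identifiers are taken from the hunks below; the snippet itself is illustrative and not part of the diff:

// Before: a failed AllocationResult carried the space to collect.
if (alloc.IsFailure()) {
  CollectGarbage(alloc.ToGarbageCollectionSpace(),
                 GarbageCollectionReason::kAllocationFailure);
}

// After: the retry callsite derives the space from the requested
// AllocationType instead (AllocationTypeToGCSpace is added by this CL).
if (alloc.IsFailure()) {
  CollectGarbage(AllocationTypeToGCSpace(allocation),
                 GarbageCollectionReason::kAllocationFailure);
}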
@@ -17,19 +17,17 @@ namespace internal {
 // allocation that can be turned into an object or a failed attempt.
 class AllocationResult final {
  public:
-  static AllocationResult Failure(AllocationSpace space) {
-    return AllocationResult(space);
-  }
+  static AllocationResult Failure() { return AllocationResult(); }

   static AllocationResult FromObject(HeapObject heap_object) {
     return AllocationResult(heap_object);
   }

-  // Empty constructor creates a failed result that will turn into a full
-  // garbage collection.
-  AllocationResult() : AllocationResult(AllocationSpace::OLD_SPACE) {}
+  // Empty constructor creates a failed result. The callsite determines which
+  // GC to invoke based on the requested allocation.
+  AllocationResult() = default;

-  bool IsFailure() const { return object_.IsSmi(); }
+  bool IsFailure() const { return object_.is_null(); }

   template <typename T>
   bool To(T* obj) const {
@@ -53,19 +51,10 @@ class AllocationResult final {
     return HeapObject::cast(object_).address();
   }

-  // Returns the space that should be passed to a garbage collection call.
-  AllocationSpace ToGarbageCollectionSpace() const {
-    DCHECK(IsFailure());
-    return static_cast<AllocationSpace>(Smi::ToInt(object_));
-  }

  private:
-  explicit AllocationResult(AllocationSpace space)
-      : object_(Smi::FromInt(static_cast<int>(space))) {}
   explicit AllocationResult(HeapObject heap_object) : object_(heap_object) {}

-  Object object_;
+  HeapObject object_;
 };

 STATIC_ASSERT(sizeof(AllocationResult) == kSystemPointerSize);
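With the retry space gone, the failed state no longer needs a Smi-encoded space: object_ can be a plain HeapObject and failure is simply the null object. A standalone sketch in plain C++ (toy stand-in types, not V8's) illustrating the same encoding and that it still fits in a single word:

#include <cassert>

// Stand-in for HeapObject: a pointer whose null value means "no object".
struct ObjectRef {
  void* ptr = nullptr;
  bool is_null() const { return ptr == nullptr; }
};

class Result {
 public:
  static Result Failure() { return Result(); }  // null payload
  static Result FromObject(ObjectRef object) { return Result(object); }
  bool IsFailure() const { return object_.is_null(); }

 private:
  Result() = default;
  explicit Result(ObjectRef object) : object_(object) {}
  ObjectRef object_;  // failure == null, success == the allocated object
};

static_assert(sizeof(Result) == sizeof(void*), "still a single word");

int main() {
  int dummy = 0;
  assert(Result::Failure().IsFailure());
  assert(!Result::FromObject({&dummy}).IsFailure());
}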
@@ -122,7 +122,7 @@ void ConcurrentAllocator::UnmarkLinearAllocationArea() {
 AllocationResult ConcurrentAllocator::AllocateInLabSlow(
     int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
   if (!EnsureLab(origin)) {
-    return AllocationResult::Failure(space_->identity());
+    return AllocationResult::Failure();
   }
   AllocationResult allocation = lab_.AllocateRawAligned(object_size, alignment);
@@ -157,7 +157,7 @@ AllocationResult ConcurrentAllocator::AllocateOutsideLab(
     int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
   auto result = space_->RawRefillLabBackground(local_heap_, object_size,
                                                object_size, alignment, origin);
-  if (!result) return AllocationResult::Failure(space_->identity());
+  if (!result) return AllocationResult::Failure();
   HeapObject object = HeapObject::FromAddress(result->first);
@@ -84,12 +84,12 @@ AllocationResult EvacuationAllocator::AllocateInLAB(
     int object_size, AllocationAlignment alignment) {
   AllocationResult allocation;
   if (!new_space_lab_.IsValid() && !NewLocalAllocationBuffer()) {
-    return AllocationResult::Failure(OLD_SPACE);
+    return AllocationResult::Failure();
   }
   allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
   if (allocation.IsFailure()) {
     if (!NewLocalAllocationBuffer()) {
-      return AllocationResult::Failure(OLD_SPACE);
+      return AllocationResult::Failure();
     } else {
       allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
       CHECK(!allocation.IsFailure());
@@ -194,8 +194,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
   if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
     if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
-      AllocationSpace space = FLAG_single_generation ? OLD_SPACE : NEW_SPACE;
-      return AllocationResult::Failure(space);
+      return AllocationResult::Failure();
     }
   }
 #endif  // V8_ENABLE_ALLOCATION_TIMEOUT
@@ -480,6 +480,8 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
     return GarbageCollector::MARK_COMPACTOR;
   }
+  DCHECK(!FLAG_single_generation);
+  DCHECK(!FLAG_gc_global);
   // Default
   *reason = nullptr;
   return YoungGenerationCollector();
@@ -5623,6 +5625,26 @@ void Heap::DisableInlineAllocation() {
   }
 }
+namespace {
+
+constexpr AllocationSpace AllocationTypeToGCSpace(AllocationType type) {
+  switch (type) {
+    case AllocationType::kYoung:
+      return NEW_SPACE;
+    case AllocationType::kOld:
+    case AllocationType::kCode:
+    case AllocationType::kMap:
+      // OLD_SPACE indicates full GC.
+      return OLD_SPACE;
+    case AllocationType::kReadOnly:
+    case AllocationType::kSharedMap:
+    case AllocationType::kSharedOld:
+      UNREACHABLE();
+  }
+}
+
+}  // namespace
 HeapObject Heap::AllocateRawWithLightRetrySlowPath(
     int size, AllocationType allocation, AllocationOrigin origin,
     AllocationAlignment alignment) {
@@ -5644,7 +5666,7 @@ HeapObject Heap::AllocateRawWithLightRetrySlowPath(
     if (IsSharedAllocationType(allocation)) {
       CollectSharedGarbage(GarbageCollectionReason::kAllocationFailure);
     } else {
-      CollectGarbage(alloc.ToGarbageCollectionSpace(),
+      CollectGarbage(AllocationTypeToGCSpace(allocation),
                      GarbageCollectionReason::kAllocationFailure);
     }
     alloc = AllocateRaw(size, allocation, origin, alignment);
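For context, a sketch of how the retry slow path reads after this change. Only the GC-space selection is taken from the hunk above; the surrounding loop, the To(&result) checks, and the retry count are assumptions for illustration, not part of this diff:

// Sketch only; the structure around the shown hunk is assumed.
HeapObject Heap::AllocateRawWithLightRetrySlowPath(
    int size, AllocationType allocation, AllocationOrigin origin,
    AllocationAlignment alignment) {
  HeapObject result;
  AllocationResult alloc = AllocateRaw(size, allocation, origin, alignment);
  if (alloc.To(&result)) return result;

  // Retry a bounded number of times, triggering a GC chosen from the
  // requested AllocationType rather than from the failed result.
  for (int i = 0; i < 2; i++) {
    if (IsSharedAllocationType(allocation)) {
      CollectSharedGarbage(GarbageCollectionReason::kAllocationFailure);
    } else {
      CollectGarbage(AllocationTypeToGCSpace(allocation),
                     GarbageCollectionReason::kAllocationFailure);
    }
    alloc = AllocateRaw(size, allocation, origin, alignment);
    if (alloc.To(&result)) return result;
  }
  return HeapObject();  // caller handles the failure
}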
@@ -135,11 +135,11 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
   // If so, fail the allocation.
   if (!heap()->CanExpandOldGeneration(object_size) ||
       !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
-    return AllocationResult::Failure(identity());
+    return AllocationResult::Failure();
   }
   LargePage* page = AllocateLargePage(object_size, executable);
-  if (page == nullptr) return AllocationResult::Failure(identity());
+  if (page == nullptr) return AllocationResult::Failure();
   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
   HeapObject object = page->GetObject();
   UpdatePendingObject(object);
@@ -171,11 +171,11 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
   // If so, fail the allocation.
   if (!heap()->CanExpandOldGenerationBackground(local_heap, object_size) ||
       !heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap)) {
-    return AllocationResult::Failure(identity());
+    return AllocationResult::Failure();
   }
   LargePage* page = AllocateLargePage(object_size, executable);
-  if (page == nullptr) return AllocationResult::Failure(identity());
+  if (page == nullptr) return AllocationResult::Failure();
   page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
   HeapObject object = page->GetObject();
   heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
@@ -486,16 +486,16 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
   // Do not allocate more objects if promoting the existing object would exceed
   // the old generation capacity.
   if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
-    return AllocationResult::Failure(identity());
+    return AllocationResult::Failure();
   }
   // Allocation for the first object must succeed independent from the capacity.
   if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
-    return AllocationResult::Failure(identity());
+    return AllocationResult::Failure();
   }
   LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
-  if (page == nullptr) return AllocationResult::Failure(identity());
+  if (page == nullptr) return AllocationResult::Failure();
   // The size of the first object may exceed the capacity.
   capacity_ = std::max(capacity_, SizeOfObjects());
@@ -109,7 +109,7 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
 AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
                                                  AllocationOrigin origin) {
   if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
-    return AllocationResult::Failure(NEW_SPACE);
+    return AllocationResult::Failure();
   }
   HeapObject obj =
       HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes));
@@ -132,7 +132,7 @@ AllocationResult NewSpace::AllocateFastAligned(
   int aligned_size_in_bytes = size_in_bytes + filler_size;
   if (!allocation_info_->CanIncrementTop(aligned_size_in_bytes)) {
-    return AllocationResult::Failure(NEW_SPACE);
+    return AllocationResult::Failure();
   }
   HeapObject obj = HeapObject::FromAddress(
       allocation_info_->IncrementTop(aligned_size_in_bytes));
@@ -619,7 +619,7 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
                                                 AllocationOrigin origin) {
   DCHECK(!FLAG_enable_third_party_heap);
   if (!EnsureAllocation(size_in_bytes, kTaggedAligned)) {
-    return AllocationResult::Failure(NEW_SPACE);
+    return AllocationResult::Failure();
   }
   DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
@@ -638,7 +638,7 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
                                               AllocationOrigin origin) {
   DCHECK(!FLAG_enable_third_party_heap);
   if (!EnsureAllocation(size_in_bytes, alignment)) {
-    return AllocationResult::Failure(NEW_SPACE);
+    return AllocationResult::Failure();
   }
   DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
@@ -95,7 +95,7 @@ bool PagedSpace::EnsureLabMain(int size_in_bytes, AllocationOrigin origin) {
 AllocationResult PagedSpace::AllocateFastUnaligned(int size_in_bytes) {
   if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
-    return AllocationResult::Failure(identity());
+    return AllocationResult::Failure();
   }
   return AllocationResult::FromObject(
       HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes)));
@@ -108,7 +108,7 @@ AllocationResult PagedSpace::AllocateFastAligned(
   int filler_size = Heap::GetFillToAlign(current_top, alignment);
   int aligned_size = filler_size + size_in_bytes;
   if (!allocation_info_->CanIncrementTop(aligned_size)) {
-    return AllocationResult::Failure(identity());
+    return AllocationResult::Failure();
   }
   HeapObject obj =
       HeapObject::FromAddress(allocation_info_->IncrementTop(aligned_size));
@@ -123,7 +123,7 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
                                                   AllocationOrigin origin) {
   DCHECK(!FLAG_enable_third_party_heap);
   if (!EnsureLabMain(size_in_bytes, origin)) {
-    return AllocationResult::Failure(identity());
+    return AllocationResult::Failure();
   }
   AllocationResult result = AllocateFastUnaligned(size_in_bytes);
@@ -152,7 +152,7 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
   int filler_size = Heap::GetMaximumFillToAlign(alignment);
   allocation_size += filler_size;
   if (!EnsureLabMain(allocation_size, origin)) {
-    return AllocationResult::Failure(identity());
+    return AllocationResult::Failure();
   }
   int aligned_size_in_bytes;
   AllocationResult result =
@@ -140,7 +140,7 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
   int filler_size = Heap::GetFillToAlign(current_top, alignment);
   int aligned_size = filler_size + size_in_bytes;
   if (!allocation_info_.CanIncrementTop(aligned_size)) {
-    return AllocationResult::Failure(NEW_SPACE);
+    return AllocationResult::Failure();
   }
   HeapObject object =
       HeapObject::FromAddress(allocation_info_.IncrementTop(aligned_size));