Commit 18de64a1 authored by Michael Lippautz, committed by V8 LUCI CQ

heap: Inline fast paths for AllocateRaw() and AllocateRawWith()

- Both fast paths are now inlined.
- Outline large object allocation, shrinking the inlined trampoline a bit
  (see the sketch after the commit header).
- Support a fast path for AllocationType::kOld from AllocateRawWith().

Bug: v8:12615, chromium:1293284
Change-Id: I8f0b9aabc6fe47e1eee159c214403ccffea5eeab
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3456082
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79048}
parent a7a0b765
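The first two bullet points describe a common code-layout technique: keep the inlined fast path tiny and move the rare case out of line. The following self-contained sketch (not V8's code; the class, names, and the SKETCH_* macros are illustrative stand-ins for V8_UNLIKELY and V8_NOINLINE) shows the shape of that pattern:

#include <cstddef>
#include <cstdint>
#include <new>

// Stand-ins for V8_UNLIKELY / V8_NOINLINE (GCC/Clang spellings).
#define SKETCH_UNLIKELY(cond) __builtin_expect(!!(cond), 0)
#define SKETCH_NOINLINE __attribute__((noinline))

class Arena {
 public:
  static constexpr size_t kMaxRegularObjectSize = 16 * 1024;

  Arena(uintptr_t start, size_t capacity)
      : top_(start), limit_(start + capacity) {}

  // Fast path, intended to be inlined at every call site: one unlikely
  // size check, one capacity check, one pointer bump.
  inline void* AllocateRaw(size_t size) {
    if (SKETCH_UNLIKELY(size > kMaxRegularObjectSize)) {
      return AllocateLarge(size);  // rare case, dispatched out of line
    }
    if (SKETCH_UNLIKELY(limit_ - top_ < size)) return nullptr;
    void* result = reinterpret_cast<void*>(top_);
    top_ += size;
    return result;
  }

 private:
  // Slow path, deliberately outlined so the inlined code stays small.
  SKETCH_NOINLINE void* AllocateLarge(size_t size) {
    return ::operator new(size);  // stand-in for a real large-object space
  }

  uintptr_t top_;
  uintptr_t limit_;
};

With this split, the compiler expands only the few fast-path instructions at each call site, while the cold large-object body lives in one place.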
@@ -241,42 +241,42 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
   if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
     allocation = tp_heap_->Allocate(size_in_bytes, type, alignment);
   } else {
-    if (AllocationType::kYoung == type) {
-      if (large_object) {
-        allocation = new_lo_space_->AllocateRaw(size_in_bytes);
-      } else {
-        allocation = new_space_->AllocateRaw(size_in_bytes, alignment, origin);
-      }
-    } else if (AllocationType::kOld == type) {
-      if (large_object) {
-        allocation = lo_space_->AllocateRaw(size_in_bytes);
-      } else {
-        allocation = old_space_->AllocateRaw(size_in_bytes, alignment, origin);
-      }
-    } else if (AllocationType::kCode == type) {
-      DCHECK_EQ(alignment, AllocationAlignment::kTaggedAligned);
-      DCHECK(AllowCodeAllocation::IsAllowed());
-      if (large_object) {
-        allocation = code_lo_space_->AllocateRaw(size_in_bytes);
-      } else {
-        allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
-      }
-    } else if (AllocationType::kMap == type) {
-      DCHECK_EQ(alignment, AllocationAlignment::kTaggedAligned);
-      allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
-    } else if (AllocationType::kReadOnly == type) {
-      DCHECK(!large_object);
-      DCHECK(CanAllocateInReadOnlySpace());
-      DCHECK_EQ(AllocationOrigin::kRuntime, origin);
-      allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
-    } else if (AllocationType::kSharedOld == type) {
-      allocation =
-          shared_old_allocator_->AllocateRaw(size_in_bytes, alignment, origin);
-    } else if (AllocationType::kSharedMap == type) {
-      allocation =
-          shared_map_allocator_->AllocateRaw(size_in_bytes, alignment, origin);
-    } else {
-      UNREACHABLE();
+    if (V8_UNLIKELY(large_object)) {
+      allocation =
+          AllocateRawLargeInternal(size_in_bytes, type, origin, alignment);
+    } else {
+      switch (type) {
+        case AllocationType::kYoung:
+          allocation =
+              new_space_->AllocateRaw(size_in_bytes, alignment, origin);
+          break;
+        case AllocationType::kOld:
+          allocation =
+              old_space_->AllocateRaw(size_in_bytes, alignment, origin);
+          break;
+        case AllocationType::kCode:
+          DCHECK_EQ(alignment, AllocationAlignment::kTaggedAligned);
+          DCHECK(AllowCodeAllocation::IsAllowed());
+          allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
+          break;
+        case AllocationType::kMap:
+          DCHECK_EQ(alignment, AllocationAlignment::kTaggedAligned);
+          allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
+          break;
+        case AllocationType::kReadOnly:
+          DCHECK(CanAllocateInReadOnlySpace());
+          DCHECK_EQ(AllocationOrigin::kRuntime, origin);
+          allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
+          break;
+        case AllocationType::kSharedMap:
+          allocation = shared_map_allocator_->AllocateRaw(size_in_bytes,
+                                                          alignment, origin);
+          break;
+        case AllocationType::kSharedOld:
+          allocation = shared_old_allocator_->AllocateRaw(size_in_bytes,
+                                                          alignment, origin);
+          break;
+      }
     }
   }
@@ -317,21 +317,15 @@ HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
   DCHECK(AllowHandleAllocation::IsAllowed());
   DCHECK(AllowHeapAllocation::IsAllowed());
   DCHECK_EQ(gc_state(), NOT_IN_GC);
-  Heap* heap = isolate()->heap();
-  if (allocation == AllocationType::kYoung &&
-      alignment == AllocationAlignment::kTaggedAligned &&
-      size <= MaxRegularHeapObjectSize(allocation) &&
-      V8_LIKELY(!FLAG_single_generation && FLAG_inline_new &&
-                FLAG_gc_interval == -1)) {
-    Address* top = heap->NewSpaceAllocationTopAddress();
-    Address* limit = heap->NewSpaceAllocationLimitAddress();
-    if (*limit - *top >= static_cast<unsigned>(size)) {
-      DCHECK(IsAligned(size, kTaggedSize));
-      HeapObject obj = HeapObject::FromAddress(*top);
-      *top += size;
-      MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size);
-      return obj;
-    }
+  if (allocation == AllocationType::kYoung) {
+    auto result = AllocateRaw(size, AllocationType::kYoung, origin, alignment);
+    HeapObject object;
+    if (result.To(&object)) return object;
+
+  } else if (allocation == AllocationType::kOld) {
+    auto result = AllocateRaw(size, AllocationType::kOld, origin, alignment);
+    HeapObject object;
+    if (result.To(&object)) return object;
   }
   switch (mode) {
     case kLightRetry:
...
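With the hand-written bump-pointer block removed, the fast path of AllocateRawWith() is just a call into the now-inlined AllocateRaw() plus a result check; only failures reach the mode-dependent retry switch. A self-contained sketch of that control flow, using simplified stand-in types rather than V8's:

#include <cstddef>
#include <new>

struct HeapObject {
  void* address = nullptr;
};

// Simplified stand-in for v8::internal::AllocationResult.
struct AllocationResult {
  HeapObject object;
  bool ok = false;
  bool To(HeapObject* out) const {  // extract on success, like V8's To()
    if (ok) *out = object;
    return ok;
  }
};

enum class AllocationType { kYoung, kOld };

// Trivial stub standing in for the real inlined allocator.
AllocationResult AllocateRaw(size_t size, AllocationType type) {
  return {HeapObject{::operator new(size)}, true};
}

HeapObject AllocateRawWith(size_t size, AllocationType type) {
  // Fast path: delegate to AllocateRaw() and unwrap the result.
  if (type == AllocationType::kYoung || type == AllocationType::kOld) {
    HeapObject object;
    if (AllocateRaw(size, type).To(&object)) return object;
  }
  // Slow path: retry after GC or fail, depending on the retry mode.
  return HeapObject{};
}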
@@ -5648,6 +5648,26 @@ HeapObject Heap::AllocateRawWithLightRetrySlowPath(
   return HeapObject();
 }
 
+AllocationResult Heap::AllocateRawLargeInternal(int size_in_bytes,
+                                                AllocationType allocation,
+                                                AllocationOrigin origin,
+                                                AllocationAlignment alignment) {
+  DCHECK_GT(size_in_bytes, MaxRegularHeapObjectSize(allocation));
+  switch (allocation) {
+    case AllocationType::kYoung:
+      return new_lo_space_->AllocateRaw(size_in_bytes);
+    case AllocationType::kOld:
+      return lo_space_->AllocateRaw(size_in_bytes);
+    case AllocationType::kCode:
+      return code_lo_space_->AllocateRaw(size_in_bytes);
+    case AllocationType::kMap:
+    case AllocationType::kReadOnly:
+    case AllocationType::kSharedMap:
+    case AllocationType::kSharedOld:
+      UNREACHABLE();
+  }
+}
+
 HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
     int size, AllocationType allocation, AllocationOrigin origin,
     AllocationAlignment alignment) {
...
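Note that the new helper switches over every AllocationType enumerator with no default case. That is a deliberate C++ idiom: with -Wswitch (enabled by -Wall in GCC and Clang), the compiler warns if a later change adds an enumerator without handling it here, while UNREACHABLE() documents that map, read-only, and shared objects never take the large-object path. A minimal illustration of the idiom, not V8 code:

enum class Kind { kA, kB, kC };

int Dispatch(Kind kind) {
  switch (kind) {
    case Kind::kA:
      return 1;
    case Kind::kB:
      return 2;
    case Kind::kC:  // no default: a newly added Kind triggers -Wswitch
      return 3;
  }
  __builtin_unreachable();  // stand-in for V8's UNREACHABLE()
}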
@@ -2089,19 +2089,26 @@ class Heap {
   // hardware and OS allow. This is the single choke-point for allocations
   // performed by the runtime and should not be bypassed (to extend this to
   // inlined allocations, use the Heap::DisableInlineAllocation() support).
-  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
-      int size_in_bytes, AllocationType allocation,
-      AllocationOrigin origin = AllocationOrigin::kRuntime,
-      AllocationAlignment alignment = kTaggedAligned);
+  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+  AllocateRaw(int size_in_bytes, AllocationType allocation,
+              AllocationOrigin origin = AllocationOrigin::kRuntime,
+              AllocationAlignment alignment = kTaggedAligned);
+
+  // Allocates an uninitialized large object. Used as dispatch by
+  // `AllocateRaw()` for large objects. Do not call this from anywhere else.
+  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
+  AllocateRawLargeInternal(int size_in_bytes, AllocationType allocation,
+                           AllocationOrigin origin = AllocationOrigin::kRuntime,
+                           AllocationAlignment alignment = kTaggedAligned);
 
   // This method will try to allocate objects quickly (AllocationType::kYoung)
   // otherwise it falls back to a slower path indicated by the mode.
   enum AllocationRetryMode { kLightRetry, kRetryOrFail };
   template <AllocationRetryMode mode>
-  V8_WARN_UNUSED_RESULT inline HeapObject AllocateRawWith(
-      int size, AllocationType allocation,
-      AllocationOrigin origin = AllocationOrigin::kRuntime,
-      AllocationAlignment alignment = kTaggedAligned);
+  V8_WARN_UNUSED_RESULT V8_INLINE HeapObject
+  AllocateRawWith(int size, AllocationType allocation,
+                  AllocationOrigin origin = AllocationOrigin::kRuntime,
+                  AllocationAlignment alignment = kTaggedAligned);
 
   // Call AllocateRawWith with kRetryOrFail. Matches the method in LocalHeap.
   V8_WARN_UNUSED_RESULT inline Address AllocateRawOrFail(
...
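The header change swaps plain `inline` for `V8_INLINE` on both declarations. Plain `inline` mostly affects linkage and is only a weak optimization hint; `V8_INLINE` expands to a forced-inlining attribute on supported compilers, which is what actually guarantees the fast paths land in the caller. A rough approximation of what the macro does (the real definition in include/v8config.h has additional configuration checks):

// Simplified approximation of V8_INLINE, for illustration only.
#if defined(__GNUC__) || defined(__clang__)
#define MY_INLINE inline __attribute__((always_inline))
#elif defined(_MSC_VER)
#define MY_INLINE __forceinline
#else
#define MY_INLINE inline
#endif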