Commit 9d4dcce7 authored by Dominik Inführ, committed by Commit Bot

[heap] More explicit fast path for new space allocation

Introduce an explicit fast path for allocation from the LAB. The slow
path refills the LAB and allocates again. Other changes:

1) Move slow path methods out of the header file
2) AllocateRaw(Aligned|Unaligned) are now private methods. All
NewSpace allocations now need to go through AllocateRaw.

Bug: v8:10315
Change-Id: Iee2bd7b74aa49be8b20d89fefeb2e087575d532c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2319987
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69061}
parent f32972f8
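The shape of the change is easier to see outside the diff. Below is a minimal, self-contained sketch of the fast-path/slow-path split described above; `Lab`, `AllocateFast`, `AllocateSlow`, and `RefillLab` are hypothetical stand-ins for illustration, not V8 API:

```cpp
#include <cstddef>
#include <cstdint>
#include <optional>

// Hypothetical stand-in for a linear allocation buffer (LAB).
struct Lab {
  uintptr_t top = 0;
  uintptr_t limit = 0;
};

// Fast path: a pure bump-pointer allocation with no refill logic,
// small enough to inline at every call site.
inline std::optional<uintptr_t> AllocateFast(Lab& lab, size_t size) {
  if (lab.limit - lab.top < size) return std::nullopt;  // LAB exhausted.
  uintptr_t result = lab.top;
  lab.top += size;
  return result;
}

// Refill stand-in: in V8 this role is played by EnsureAllocation(),
// which may grow the space or trigger a garbage collection.
bool RefillLab(Lab& lab, size_t size) {
  static uintptr_t arena = 0x10000;
  lab.top = arena;
  lab.limit = arena + 4096;
  arena += 4096;
  return size <= 4096;
}

// Slow path, kept out of line: refill the LAB and allocate again.
std::optional<uintptr_t> AllocateSlow(Lab& lab, size_t size) {
  if (!RefillLab(lab, size)) return std::nullopt;  // Caller retries after GC.
  return AllocateFast(lab, size);
}

// Single public entry point, as AllocateRaw now is for NewSpace:
// try the fast path first, fall back to the slow path on failure.
std::optional<uintptr_t> Allocate(Lab& lab, size_t size) {
  if (auto fast = AllocateFast(lab, size)) return fast;
  return AllocateSlow(lab, size);
}

int main() {
  Lab lab;
  return Allocate(lab, 64).has_value() ? 0 : 1;
}
```

The design motivation is the usual one for bump-pointer allocators: the fast path stays branch-light and inlinable, while the refill logic lives out of line where its size does not hurt code locality.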
......
@@ -1934,7 +1934,8 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
             AllocateRaw(size, type, AllocationOrigin::kRuntime, align);
 #else
         if (space == NEW_SPACE) {
-          allocation = new_space()->AllocateRawUnaligned(size);
+          allocation = new_space()->AllocateRaw(
+              size, AllocationAlignment::kWordAligned);
         } else if (space == RO_SPACE) {
           allocation = read_only_space()->AllocateRaw(
               size, AllocationAlignment::kWordAligned);
......
@@ -5,6 +5,8 @@
 #ifndef V8_HEAP_NEW_SPACES_INL_H_
 #define V8_HEAP_NEW_SPACES_INL_H_
 
+#include "src/common/globals.h"
+#include "src/heap/heap.h"
 #include "src/heap/new-spaces.h"
 #include "src/heap/spaces-inl.h"
 #include "src/objects/tagged-impl.h"
......
@@ -82,33 +84,35 @@ HeapObject SemiSpaceObjectIterator::Next() {
 // -----------------------------------------------------------------------------
 // NewSpace
 
-AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
-                                              AllocationAlignment alignment,
-                                              AllocationOrigin origin) {
-  Address top = allocation_info_.top();
-  int filler_size = Heap::GetFillToAlign(top, alignment);
-  int aligned_size_in_bytes = size_in_bytes + filler_size;
-
-  if (allocation_info_.limit() - top <
-      static_cast<uintptr_t>(aligned_size_in_bytes)) {
-    // See if we can create room.
-    if (!EnsureAllocation(size_in_bytes, alignment)) {
-      return AllocationResult::Retry();
-    }
-
-    top = allocation_info_.top();
-    filler_size = Heap::GetFillToAlign(top, alignment);
-    aligned_size_in_bytes = size_in_bytes + filler_size;
-  }
+AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
+                                       AllocationAlignment alignment,
+                                       AllocationOrigin origin) {
+  AllocationResult result;
+
+  if (alignment != kWordAligned) {
+    result = AllocateFastAligned(size_in_bytes, alignment, origin);
+  } else {
+    result = AllocateFastUnaligned(size_in_bytes, origin);
+  }
+
+  if (!result.IsRetry()) {
+    return result;
+  } else {
+    return AllocateRawSlow(size_in_bytes, alignment, origin);
+  }
+}
 
+AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
+                                                 AllocationOrigin origin) {
+  Address top = allocation_info_.top();
+  if (allocation_info_.limit() < top + size_in_bytes) {
+    return AllocationResult::Retry();
+  }
+
   HeapObject obj = HeapObject::FromAddress(top);
-  allocation_info_.set_top(top + aligned_size_in_bytes);
+  allocation_info_.set_top(top + size_in_bytes);
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 
-  if (filler_size > 0) {
-    obj = Heap::PrecedeWithFiller(ReadOnlyRoots(heap()), obj, filler_size);
-  }
-
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
 
   if (FLAG_trace_allocations_origins) {
......
@@ -118,22 +122,26 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
   return obj;
 }
 
-AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
-                                                AllocationOrigin origin) {
+AllocationResult NewSpace::AllocateFastAligned(int size_in_bytes,
+                                               AllocationAlignment alignment,
+                                               AllocationOrigin origin) {
   Address top = allocation_info_.top();
-  if (allocation_info_.limit() < top + size_in_bytes) {
-    // See if we can create room.
-    if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
-      return AllocationResult::Retry();
-    }
+  int filler_size = Heap::GetFillToAlign(top, alignment);
+  int aligned_size_in_bytes = size_in_bytes + filler_size;
 
-    top = allocation_info_.top();
+  if (allocation_info_.limit() - top <
+      static_cast<uintptr_t>(aligned_size_in_bytes)) {
+    return AllocationResult::Retry();
   }
 
   HeapObject obj = HeapObject::FromAddress(top);
-  allocation_info_.set_top(top + size_in_bytes);
+  allocation_info_.set_top(top + aligned_size_in_bytes);
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 
+  if (filler_size > 0) {
+    obj = Heap::PrecedeWithFiller(ReadOnlyRoots(heap()), obj, filler_size);
+  }
+
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
 
   if (FLAG_trace_allocations_origins) {
......
@@ -143,30 +151,6 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
   return obj;
 }
 
-AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
-                                       AllocationAlignment alignment,
-                                       AllocationOrigin origin) {
-  if (top() < top_on_previous_step_) {
-    // Generated code decreased the top() pointer to do folded allocations
-    DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
-              Page::FromAllocationAreaAddress(top_on_previous_step_));
-    top_on_previous_step_ = top();
-  }
-#ifdef V8_HOST_ARCH_32_BIT
-  return alignment != kWordAligned
-             ? AllocateRawAligned(size_in_bytes, alignment, origin)
-             : AllocateRawUnaligned(size_in_bytes, origin);
-#else
-#ifdef V8_COMPRESS_POINTERS
-  // TODO(ishell, v8:8875): Consider using aligned allocations once the
-  // allocation alignment inconsistency is fixed. For now we keep using
-  // unaligned access since both x64 and arm64 architectures (where pointer
-  // compression is supported) allow unaligned access to doubles and full words.
-#endif  // V8_COMPRESS_POINTERS
-  return AllocateRawUnaligned(size_in_bytes, origin);
-#endif
-}
-
 V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
     int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
   base::MutexGuard guard(&mutex_);
......
@@ -563,6 +563,91 @@ std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
   return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
 }
 
+AllocationResult NewSpace::AllocateRawSlow(int size_in_bytes,
+                                           AllocationAlignment alignment,
+                                           AllocationOrigin origin) {
+  if (top() < top_on_previous_step_) {
+    // Generated code decreased the top() pointer to do folded allocations
+    DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
+              Page::FromAllocationAreaAddress(top_on_previous_step_));
+    top_on_previous_step_ = top();
+  }
+#ifdef V8_HOST_ARCH_32_BIT
+  return alignment != kWordAligned
+             ? AllocateRawAligned(size_in_bytes, alignment, origin)
+             : AllocateRawUnaligned(size_in_bytes, origin);
+#else
+#ifdef V8_COMPRESS_POINTERS
+  // TODO(ishell, v8:8875): Consider using aligned allocations once the
+  // allocation alignment inconsistency is fixed. For now we keep using
+  // unaligned access since both x64 and arm64 architectures (where pointer
+  // compression is supported) allow unaligned access to doubles and full words.
+#endif  // V8_COMPRESS_POINTERS
+  return AllocateRawUnaligned(size_in_bytes, origin);
+#endif
+}
+
+AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
+                                                AllocationOrigin origin) {
+  Address top = allocation_info_.top();
+  if (allocation_info_.limit() < top + size_in_bytes) {
+    // See if we can create room.
+    if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
+      return AllocationResult::Retry();
+    }
+
+    top = allocation_info_.top();
+  }
+
+  HeapObject obj = HeapObject::FromAddress(top);
+  allocation_info_.set_top(top + size_in_bytes);
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+
+  if (FLAG_trace_allocations_origins) {
+    UpdateAllocationOrigins(origin);
+  }
+
+  return obj;
+}
+
+AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
+                                              AllocationAlignment alignment,
+                                              AllocationOrigin origin) {
+  Address top = allocation_info_.top();
+  int filler_size = Heap::GetFillToAlign(top, alignment);
+  int aligned_size_in_bytes = size_in_bytes + filler_size;
+
+  if (allocation_info_.limit() - top <
+      static_cast<uintptr_t>(aligned_size_in_bytes)) {
+    // See if we can create room.
+    if (!EnsureAllocation(size_in_bytes, alignment)) {
+      return AllocationResult::Retry();
+    }
+
+    top = allocation_info_.top();
+    filler_size = Heap::GetFillToAlign(top, alignment);
+    aligned_size_in_bytes = size_in_bytes + filler_size;
+  }
+
+  HeapObject obj = HeapObject::FromAddress(top);
+  allocation_info_.set_top(top + aligned_size_in_bytes);
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+  if (filler_size > 0) {
+    obj = Heap::PrecedeWithFiller(ReadOnlyRoots(heap()), obj, filler_size);
+  }
+
+  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+
+  if (FLAG_trace_allocations_origins) {
+    UpdateAllocationOrigins(origin);
+  }
+
+  return obj;
+}
+
 #ifdef VERIFY_HEAP
 // We do not use the SemiSpaceObjectIterator because verification doesn't assume
 // that it works (it depends on the invariants we are checking).
......
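For readers unfamiliar with the filler arithmetic in AllocateRawAligned above: the space reserved is size_in_bytes plus whatever padding makes the object's address satisfy the requested alignment. The following is a simplified sketch of that computation, assuming power-of-two byte alignments; V8's actual GetFillToAlign takes an AllocationAlignment enum and handles cases like kDoubleUnaligned as well:

```cpp
#include <cassert>
#include <cstdint>

// Simplified fill-to-align computation: how many filler bytes must
// precede an object at `addr` so that the object itself is
// `alignment`-byte aligned. Assumes `alignment` is a power of two.
constexpr int FillToAlign(uintptr_t addr, int alignment) {
  int misalignment = static_cast<int>(addr & (alignment - 1));
  return misalignment == 0 ? 0 : alignment - misalignment;
}

int main() {
  assert(FillToAlign(0x1000, 8) == 0);  // Already aligned: no filler.
  assert(FillToAlign(0x1004, 8) == 4);  // 4 filler bytes, then the object.
  // The LAB must therefore have room for size_in_bytes + filler_size,
  // which is exactly the aligned_size_in_bytes check in the diff above.
  return 0;
}
```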
@@ -391,13 +391,6 @@ class V8_EXPORT_PRIVATE NewSpace
   // Set the age mark in the active semispace.
   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
 
-  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
-  AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
-                     AllocationOrigin origin = AllocationOrigin::kRuntime);
-
-  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRawUnaligned(
-      int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
-
   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
   AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
               AllocationOrigin origin = AllocationOrigin::kRuntime);
......
@@ -482,6 +475,25 @@ class V8_EXPORT_PRIVATE NewSpace
   SemiSpace from_space_;
   VirtualMemory reservation_;
 
+  // Internal allocation methods.
+  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+  AllocateFastAligned(int size_in_bytes, AllocationAlignment alignment,
+                      AllocationOrigin origin);
+
+  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+  AllocateFastUnaligned(int size_in_bytes, AllocationOrigin origin);
+
+  V8_WARN_UNUSED_RESULT AllocationResult
+  AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
+                  AllocationOrigin origin);
+
+  V8_WARN_UNUSED_RESULT AllocationResult
+  AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
+                     AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+  V8_WARN_UNUSED_RESULT AllocationResult AllocateRawUnaligned(
+      int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
+
   bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
 
   bool SupportsInlineAllocation() override { return true; }
......
@@ -33,6 +33,7 @@
 #include "src/codegen/assembler-inl.h"
 #include "src/codegen/compilation-cache.h"
 #include "src/codegen/macro-assembler-inl.h"
+#include "src/common/globals.h"
 #include "src/debug/debug.h"
 #include "src/deoptimizer/deoptimizer.h"
 #include "src/execution/execution.h"
......
@@ -1712,8 +1713,7 @@ TEST(TestAlignmentCalculations) {
 static HeapObject NewSpaceAllocateAligned(int size,
                                           AllocationAlignment alignment) {
   Heap* heap = CcTest::heap();
-  AllocationResult allocation =
-      heap->new_space()->AllocateRawAligned(size, alignment);
+  AllocationResult allocation = heap->new_space()->AllocateRaw(size, alignment);
   HeapObject obj;
   allocation.To(&obj);
   heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
......
@@ -3662,9 +3662,9 @@ TEST(Regress169928) {
   // We need filler the size of AllocationMemento object, plus an extra
   // fill pointer value.
   HeapObject obj;
-  AllocationResult allocation =
-      CcTest::heap()->new_space()->AllocateRawUnaligned(
-          AllocationMemento::kSize + kTaggedSize);
+  AllocationResult allocation = CcTest::heap()->new_space()->AllocateRaw(
+      AllocationMemento::kSize + kTaggedSize,
+      AllocationAlignment::kWordAligned);
   CHECK(allocation.To(&obj));
   Address addr_obj = obj.address();
   CcTest::heap()->CreateFillerObjectAt(addr_obj,
......
@@ -274,9 +274,10 @@ TEST(NewSpace) {
   CHECK(new_space.MaximumCapacity());
 
   while (new_space.Available() >= kMaxRegularHeapObjectSize) {
-    CHECK(new_space.Contains(
-        new_space.AllocateRawUnaligned(kMaxRegularHeapObjectSize)
-            .ToObjectChecked()));
+    CHECK(new_space.Contains(new_space
+                                 .AllocateRaw(kMaxRegularHeapObjectSize,
+                                              AllocationAlignment::kWordAligned)
+                                 .ToObjectChecked()));
   }
 
   new_space.TearDown();
......
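All of the updated test call sites follow the same result protocol: AllocateRaw returns an AllocationResult that either converts to an object via To() or signals a retry. A toy model of that protocol is sketched below; `Result` is a hypothetical stand-in for illustration, not V8's AllocationResult:

```cpp
#include <cstdint>
#include <cstdio>

// Toy model of the result protocol used at the call sites above: a
// result either holds an address or signals that the caller must
// retry (typically after a garbage collection).
class Result {
 public:
  static Result Retry() { return Result(0, true); }
  static Result Of(uintptr_t addr) { return Result(addr, false); }
  bool IsRetry() const { return retry_; }
  // Mirrors the To() pattern: returns true and the address on success.
  bool To(uintptr_t* out) const {
    if (retry_) return false;
    *out = addr_;
    return true;
  }

 private:
  Result(uintptr_t addr, bool retry) : addr_(addr), retry_(retry) {}
  uintptr_t addr_;
  bool retry_;
};

int main() {
  Result r = Result::Of(0x1000);
  uintptr_t obj = 0;
  if (r.To(&obj)) {
    std::printf("allocated at %#llx\n", static_cast<unsigned long long>(obj));
  }
  return Result::Retry().IsRetry() ? 0 : 1;
}
```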