Commit 06b4d0a2 authored by Michael Lippautz, committed by V8 LUCI CQ

[heap] Fix missing aligned allocation support for LocalHeap

LocalHeap only uses LABs for allocations with a size below
kMaxLabObjectSize. Larger allocations used a path that was unaware of
alignment restrictions.

Bring implementations of LocalHeap and regular PagedSpace closer
together by assuming the caller of their free list allocations takes
care of size and alignment adjustments.
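
The adjustment described above can be sketched outside of V8 (a minimal
standalone C++ example, not V8 code; kTagSize, kDoubleAlignment,
MaxFillToAlign and FillToAlign are invented stand-ins for the roles that
Heap::GetMaximumFillToAlign and Heap::GetFillToAlign play in the diff
below): the caller asks the alignment-unaware path for the object size plus
the worst-case filler, then places the object behind whatever filler is
actually needed once the address is known.

// Standalone sketch (not V8 code): caller-side alignment on top of an
// alignment-unaware allocator. kTagSize, kDoubleAlignment, MaxFillToAlign
// and FillToAlign are invented stand-ins for the corresponding V8 helpers.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

namespace {

constexpr size_t kTagSize = 8;           // assumed tagged-pointer granularity
constexpr size_t kDoubleAlignment = 16;  // assumed stricter alignment request

// Worst-case filler needed to reach `alignment` starting from any
// tag-aligned address (role of Heap::GetMaximumFillToAlign).
constexpr size_t MaxFillToAlign(size_t alignment) {
  return alignment > kTagSize ? alignment - kTagSize : 0;
}

// Filler actually needed at a concrete address (role of Heap::GetFillToAlign).
size_t FillToAlign(uintptr_t address, size_t alignment) {
  const size_t misalignment = address & (alignment - 1);
  return misalignment == 0 ? 0 : alignment - misalignment;
}

}  // namespace

int main() {
  const size_t object_size = 48;
  const size_t alignment = kDoubleAlignment;

  // 1. Request object_size plus the conservative worst-case filler from the
  //    alignment-unaware allocation path (malloc stands in for the free list;
  //    it returns at least tag-aligned memory, matching the worst-case bound).
  const size_t request = object_size + MaxFillToAlign(alignment);
  void* raw = std::malloc(request);
  if (raw == nullptr) return 1;

  // 2. Compute the filler actually required at the returned address and put
  //    the object right behind it ("precede with filler").
  const uintptr_t base = reinterpret_cast<uintptr_t>(raw);
  const size_t filler = FillToAlign(base, alignment);
  const uintptr_t object_address = base + filler;

  assert(filler <= MaxFillToAlign(alignment));
  assert(object_address % alignment == 0);
  assert(object_address + object_size <= base + request);

  std::free(raw);
  return 0;
}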

Drive-by: Use unaligned allocations when possible, which avoids a call
into PrecedeWithFiller(size) that would only bail out late for
0-sized fillers.
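
A similar hedged sketch of the drive-by, assuming a plain bump-pointer
buffer (the BumpBuffer type below is invented and only mirrors the shape of
LocalAllocationBuffer's two paths): an unaligned allocation is a bare top
increment, while the aligned variant always computes a filler first, which
for kTaggedAligned requests would always be zero-sized. The new AllocateRaw
in the diff below applies this dispatch, using lab_.AllocateRawUnaligned for
kTaggedAligned requests and lab_.AllocateRawAligned otherwise.

// Hedged sketch (not V8 code): why the tag-aligned fast path can skip the
// filler machinery entirely. BumpBuffer and its members are invented here.
#include <cstddef>
#include <cstdint>
#include <optional>

struct BumpBuffer {
  uintptr_t top;
  uintptr_t limit;

  // Fast path when only tag alignment is required: no filler logic at all.
  std::optional<uintptr_t> AllocateUnaligned(size_t size) {
    if (top + size > limit) return std::nullopt;
    const uintptr_t result = top;
    top += size;
    return result;
  }

  // Aligned path: computes the filler up front. For tag-aligned requests the
  // filler would always be zero, so routing them here only adds a
  // "create filler of size 0" step that bails out late.
  std::optional<uintptr_t> AllocateAligned(size_t size, size_t alignment) {
    const size_t misalignment = top & (alignment - 1);
    const size_t filler = misalignment == 0 ? 0 : alignment - misalignment;
    if (top + filler + size > limit) return std::nullopt;
    // A real heap would write a filler object into [top, top + filler) here.
    const uintptr_t result = top + filler;
    top = result + size;
    return result;
  }
};

int main() {
  alignas(16) static unsigned char backing[256];
  const uintptr_t base = reinterpret_cast<uintptr_t>(backing);
  BumpBuffer lab{base, base + sizeof(backing)};

  // Tag-aligned request: plain top increment, no filler computation.
  auto a = lab.AllocateUnaligned(24);
  // Stricter alignment request: goes through the filler-aware path.
  auto b = lab.AllocateAligned(32, 16);
  return (a && b && (*b % 16 == 0)) ? 0 : 1;
}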

Bug: chromium:1338687
Change-Id: I0f52c54359326a249b2ebe95bb73184ad95194f0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3721817
Reviewed-by: Nikolaos Papaspyrou <nikolaos@chromium.org>
Reviewed-by: Camillo Bruni <cbruni@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81338}
parent cf8fc474
@@ -7,6 +7,7 @@
 #include "include/v8-internal.h"
 #include "src/common/globals.h"
+#include "src/heap/allocation-result.h"
 #include "src/heap/concurrent-allocator.h"
 #include "src/heap/heap.h"
 #include "src/heap/incremental-marking.h"
@@ -18,28 +19,31 @@
 namespace v8 {
 namespace internal {
 
-AllocationResult ConcurrentAllocator::AllocateRaw(int object_size,
+AllocationResult ConcurrentAllocator::AllocateRaw(int size_in_bytes,
                                                   AllocationAlignment alignment,
                                                   AllocationOrigin origin) {
   DCHECK(!FLAG_enable_third_party_heap);
   // TODO(dinfuehr): Add support for allocation observers
 #ifdef DEBUG
   if (local_heap_) local_heap_->VerifyCurrent();
-#endif
+#endif  // DEBUG
 
-  if (object_size > kMaxLabObjectSize) {
-    return AllocateOutsideLab(object_size, alignment, origin);
+  if (size_in_bytes > kMaxLabObjectSize) {
+    return AllocateOutsideLab(size_in_bytes, alignment, origin);
   }
 
-  return AllocateInLab(object_size, alignment, origin);
-}
-
-AllocationResult ConcurrentAllocator::AllocateInLab(
-    int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
-  AllocationResult allocation = lab_.AllocateRawAligned(object_size, alignment);
-  return allocation.IsFailure()
-             ? AllocateInLabSlow(object_size, alignment, origin)
-             : allocation;
+  AllocationResult result;
+  // TODO(v8:12547): We cannot use USE_ALLOCATION_ALIGNMENT_BOOL here as
+  // JSAtomicsMutex has alignment restrictions that are not optional and is
+  // allocated using ConcurrentAllocator in the shared heap.
+  if (alignment != kTaggedAligned) {
+    result = lab_.AllocateRawAligned(size_in_bytes, alignment);
+  } else {
+    result = lab_.AllocateRawUnaligned(size_in_bytes);
+  }
+  return result.IsFailure()
+             ? AllocateInLabSlow(size_in_bytes, alignment, origin)
+             : result;
 }
 
 }  // namespace internal
@@ -127,20 +127,19 @@ void ConcurrentAllocator::UnmarkLinearAllocationArea() {
 }
 
 AllocationResult ConcurrentAllocator::AllocateInLabSlow(
-    int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
+    int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
   if (!EnsureLab(origin)) {
     return AllocationResult::Failure();
   }
 
-  AllocationResult allocation = lab_.AllocateRawAligned(object_size, alignment);
+  AllocationResult allocation =
+      lab_.AllocateRawAligned(size_in_bytes, alignment);
   DCHECK(!allocation.IsFailure());
   return allocation;
 }
 
 bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
-  auto result = space_->RawRefillLabBackground(
-      local_heap_, kLabSize, kMaxLabSize, kTaggedAligned, origin);
+  auto result = space_->RawAllocateBackground(local_heap_, kMinLabSize,
+                                              kMaxLabSize, origin);
   if (!result) return false;
 
   if (IsBlackAllocationEnabled()) {
@@ -161,16 +160,25 @@ bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
 }
 
 AllocationResult ConcurrentAllocator::AllocateOutsideLab(
-    int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
-  auto result = space_->RawRefillLabBackground(local_heap_, object_size,
-                                               object_size, alignment, origin);
+    int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
+  // Conservative estimate as we don't know the alignment of the allocation.
+  const int requested_filler_size = Heap::GetMaximumFillToAlign(alignment);
+  const int aligned_size_in_bytes = size_in_bytes + requested_filler_size;
+  auto result = space_->RawAllocateBackground(
+      local_heap_, aligned_size_in_bytes, aligned_size_in_bytes, origin);
   if (!result) return AllocationResult::Failure();
+  DCHECK_GE(result->second, aligned_size_in_bytes);
 
   HeapObject object = HeapObject::FromAddress(result->first);
+  const int filler_size = Heap::GetFillToAlign(object.address(), alignment);
+  // Actually align the allocation.
+  if (filler_size)
+    object = local_heap_->heap()->PrecedeWithFiller(object, filler_size);
 
   if (IsBlackAllocationEnabled()) {
     owning_heap()->incremental_marking()->MarkBlackBackground(object,
-                                                              object_size);
+                                                              size_in_bytes);
   }
   return AllocationResult::FromObject(object);
@@ -33,11 +33,11 @@ class StressConcurrentAllocatorTask : public CancelableTask {
 // Allocations are served from a TLAB if possible.
 class ConcurrentAllocator {
  public:
-  static const int kLabSize = 4 * KB;
-  static const int kMaxLabSize = 32 * KB;
-  static const int kMaxLabObjectSize = 2 * KB;
+  static constexpr int kMinLabSize = 4 * KB;
+  static constexpr int kMaxLabSize = 32 * KB;
+  static constexpr int kMaxLabObjectSize = 2 * KB;
 
-  explicit ConcurrentAllocator(LocalHeap* local_heap, PagedSpace* space)
+  ConcurrentAllocator(LocalHeap* local_heap, PagedSpace* space)
       : local_heap_(local_heap),
         space_(space),
         lab_(LocalAllocationBuffer::InvalidBuffer()) {}
@@ -52,16 +52,21 @@ class ConcurrentAllocator {
   void UnmarkLinearAllocationArea();
 
  private:
-  V8_EXPORT_PRIVATE AllocationResult AllocateInLabSlow(
-      int object_size, AllocationAlignment alignment, AllocationOrigin origin);
+  static_assert(
+      kMinLabSize > kMaxLabObjectSize,
+      "LAB size must be larger than max LAB object size as the fast "
+      "paths do not consider alignment. The assumption is that any object with "
+      "size <= kMaxLabObjectSize will fit into a newly allocated LAB of size "
+      "kLabSize after computing the alignment requirements.");
 
+  V8_EXPORT_PRIVATE AllocationResult
+  AllocateInLabSlow(int size_in_bytes, AllocationAlignment alignment,
+                    AllocationOrigin origin);
   bool EnsureLab(AllocationOrigin origin);
 
-  inline AllocationResult AllocateInLab(int object_size,
-                                        AllocationAlignment alignment,
-                                        AllocationOrigin origin);
-
-  V8_EXPORT_PRIVATE AllocationResult AllocateOutsideLab(
-      int object_size, AllocationAlignment alignment, AllocationOrigin origin);
+  V8_EXPORT_PRIVATE AllocationResult
+  AllocateOutsideLab(int size_in_bytes, AllocationAlignment alignment,
+                     AllocationOrigin origin);
 
   bool IsBlackAllocationEnabled() const;
@@ -34,7 +34,7 @@ class LinearAllocationArea final {
   void ResetStart() { start_ = top_; }
 
-  V8_INLINE bool CanIncrementTop(size_t bytes) {
+  V8_INLINE bool CanIncrementTop(size_t bytes) const {
     Verify();
     return (top_ + bytes) <= limit_;
   }
@@ -625,11 +625,10 @@ bool PagedSpaceBase::TryAllocationFromFreeListMain(size_t size_in_bytes,
 }
 
 base::Optional<std::pair<Address, size_t>>
-PagedSpaceBase::RawRefillLabBackground(LocalHeap* local_heap,
-                                       size_t min_size_in_bytes,
-                                       size_t max_size_in_bytes,
-                                       AllocationAlignment alignment,
-                                       AllocationOrigin origin) {
+PagedSpaceBase::RawAllocateBackground(LocalHeap* local_heap,
+                                      size_t min_size_in_bytes,
+                                      size_t max_size_in_bytes,
+                                      AllocationOrigin origin) {
   DCHECK(!is_compaction_space());
   DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
          identity() == MAP_SPACE);
@@ -639,7 +638,7 @@ PagedSpaceBase::RawRefillLabBackground(LocalHeap* local_heap,
   base::Optional<std::pair<Address, size_t>> result =
       TryAllocationFromFreeListBackground(min_size_in_bytes, max_size_in_bytes,
-                                          alignment, origin);
+                                          origin);
   if (result) return result;
 
   MarkCompactCollector* collector = heap()->mark_compact_collector();
@@ -650,8 +649,8 @@ PagedSpaceBase::RawRefillLabBackground(LocalHeap* local_heap,
     RefillFreeList();
 
     // Retry the free list allocation.
-    result = TryAllocationFromFreeListBackground(
-        min_size_in_bytes, max_size_in_bytes, alignment, origin);
+    result = TryAllocationFromFreeListBackground(min_size_in_bytes,
+                                                 max_size_in_bytes, origin);
     if (result) return result;
 
     if (IsSweepingAllowedOnThread(local_heap)) {
@@ -665,8 +664,8 @@ PagedSpaceBase::RawRefillLabBackground(LocalHeap* local_heap,
       RefillFreeList();
 
       if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
-        result = TryAllocationFromFreeListBackground(
-            min_size_in_bytes, max_size_in_bytes, alignment, origin);
+        result = TryAllocationFromFreeListBackground(min_size_in_bytes,
+                                                     max_size_in_bytes, origin);
         if (result) return result;
       }
     }
@@ -675,10 +674,7 @@ PagedSpaceBase::RawRefillLabBackground(LocalHeap* local_heap,
   if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
      heap()->CanExpandOldGenerationBackground(local_heap, AreaSize())) {
     result = ExpandBackground(max_size_in_bytes);
-    if (result) {
-      DCHECK_EQ(Heap::GetFillToAlign(result->first, alignment), 0);
-      return result;
-    }
+    if (result) return result;
   }
 
   if (collector->sweeping_in_progress()) {
@@ -690,17 +686,17 @@ PagedSpaceBase::RawRefillLabBackground(LocalHeap* local_heap,
     RefillFreeList();
 
     // Last try to acquire memory from free list.
-    return TryAllocationFromFreeListBackground(
-        min_size_in_bytes, max_size_in_bytes, alignment, origin);
+    return TryAllocationFromFreeListBackground(min_size_in_bytes,
+                                               max_size_in_bytes, origin);
   }
 
   return {};
 }
 
 base::Optional<std::pair<Address, size_t>>
-PagedSpaceBase::TryAllocationFromFreeListBackground(
-    size_t min_size_in_bytes, size_t max_size_in_bytes,
-    AllocationAlignment alignment, AllocationOrigin origin) {
+PagedSpaceBase::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
+                                                    size_t max_size_in_bytes,
+                                                    AllocationOrigin origin) {
   base::MutexGuard lock(&space_mutex_);
   DCHECK_LE(min_size_in_bytes, max_size_in_bytes);
   DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
@@ -150,10 +150,8 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
   // Allocate the requested number of bytes in the space from a background
   // thread.
   V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
-  RawRefillLabBackground(LocalHeap* local_heap, size_t min_size_in_bytes,
-                         size_t max_size_in_bytes,
-                         AllocationAlignment alignment,
-                         AllocationOrigin origin);
+  RawAllocateBackground(LocalHeap* local_heap, size_t min_size_in_bytes,
+                        size_t max_size_in_bytes, AllocationOrigin origin);
 
   size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
     if (size_in_bytes == 0) return 0;
@@ -387,7 +385,6 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
   V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
   TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
                                       size_t max_size_in_bytes,
-                                      AllocationAlignment alignment,
                                       AllocationOrigin origin);
 
   V8_WARN_UNUSED_RESULT bool TryExpand(int size_in_bytes,
@@ -162,6 +162,14 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
              : AllocationResult::FromObject(object);
 }
 
+AllocationResult LocalAllocationBuffer::AllocateRawUnaligned(
+    int size_in_bytes) {
+  return allocation_info_.CanIncrementTop(size_in_bytes)
+             ? AllocationResult::FromObject(HeapObject::FromAddress(
+                   allocation_info_.IncrementTop(size_in_bytes)))
+             : AllocationResult::Failure();
+}
+
 LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
                                                         AllocationResult result,
                                                         intptr_t size) {
@@ -452,6 +452,8 @@ class LocalAllocationBuffer {
   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
       int size_in_bytes, AllocationAlignment alignment);
 
+  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
+      int size_in_bytes);
+
   inline bool IsValid() { return allocation_info_.top() != kNullAddress; }