Commit 06b4d0a2 authored by Michael Lippautz, committed by V8 LUCI CQ

[heap] Fix missing aligned allocation support for LocalHeap

LocalHeap only uses LABs for allocations with a size below
kMaxLabObjectSize. Larger allocations used a path that was unaware of
alignment restrictions.
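
The dispatch in question looks roughly like the sketch below. This is a
standalone simplification, not the actual V8 declarations: the stand-in
types and the AllocateRawSketch/AllocateInLab/AllocateOutsideLab stubs are
illustrative only. Requests up to kMaxLabObjectSize are bump-allocated from
the thread-local LAB; anything larger goes to the free-list path, and it was
this second path that ignored the requested alignment.

  // Standalone sketch of the size-based dispatch (stand-in types, not V8's).
  #include <cstdio>

  enum AllocationAlignment { kTaggedAligned, kDoubleAligned };
  struct AllocationResult { bool from_lab; };

  constexpr int kMaxLabObjectSize = 2 * 1024;  // threshold used in the diff

  AllocationResult AllocateInLab(int, AllocationAlignment) { return {true}; }
  AllocationResult AllocateOutsideLab(int, AllocationAlignment) { return {false}; }

  AllocationResult AllocateRawSketch(int size_in_bytes,
                                     AllocationAlignment alignment) {
    // Only small requests are served from the LAB; the larger-object path is
    // the one that previously dropped `alignment` on the floor.
    if (size_in_bytes > kMaxLabObjectSize) {
      return AllocateOutsideLab(size_in_bytes, alignment);
    }
    return AllocateInLab(size_in_bytes, alignment);
  }

  int main() {
    std::printf("%d %d\n", AllocateRawSketch(64, kTaggedAligned).from_lab,
                AllocateRawSketch(4 * 1024, kDoubleAligned).from_lab);
  }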

Bring the implementations of LocalHeap and the regular PagedSpace closer
together by assuming that the caller of their free-list allocations takes
care of size and alignment adjustments.
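
A minimal standalone model of that caller-side contract, assuming
double-word alignment as the only non-trivial case: the caller over-requests
by the worst-case fill, then computes the concrete fill for the address it
got back and places the object behind the filler. The constants and helpers
here (kTaggedSize, kDoubleAlignment, MaxFillToAlign, FillToAlign) are
simplified stand-ins for Heap::GetMaximumFillToAlign / Heap::GetFillToAlign,
not the real API.

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  constexpr intptr_t kTaggedSize = 8;
  constexpr intptr_t kDoubleAlignment = 16;

  // Worst-case fill for a double-aligned request (cf. GetMaximumFillToAlign).
  intptr_t MaxFillToAlign() { return kDoubleAlignment - kTaggedSize; }

  // Fill actually needed at a concrete address (cf. GetFillToAlign).
  intptr_t FillToAlign(intptr_t address) {
    intptr_t misalignment = address & (kDoubleAlignment - 1);
    return misalignment == 0 ? 0 : kDoubleAlignment - misalignment;
  }

  int main() {
    const intptr_t size_in_bytes = 48;
    // Conservative request: the caller cannot know where the free list will
    // place the object, so it asks for size plus the worst-case fill.
    const intptr_t aligned_size = size_in_bytes + MaxFillToAlign();
    // Pretend the free list handed back this tagged-aligned start address.
    const intptr_t start = 0x10008;
    const intptr_t fill = FillToAlign(start);
    const intptr_t object_start = start + fill;  // a filler object goes first
    assert(object_start % kDoubleAlignment == 0);
    assert(fill + size_in_bytes <= aligned_size);
    std::printf("fill=%jd object=%#jx\n", (intmax_t)fill,
                (uintmax_t)object_start);
  }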

Drive-by: Use unaligned allocations when possible, which avoids a call
into PrecedeWithFiller(size) that would only bail out late for
0-sized fillers.
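
The intent of the drive-by can be seen in this toy bump-pointer buffer (the
ToyLab type and AllocateInLabSketch helper are stand-ins for
LocalAllocationBuffer, not its real interface): kTaggedAligned requests take
the plain unaligned bump path, so no filler logic runs at all, while
genuinely aligned requests still pay for the (possibly zero) fill.

  #include <cstdint>
  #include <cstdio>

  enum AllocationAlignment { kTaggedAligned, kDoubleAligned };

  struct ToyLab {
    uintptr_t top, limit;
    bool AllocateUnaligned(uintptr_t size, uintptr_t* out) {
      if (top + size > limit) return false;
      *out = top;
      top += size;
      return true;
    }
    bool AllocateAligned(uintptr_t size, uintptr_t* out) {
      uintptr_t fill = top % 16 ? 16 - top % 16 : 0;  // may well be 0
      if (top + fill + size > limit) return false;
      *out = top + fill;  // a real heap writes a filler object into the gap
      top += fill + size;
      return true;
    }
  };

  // The drive-by: pick the unaligned path whenever no extra alignment was
  // requested, instead of taking the aligned path with a 0-sized fill.
  bool AllocateInLabSketch(ToyLab& lab, uintptr_t size,
                           AllocationAlignment alignment, uintptr_t* out) {
    return alignment == kTaggedAligned ? lab.AllocateUnaligned(size, out)
                                       : lab.AllocateAligned(size, out);
  }

  int main() {
    ToyLab lab{0x10008, 0x11000};
    uintptr_t a = 0, b = 0;
    AllocateInLabSketch(lab, 32, kTaggedAligned, &a);   // no fill computed
    AllocateInLabSketch(lab, 32, kDoubleAligned, &b);   // fill = 8 here
    std::printf("%#jx %#jx\n", (uintmax_t)a, (uintmax_t)b);
  }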

Bug: chromium:1338687
Change-Id: I0f52c54359326a249b2ebe95bb73184ad95194f0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3721817
Reviewed-by: Nikolaos Papaspyrou <nikolaos@chromium.org>
Reviewed-by: Camillo Bruni <cbruni@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81338}
parent cf8fc474
@@ -7,6 +7,7 @@
#include "include/v8-internal.h"
#include "src/common/globals.h"
#include "src/heap/allocation-result.h"
#include "src/heap/concurrent-allocator.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking.h"
@@ -18,28 +19,31 @@
namespace v8 {
namespace internal {
AllocationResult ConcurrentAllocator::AllocateRaw(int object_size,
AllocationResult ConcurrentAllocator::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
// TODO(dinfuehr): Add support for allocation observers
#ifdef DEBUG
if (local_heap_) local_heap_->VerifyCurrent();
#endif
#endif // DEBUG
if (object_size > kMaxLabObjectSize) {
return AllocateOutsideLab(object_size, alignment, origin);
if (size_in_bytes > kMaxLabObjectSize) {
return AllocateOutsideLab(size_in_bytes, alignment, origin);
}
return AllocateInLab(object_size, alignment, origin);
}
AllocationResult ConcurrentAllocator::AllocateInLab(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
AllocationResult allocation = lab_.AllocateRawAligned(object_size, alignment);
return allocation.IsFailure()
? AllocateInLabSlow(object_size, alignment, origin)
: allocation;
AllocationResult result;
// TODO(v8:12547): We cannot use USE_ALLOCATION_ALIGNMENT_BOOL here as
// JSAtomicsMutex has alignment restrictions that are not optional and is
// allocated using ConcurrentAllocator in the shared heap.
if (alignment != kTaggedAligned) {
result = lab_.AllocateRawAligned(size_in_bytes, alignment);
} else {
result = lab_.AllocateRawUnaligned(size_in_bytes);
}
return result.IsFailure()
? AllocateInLabSlow(size_in_bytes, alignment, origin)
: result;
}
} // namespace internal
......
@@ -127,20 +127,19 @@ void ConcurrentAllocator::UnmarkLinearAllocationArea() {
}
AllocationResult ConcurrentAllocator::AllocateInLabSlow(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
if (!EnsureLab(origin)) {
return AllocationResult::Failure();
}
AllocationResult allocation = lab_.AllocateRawAligned(object_size, alignment);
AllocationResult allocation =
lab_.AllocateRawAligned(size_in_bytes, alignment);
DCHECK(!allocation.IsFailure());
return allocation;
}
bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
auto result = space_->RawRefillLabBackground(
local_heap_, kLabSize, kMaxLabSize, kTaggedAligned, origin);
auto result = space_->RawAllocateBackground(local_heap_, kMinLabSize,
kMaxLabSize, origin);
if (!result) return false;
if (IsBlackAllocationEnabled()) {
@@ -161,16 +160,25 @@ bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
}
AllocationResult ConcurrentAllocator::AllocateOutsideLab(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
auto result = space_->RawRefillLabBackground(local_heap_, object_size,
object_size, alignment, origin);
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
// Conservative estimate as we don't know the alignment of the allocation.
const int requested_filler_size = Heap::GetMaximumFillToAlign(alignment);
const int aligned_size_in_bytes = size_in_bytes + requested_filler_size;
auto result = space_->RawAllocateBackground(
local_heap_, aligned_size_in_bytes, aligned_size_in_bytes, origin);
if (!result) return AllocationResult::Failure();
DCHECK_GE(result->second, aligned_size_in_bytes);
HeapObject object = HeapObject::FromAddress(result->first);
const int filler_size = Heap::GetFillToAlign(object.address(), alignment);
// Actually align the allocation.
if (filler_size)
object = local_heap_->heap()->PrecedeWithFiller(object, filler_size);
if (IsBlackAllocationEnabled()) {
owning_heap()->incremental_marking()->MarkBlackBackground(object,
object_size);
size_in_bytes);
}
return AllocationResult::FromObject(object);
......
@@ -33,11 +33,11 @@ class StressConcurrentAllocatorTask : public CancelableTask {
// Allocations are served from a TLAB if possible.
class ConcurrentAllocator {
public:
static const int kLabSize = 4 * KB;
static const int kMaxLabSize = 32 * KB;
static const int kMaxLabObjectSize = 2 * KB;
static constexpr int kMinLabSize = 4 * KB;
static constexpr int kMaxLabSize = 32 * KB;
static constexpr int kMaxLabObjectSize = 2 * KB;
explicit ConcurrentAllocator(LocalHeap* local_heap, PagedSpace* space)
ConcurrentAllocator(LocalHeap* local_heap, PagedSpace* space)
: local_heap_(local_heap),
space_(space),
lab_(LocalAllocationBuffer::InvalidBuffer()) {}
@@ -52,16 +52,21 @@ class ConcurrentAllocator {
void UnmarkLinearAllocationArea();
private:
V8_EXPORT_PRIVATE AllocationResult AllocateInLabSlow(
int object_size, AllocationAlignment alignment, AllocationOrigin origin);
static_assert(
kMinLabSize > kMaxLabObjectSize,
"LAB size must be larger than max LAB object size as the fast "
"paths do not consider alignment. The assumption is that any object with "
"size <= kMaxLabObjectSize will fit into a newly allocated LAB of size "
"kLabSize after computing the alignment requirements.");
V8_EXPORT_PRIVATE AllocationResult
AllocateInLabSlow(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin);
bool EnsureLab(AllocationOrigin origin);
inline AllocationResult AllocateInLab(int object_size,
AllocationAlignment alignment,
AllocationOrigin origin);
V8_EXPORT_PRIVATE AllocationResult AllocateOutsideLab(
int object_size, AllocationAlignment alignment, AllocationOrigin origin);
V8_EXPORT_PRIVATE AllocationResult
AllocateOutsideLab(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin);
bool IsBlackAllocationEnabled() const;
......
@@ -34,7 +34,7 @@ class LinearAllocationArea final {
void ResetStart() { start_ = top_; }
V8_INLINE bool CanIncrementTop(size_t bytes) {
V8_INLINE bool CanIncrementTop(size_t bytes) const {
Verify();
return (top_ + bytes) <= limit_;
}
......
@@ -625,11 +625,10 @@ bool PagedSpaceBase::TryAllocationFromFreeListMain(size_t size_in_bytes,
}
base::Optional<std::pair<Address, size_t>>
PagedSpaceBase::RawRefillLabBackground(LocalHeap* local_heap,
size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
PagedSpaceBase::RawAllocateBackground(LocalHeap* local_heap,
size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationOrigin origin) {
DCHECK(!is_compaction_space());
DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
identity() == MAP_SPACE);
@@ -639,7 +638,7 @@ PagedSpaceBase::RawRefillLabBackground(LocalHeap* local_heap,
base::Optional<std::pair<Address, size_t>> result =
TryAllocationFromFreeListBackground(min_size_in_bytes, max_size_in_bytes,
alignment, origin);
origin);
if (result) return result;
MarkCompactCollector* collector = heap()->mark_compact_collector();
@@ -650,8 +649,8 @@ PagedSpaceBase::RawRefillLabBackground(LocalHeap* local_heap,
RefillFreeList();
// Retry the free list allocation.
result = TryAllocationFromFreeListBackground(
min_size_in_bytes, max_size_in_bytes, alignment, origin);
result = TryAllocationFromFreeListBackground(min_size_in_bytes,
max_size_in_bytes, origin);
if (result) return result;
if (IsSweepingAllowedOnThread(local_heap)) {
@@ -665,8 +664,8 @@ PagedSpaceBase::RawRefillLabBackground(LocalHeap* local_heap,
RefillFreeList();
if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
result = TryAllocationFromFreeListBackground(
min_size_in_bytes, max_size_in_bytes, alignment, origin);
result = TryAllocationFromFreeListBackground(min_size_in_bytes,
max_size_in_bytes, origin);
if (result) return result;
}
}
@@ -675,10 +674,7 @@ PagedSpaceBase::RawRefillLabBackground(LocalHeap* local_heap,
if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
heap()->CanExpandOldGenerationBackground(local_heap, AreaSize())) {
result = ExpandBackground(max_size_in_bytes);
if (result) {
DCHECK_EQ(Heap::GetFillToAlign(result->first, alignment), 0);
return result;
}
if (result) return result;
}
if (collector->sweeping_in_progress()) {
@@ -690,17 +686,17 @@ PagedSpaceBase::RawRefillLabBackground(LocalHeap* local_heap,
RefillFreeList();
// Last try to acquire memory from free list.
return TryAllocationFromFreeListBackground(
min_size_in_bytes, max_size_in_bytes, alignment, origin);
return TryAllocationFromFreeListBackground(min_size_in_bytes,
max_size_in_bytes, origin);
}
return {};
}
base::Optional<std::pair<Address, size_t>>
PagedSpaceBase::TryAllocationFromFreeListBackground(
size_t min_size_in_bytes, size_t max_size_in_bytes,
AllocationAlignment alignment, AllocationOrigin origin) {
PagedSpaceBase::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationOrigin origin) {
base::MutexGuard lock(&space_mutex_);
DCHECK_LE(min_size_in_bytes, max_size_in_bytes);
DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
......
@@ -150,10 +150,8 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
// Allocate the requested number of bytes in the space from a background
// thread.
V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
RawRefillLabBackground(LocalHeap* local_heap, size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin);
RawAllocateBackground(LocalHeap* local_heap, size_t min_size_in_bytes,
size_t max_size_in_bytes, AllocationOrigin origin);
size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
if (size_in_bytes == 0) return 0;
@@ -387,7 +385,6 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin);
V8_WARN_UNUSED_RESULT bool TryExpand(int size_in_bytes,
......
@@ -162,6 +162,14 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
: AllocationResult::FromObject(object);
}
AllocationResult LocalAllocationBuffer::AllocateRawUnaligned(
int size_in_bytes) {
return allocation_info_.CanIncrementTop(size_in_bytes)
? AllocationResult::FromObject(HeapObject::FromAddress(
allocation_info_.IncrementTop(size_in_bytes)))
: AllocationResult::Failure();
}
LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
AllocationResult result,
intptr_t size) {
......
@@ -452,6 +452,8 @@ class LocalAllocationBuffer {
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment);
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
int size_in_bytes);
inline bool IsValid() { return allocation_info_.top() != kNullAddress; }
......