Commit 53e295ba authored by Dominik Inführ, committed by Commit Bot

[heap] Rename main thread allocation functions

Rename the functions used for main-thread allocation so that their names
indicate that they are meant for allocation on the main thread.

Bug: v8:10315
Change-Id: Idd359a7a439ec2e93f0bdc2f1bed987755790bbe
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2292308
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68828}
parent 426af6c0
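
For orientation, the renamed main-thread entry points fit together as sketched below. This is a minimal, standalone C++ illustration derived from the diff that follows; SpaceSketch and all of its members are hypothetical stand-ins, not V8's real classes: EnsureLabMain is the inline fast path over the linear allocation buffer (LAB), RefillLabMain is the space-dependent slow path, RawRefillLabMain does the actual refill work, and RefillLabFromFreeListMain tries the free list.

// Minimal standalone sketch (hypothetical types, not V8 code).
#include <cstdint>
#include <iostream>

class SpaceSketch {
 public:
  virtual ~SpaceSketch() = default;

  // Inline fast path (was EnsureLinearAllocationArea): succeed if the request
  // fits into the current LAB, otherwise fall through to the slow path.
  bool EnsureLabMain(int size_in_bytes) {
    if (top_ + size_in_bytes <= limit_) return true;
    return RefillLabMain(size_in_bytes);
  }

 protected:
  // Space-dependent slow path (was SlowRefillLinearAllocationArea); the real
  // CompactionSpace and OffThreadSpace override this.
  virtual bool RefillLabMain(int size_in_bytes) {
    return RawRefillLabMain(size_in_bytes);
  }

  // Actual refill work (was RawSlowRefillLinearAllocationArea). In V8 this
  // consults the free list, sweeping, page stealing and space expansion.
  bool RawRefillLabMain(int size_in_bytes) {
    return RefillLabFromFreeListMain(size_in_bytes);
  }

  // Free-list refill (was RefillLinearAllocationAreaFromFreeList); stubbed.
  bool RefillLabFromFreeListMain(int /*size_in_bytes*/) { return false; }

  std::uintptr_t top_ = 0;
  std::uintptr_t limit_ = 0;
};

int main() {
  SpaceSketch space;
  // Empty LAB and a stubbed-out free list, so the refill fails.
  std::cout << std::boolalpha << space.EnsureLabMain(64) << "\n";  // false
}
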
@@ -89,12 +89,11 @@ bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
   return false;
 }
 
-bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes,
-                                            AllocationOrigin origin) {
+bool PagedSpace::EnsureLabMain(int size_in_bytes, AllocationOrigin origin) {
   if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
     return true;
   }
-  return SlowRefillLinearAllocationArea(size_in_bytes, origin);
+  return RefillLabMain(size_in_bytes, origin);
 }
 
 AllocationResult PagedSpace::AllocateLinearly(int size_in_bytes) {
@@ -130,7 +129,7 @@ AllocationResult PagedSpace::TryAllocateLinearlyAligned(
 AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
                                                   AllocationOrigin origin) {
-  if (!EnsureLinearAllocationArea(size_in_bytes, origin)) {
+  if (!EnsureLabMain(size_in_bytes, origin)) {
     return AllocationResult::Retry(identity());
   }
 
   AllocationResult result = AllocateLinearly(size_in_bytes);
@@ -154,7 +153,7 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
   // allocated, so assume the worst case.
   int filler_size = Heap::GetMaximumFillToAlign(alignment);
   allocation_size += filler_size;
-  if (!EnsureLinearAllocationArea(allocation_size, origin)) {
+  if (!EnsureLabMain(allocation_size, origin)) {
     return AllocationResult::Retry(identity());
   }
   allocation_size = size_in_bytes;
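
The "assume the worst case" lines in the hunk above reserve room for the largest possible alignment filler before calling EnsureLabMain, because the exact filler is only known once the allocation address (the LAB top) is fixed. A standalone sketch of that idea follows; kTaggedSize, the 16-byte alignment, and both helper functions are illustrative stand-ins, not V8's actual definitions of Heap::GetMaximumFillToAlign and Heap::GetFillToAlign.

// Standalone illustration with made-up constants (not V8's real values).
#include <cstdint>
#include <iostream>

constexpr int kTaggedSize = 8;  // illustrative tagged-pointer size

// Worst-case filler for a requested alignment (stand-in for
// Heap::GetMaximumFillToAlign); assumed here to be alignment - kTaggedSize.
int MaximumFillToAlign(int alignment) { return alignment - kTaggedSize; }

// Filler actually needed once the allocation address is known (stand-in for
// Heap::GetFillToAlign).
int FillToAlign(std::uintptr_t address, int alignment) {
  return static_cast<int>((alignment - address % alignment) % alignment);
}

int main() {
  const int alignment = 16;      // hypothetical alignment request
  const int size_in_bytes = 24;  // hypothetical object size
  // Reserve for the worst case up front, as AllocateRawAligned does...
  int reserved = size_in_bytes + MaximumFillToAlign(alignment);
  // ...then compute the real filler for a concrete top pointer.
  std::uintptr_t top = 0x1008;
  std::cout << "reserved " << reserved << " bytes, actual filler "
            << FillToAlign(top, alignment) << " bytes\n";
  // Prints: reserved 32 bytes, actual filler 8 bytes
}
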
@@ -512,8 +512,8 @@ std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator(Heap* heap) {
       new PagedSpaceObjectIterator(heap, this));
 }
 
-bool PagedSpace::RefillLinearAllocationAreaFromFreeList(
-    size_t size_in_bytes, AllocationOrigin origin) {
+bool PagedSpace::RefillLabFromFreeListMain(size_t size_in_bytes,
+                                           AllocationOrigin origin) {
   DCHECK(IsAligned(size_in_bytes, kTaggedSize));
   DCHECK_LE(top(), limit());
 #ifdef DEBUG
@@ -859,24 +859,7 @@ size_t PagedSpace::SizeOfObjects() {
   return Size() - (limit() - top());
 }
 
-bool PagedSpace::EnsureSweptAndRetryAllocation(int size_in_bytes,
-                                               AllocationOrigin origin) {
-  DCHECK(!is_local_space());
-  MarkCompactCollector* collector = heap()->mark_compact_collector();
-  if (collector->sweeping_in_progress()) {
-    // Complete sweeping for this space.
-    collector->DrainSweepingWorklistForSpace(identity());
-    RefillFreeList();
-
-    // After waiting for the sweeper threads, there may be new free-list
-    // entries.
-    return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
-  }
-  return false;
-}
-
-bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
-                                                AllocationOrigin origin) {
+bool PagedSpace::RefillLabMain(int size_in_bytes, AllocationOrigin origin) {
   VMState<GC> state(heap()->isolate());
   RuntimeCallTimerScope runtime_timer(
       heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
@@ -887,31 +870,28 @@ bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
     optional_mutex.emplace(&allocation_mutex_);
   }
 
-  return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
+  return RawRefillLabMain(size_in_bytes, origin);
 }
 
-bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
+bool CompactionSpace::RefillLabMain(int size_in_bytes,
                                     AllocationOrigin origin) {
-  return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
+  return RawRefillLabMain(size_in_bytes, origin);
 }
 
-bool OffThreadSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
-                                                    AllocationOrigin origin) {
-  if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
-    return true;
+bool OffThreadSpace::RefillLabMain(int size_in_bytes, AllocationOrigin origin) {
+  if (RefillLabFromFreeListMain(size_in_bytes, origin)) return true;
 
   if (heap()->CanExpandOldGenerationBackground(size_in_bytes) && Expand()) {
     DCHECK((CountTotalPages() > 1) ||
            (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
-    return RefillLinearAllocationAreaFromFreeList(
-        static_cast<size_t>(size_in_bytes), origin);
+    return RefillLabFromFreeListMain(static_cast<size_t>(size_in_bytes),
+                                     origin);
   }
 
   return false;
 }
 
-bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
-                                                   AllocationOrigin origin) {
+bool PagedSpace::RawRefillLabMain(int size_in_bytes, AllocationOrigin origin) {
   // Non-compaction local spaces are not supported.
   DCHECK_IMPLIES(is_local_space(), is_compaction_space());
@@ -919,8 +899,7 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
   DCHECK_GE(size_in_bytes, 0);
   const int kMaxPagesToSweep = 1;
 
-  if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
-    return true;
+  if (RefillLabFromFreeListMain(size_in_bytes, origin)) return true;
 
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   // Sweeping is still in progress.
@@ -935,11 +914,10 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
     RefillFreeList();
 
     // Retry the free list allocation.
-    if (RefillLinearAllocationAreaFromFreeList(
-            static_cast<size_t>(size_in_bytes), origin))
+    if (RefillLabFromFreeListMain(static_cast<size_t>(size_in_bytes), origin))
       return true;
 
-    if (SweepAndRetryAllocation(size_in_bytes, kMaxPagesToSweep, size_in_bytes,
+    if (ContributeToSweepingMain(size_in_bytes, kMaxPagesToSweep, size_in_bytes,
                                  origin))
       return true;
   }
@@ -951,8 +929,7 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
     Page* page = main_space->RemovePageSafe(size_in_bytes);
     if (page != nullptr) {
       AddPage(page);
-      if (RefillLinearAllocationAreaFromFreeList(
-              static_cast<size_t>(size_in_bytes), origin))
+      if (RefillLabFromFreeListMain(static_cast<size_t>(size_in_bytes), origin))
         return true;
     }
   }
@@ -966,23 +943,29 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
       }
       DCHECK((CountTotalPages() > 1) ||
              (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
-      return RefillLinearAllocationAreaFromFreeList(
-          static_cast<size_t>(size_in_bytes), origin);
+      return RefillLabFromFreeListMain(static_cast<size_t>(size_in_bytes),
+                                       origin);
     }
   }
 
   if (is_compaction_space()) {
-    return SweepAndRetryAllocation(0, 0, size_in_bytes, origin);
+    return ContributeToSweepingMain(0, 0, size_in_bytes, origin);
   } else {
-    // If sweeper threads are active, wait for them at that point and steal
-    // elements from their free-lists. Allocation may still fail here which
-    // would indicate that there is not enough memory for the given allocation.
-    return EnsureSweptAndRetryAllocation(size_in_bytes, origin);
+    DCHECK(!is_local_space());
+    if (collector->sweeping_in_progress()) {
+      // Complete sweeping for this space.
+      collector->DrainSweepingWorklistForSpace(identity());
+      RefillFreeList();
+      // Last try to acquire memory from free list.
+      return RefillLabFromFreeListMain(size_in_bytes, origin);
+    }
+    return false;
   }
 }
 
-bool PagedSpace::SweepAndRetryAllocation(int required_freed_bytes,
+bool PagedSpace::ContributeToSweepingMain(int required_freed_bytes,
                                           int max_pages, int size_in_bytes,
                                           AllocationOrigin origin) {
   // Cleanup invalidated old-to-new refs for compaction space in the
@@ -998,7 +981,7 @@ bool PagedSpace::SweepAndRetryAllocation(int required_freed_bytes,
         invalidated_slots_in_free_space);
     RefillFreeList();
     if (max_freed >= size_in_bytes)
-      return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
+      return RefillLabFromFreeListMain(size_in_bytes, origin);
   }
   return false;
 }
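
Besides the renames, the hunks above fold the deleted EnsureSweptAndRetryAllocation helper into the final branch of RawRefillLabMain: when nothing else works, a non-compaction space drains the sweeper for this space, refills the free list, and retries once. A hedged, standalone sketch of the resulting fallback order follows; every member of PagedSpaceSketch is a stub for illustration, not V8 API, and only the ordering mirrors the diff above.

// Standalone sketch of the fallback order in RawRefillLabMain after this
// change. All helpers are stubs.
#include <iostream>

struct PagedSpaceSketch {
  bool is_compaction_space = false;
  bool sweeping_in_progress = true;

  bool RefillLabFromFreeListMain(int) { return false; }           // stub
  bool ContributeToSweepingMain(int, int, int) { return false; }  // stub
  bool TryStealPageFromMainSpace(int) { return false; }           // stub
  bool TryExpand(int) { return false; }                           // stub
  void DrainSweepingWorklistAndRefillFreeList() {}                // stub

  bool RawRefillLabMain(int size_in_bytes) {
    // 1. Try the free list directly.
    if (RefillLabFromFreeListMain(size_in_bytes)) return true;

    // 2. While sweeping is in progress, retry the free list (the sweepers may
    //    have freed memory) and contribute to sweeping up to one page.
    if (sweeping_in_progress) {
      if (RefillLabFromFreeListMain(size_in_bytes)) return true;
      if (ContributeToSweepingMain(/*required_freed_bytes=*/size_in_bytes,
                                   /*max_pages=*/1, size_in_bytes))
        return true;
    }

    // 3. A compaction space may steal a swept page from the main space.
    if (is_compaction_space && TryStealPageFromMainSpace(size_in_bytes))
      return true;

    // 4. Try to expand the space by a new page.
    if (TryExpand(size_in_bytes)) return true;

    // 5. Last resort: a compaction space contributes to sweeping once more;
    //    other spaces finish sweeping for this space, refill the free list
    //    and retry (the logic inlined from EnsureSweptAndRetryAllocation).
    if (is_compaction_space) {
      return ContributeToSweepingMain(0, 0, size_in_bytes);
    }
    if (sweeping_in_progress) {
      DrainSweepingWorklistAndRefillFreeList();
      return RefillLabFromFreeListMain(size_in_bytes);
    }
    return false;
  }
};

int main() {
  PagedSpaceSketch space;
  std::cout << std::boolalpha << space.RawRefillLabMain(64) << "\n";  // false
}
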
@@ -353,8 +353,7 @@ class V8_EXPORT_PRIVATE PagedSpace
   // Sets up a linear allocation area that fits the given number of bytes.
   // Returns false if there is not enough space and the caller has to retry
   // after collecting garbage.
-  inline bool EnsureLinearAllocationArea(int size_in_bytes,
-                                         AllocationOrigin origin);
+  inline bool EnsureLabMain(int size_in_bytes, AllocationOrigin origin);
   // Allocates an object from the linear allocation area. Assumes that the
   // linear allocation area is large enought to fit the object.
   inline AllocationResult AllocateLinearly(int size_in_bytes);
@@ -365,31 +364,24 @@ class V8_EXPORT_PRIVATE PagedSpace
   inline AllocationResult TryAllocateLinearlyAligned(
       int* size_in_bytes, AllocationAlignment alignment);
 
-  V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
-      size_t size_in_bytes, AllocationOrigin origin);
-
-  // If sweeping is still in progress try to sweep unswept pages. If that is
-  // not successful, wait for the sweeper threads and retry free-list
-  // allocation. Returns false if there is not enough space and the caller
-  // has to retry after collecting garbage.
-  V8_WARN_UNUSED_RESULT bool EnsureSweptAndRetryAllocation(
-      int size_in_bytes, AllocationOrigin origin);
+  V8_WARN_UNUSED_RESULT bool RefillLabFromFreeListMain(size_t size_in_bytes,
+                                                       AllocationOrigin origin);
 
-  V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(int required_freed_bytes,
+  V8_WARN_UNUSED_RESULT bool ContributeToSweepingMain(int required_freed_bytes,
                                                       int max_pages,
                                                       int size_in_bytes,
                                                       AllocationOrigin origin);
 
-  // Slow path of AllocateRaw. This function is space-dependent. Returns false
-  // if there is not enough space and the caller has to retry after
+  // Refills LAB for EnsureLabMain. This function is space-dependent. Returns
+  // false if there is not enough space and the caller has to retry after
   // collecting garbage.
-  V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
-      int size_in_bytes, AllocationOrigin origin);
+  V8_WARN_UNUSED_RESULT virtual bool RefillLabMain(int size_in_bytes,
+                                                   AllocationOrigin origin);
 
-  // Implementation of SlowAllocateRaw. Returns false if there is not enough
-  // space and the caller has to retry after collecting garbage.
-  V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
-      int size_in_bytes, AllocationOrigin origin);
+  // Actual implementation of refilling LAB. Returns false if there is not
+  // enough space and the caller has to retry after collecting garbage.
+  V8_WARN_UNUSED_RESULT bool RawRefillLabMain(int size_in_bytes,
+                                              AllocationOrigin origin);
 
   V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
   TryAllocationFromFreeListBackground(LocalHeap* local_heap,
@@ -449,8 +441,8 @@ class V8_EXPORT_PRIVATE CompactionSpace : public LocalSpace {
   }
 
  protected:
-  V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
-      int size_in_bytes, AllocationOrigin origin) override;
+  V8_WARN_UNUSED_RESULT bool RefillLabMain(int size_in_bytes,
+                                           AllocationOrigin origin) override;
 };
 
 // A collection of |CompactionSpace|s used by a single compaction task.
@@ -554,8 +546,8 @@ class V8_EXPORT_PRIVATE OffThreadSpace : public LocalSpace {
   }
 
  protected:
-  V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
-      int size_in_bytes, AllocationOrigin origin) override;
+  V8_WARN_UNUSED_RESULT bool RefillLabMain(int size_in_bytes,
+                                           AllocationOrigin origin) override;
 
   void RefillFreeList() override;
 };
@@ -5461,8 +5461,8 @@ AllocationResult HeapTester::AllocateByteArrayForTest(
 }
 
 bool HeapTester::CodeEnsureLinearAllocationArea(Heap* heap, int size_in_bytes) {
-  return heap->code_space()->EnsureLinearAllocationArea(
-      size_in_bytes, AllocationOrigin::kRuntime);
+  return heap->code_space()->EnsureLabMain(size_in_bytes,
+                                           AllocationOrigin::kRuntime);
 }
 
 HEAP_TEST(Regress587004) {