Commit 3b467cb5 authored by Dominik Inführ, committed by Commit Bot

[heap] Make slow path in concurrent allocator more explicit

Restructure code to make slow path of allocation more obvious.

Bug: v8:10315
Change-Id: Ic3e3b866b144b6f2877acac4accf87377f757172
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2276273
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68651}
parent 98d843c8
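The change below splits ConcurrentAllocator::AllocateInLab() into a small inline fast path and an out-of-line AllocateInLabSlow() helper. As a rough illustration only (a minimal, self-contained sketch with hypothetical names and a fake backing store, not V8's actual classes or API), the pattern looks like this:

// Sketch of the fast-path/slow-path split: the inline fast path only bumps
// the pointer in the current local allocation buffer (LAB); refilling the
// LAB lives in a separate, rarely taken slow-path helper.
#include <cstddef>
#include <cstdint>

struct AllocationResult {
  void* address = nullptr;
  bool IsRetry() const { return address == nullptr; }
};

class ConcurrentAllocatorSketch {
 public:
  // Fast path: one bump-pointer attempt, small enough to inline at call sites.
  AllocationResult AllocateInLab(size_t size) {
    AllocationResult result = TryBumpAllocate(size);
    if (result.IsRetry()) return AllocateInLabSlow(size);  // explicit slow path
    return result;
  }

 private:
  // Slow path: refill the LAB once; if that fails the caller has to collect
  // garbage (AllocateOrFail / PerformCollectionAndAllocateAgain in the real
  // code), otherwise the retry is expected to succeed.
  AllocationResult AllocateInLabSlow(size_t size) {
    if (!EnsureLab(size)) return {};
    return TryBumpAllocate(size);
  }

  AllocationResult TryBumpAllocate(size_t size) {
    if (limit_ - top_ < size) return {};  // LAB exhausted -> retry
    void* object = reinterpret_cast<void*>(top_);
    top_ += size;
    return {object};
  }

  // Stand-in for SlowGetLinearAllocationAreaBackground(): hands out the same
  // static buffer each time instead of carving a new area out of old space.
  bool EnsureLab(size_t size) {
    static char backing[64 * 1024];
    if (size > sizeof(backing)) return false;
    top_ = reinterpret_cast<uintptr_t>(backing);
    limit_ = top_ + sizeof(backing);
    return true;
  }

  uintptr_t top_ = 0;
  uintptr_t limit_ = 0;
};

Keeping the fast path this small lets the compiler inline it at every allocation site, while the refill logic moves out of line into the .cc file, which is what the diff below does.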
@@ -33,49 +33,18 @@ Address ConcurrentAllocator::AllocateOrFail(int object_size,
AllocationAlignment alignment,
AllocationOrigin origin) {
AllocationResult result = Allocate(object_size, alignment, origin);
if (!result.IsRetry()) return result.ToObjectChecked().address();
if (!result.IsRetry()) return result.ToObject().address();
return PerformCollectionAndAllocateAgain(object_size, alignment, origin);
}
AllocationResult ConcurrentAllocator::AllocateInLab(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
AllocationResult allocation;
if (!lab_.IsValid() && !EnsureLab(origin)) {
return AllocationResult::Retry(space_->identity());
}
allocation = lab_.AllocateRawAligned(object_size, alignment);
AllocationResult allocation = lab_.AllocateRawAligned(object_size, alignment);
if (allocation.IsRetry()) {
if (!EnsureLab(origin)) {
return AllocationResult::Retry(space_->identity());
return AllocateInLabSlow(object_size, alignment, origin);
} else {
allocation = lab_.AllocateRawAligned(object_size, alignment);
CHECK(!allocation.IsRetry());
}
}
return allocation;
}
bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
auto result = space_->SlowGetLinearAllocationAreaBackground(
local_heap_, kLabSize, kMaxLabSize, kWordAligned, origin);
if (!result) return false;
if (local_heap_->heap()->incremental_marking()->black_allocation()) {
Address top = result->first;
Address limit = top + result->second;
Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
}
HeapObject object = HeapObject::FromAddress(result->first);
LocalAllocationBuffer saved_lab = std::move(lab_);
lab_ = LocalAllocationBuffer::FromResult(
local_heap_->heap(), AllocationResult(object), result->second);
DCHECK(lab_.IsValid());
if (!lab_.TryMerge(&saved_lab)) {
saved_lab.CloseAndMakeIterable();
}
return true;
}
} // namespace internal
@@ -98,12 +98,47 @@ void ConcurrentAllocator::UnmarkLinearAllocationArea() {
}
}
AllocationResult ConcurrentAllocator::AllocateInLabSlow(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
if (!EnsureLab(origin)) {
return AllocationResult::Retry(OLD_SPACE);
}
AllocationResult allocation = lab_.AllocateRawAligned(object_size, alignment);
DCHECK(!allocation.IsRetry());
return allocation;
}
bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
auto result = space_->SlowGetLinearAllocationAreaBackground(
local_heap_, kLabSize, kMaxLabSize, kWordAligned, origin);
if (!result) return false;
if (local_heap_->heap()->incremental_marking()->black_allocation()) {
Address top = result->first;
Address limit = top + result->second;
Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
}
HeapObject object = HeapObject::FromAddress(result->first);
LocalAllocationBuffer saved_lab = std::move(lab_);
lab_ = LocalAllocationBuffer::FromResult(
local_heap_->heap(), AllocationResult(object), result->second);
DCHECK(lab_.IsValid());
if (!lab_.TryMerge(&saved_lab)) {
saved_lab.CloseAndMakeIterable();
}
return true;
}
AllocationResult ConcurrentAllocator::AllocateOutsideLab(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
auto result = space_->SlowGetLinearAllocationAreaBackground(
local_heap_, object_size, object_size, alignment, origin);
if (!result) return AllocationResult::Retry(OLD_SPACE);
if (result) {
HeapObject object = HeapObject::FromAddress(result->first);
if (local_heap_->heap()->incremental_marking()->black_allocation()) {
@@ -112,9 +147,6 @@ AllocationResult ConcurrentAllocator::AllocateOutsideLab(
}
return AllocationResult(object);
} else {
return AllocationResult::Retry(OLD_SPACE);
}
}
} // namespace internal
@@ -55,7 +55,10 @@ class ConcurrentAllocator {
void UnmarkLinearAllocationArea();
private:
inline bool EnsureLab(AllocationOrigin origin);
V8_EXPORT_PRIVATE AllocationResult AllocateInLabSlow(
int object_size, AllocationAlignment alignment, AllocationOrigin origin);
bool EnsureLab(AllocationOrigin origin);
inline AllocationResult AllocateInLab(int object_size,
AllocationAlignment alignment,
AllocationOrigin origin);
@@ -61,6 +61,11 @@ HeapObject AllocationResult::ToObjectChecked() {
return HeapObject::cast(object_);
}
HeapObject AllocationResult::ToObject() {
DCHECK(!IsRetry());
return HeapObject::cast(object_);
}
Isolate* Heap::isolate() {
return reinterpret_cast<Isolate*>(
reinterpret_cast<intptr_t>(this) -
@@ -194,6 +194,7 @@ class AllocationResult {
inline bool IsRetry() { return object_.IsSmi(); }
inline HeapObject ToObjectChecked();
inline HeapObject ToObject();
inline AllocationSpace RetrySpace();
template <typename T>