Commit 7f2dc889 authored by ofrobots, committed by Commit bot

remove recursion from NewSpace::AllocateRaw*

The recursion between AllocateRaw* and SlowAllocateRaw makes incremental
stepping very complicated. This patch removes the recursion. Follow-on
patches will improve accounting of allocations done by incremental marking.

See: https://codereview.chromium.org/1252053003/#msg5

BUG=
R=hpayer@chromium.org

Review URL: https://codereview.chromium.org/1265443003

Cr-Commit-Position: refs/heads/master@{#29989}
parent d689c7a7
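
For readers skimming the diff below: the patch changes the shape of new-space allocation from a mutually recursive pair (AllocateRaw* bails into SlowAllocateRaw, which makes room and then calls back into AllocateRaw*) to a single pass in which the fast path calls EnsureAllocation, which only makes room, and the allocation is then completed inline. The following toy bump allocator is a minimal sketch of that shape only; ToySpace, AddPage, Bump and the other names are invented here and are not V8 APIs.

// Toy bump-pointer allocator sketching the before/after control flow.
// Invented names; not V8 code. Compiles with any C++11 compiler.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

class ToySpace {
 public:
  explicit ToySpace(size_t page_size) : page_size_(page_size) { AddPage(); }

  // Old shape (for contrast): the slow path re-enters the fast path.
  void* AllocateRawRecursive(size_t size) {
    if (static_cast<size_t>(limit_ - top_) < size) {
      return SlowAllocateRaw(size);
    }
    return Bump(size);
  }

  // New shape: EnsureAllocation only creates room; the allocation itself
  // always happens here, so there is no recursion to reason about.
  void* AllocateRaw(size_t size) {
    if (static_cast<size_t>(limit_ - top_) < size) {
      if (!EnsureAllocation(size)) return nullptr;  // caller would retry/GC
    }
    return Bump(size);
  }

 private:
  void* SlowAllocateRaw(size_t size) {
    if (!EnsureAllocation(size)) return nullptr;
    return AllocateRawRecursive(size);  // the recursion the patch removes
  }

  // Makes room (here: always by adding a fresh page); never allocates.
  bool EnsureAllocation(size_t size) {
    if (size > page_size_) return false;
    AddPage();
    return true;
  }

  void* Bump(size_t size) {
    void* result = top_;
    top_ += size;
    return result;
  }

  void AddPage() {
    pages_.emplace_back(page_size_);
    top_ = pages_.back().data();
    limit_ = top_ + page_size_;
  }

  size_t page_size_;
  std::vector<std::vector<uint8_t>> pages_;
  uint8_t* top_ = nullptr;
  uint8_t* limit_ = nullptr;
};

int main() {
  ToySpace space(64);
  for (int i = 0; i < 5; ++i) {
    std::printf("chunk %d at %p\n", i, space.AllocateRaw(24));
  }
  return 0;
}
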
@@ -345,23 +345,29 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
 AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
                                               AllocationAlignment alignment) {
-  Address old_top = allocation_info_.top();
-  int filler_size = Heap::GetFillToAlign(old_top, alignment);
+  Address top = allocation_info_.top();
+  int filler_size = Heap::GetFillToAlign(top, alignment);
   int aligned_size_in_bytes = size_in_bytes + filler_size;
-  if (allocation_info_.limit() - old_top < aligned_size_in_bytes) {
-    return SlowAllocateRaw(size_in_bytes, alignment);
+  if (allocation_info_.limit() - top < aligned_size_in_bytes) {
+    // See if we can create room.
+    if (!EnsureAllocation(size_in_bytes, alignment)) {
+      return AllocationResult::Retry();
+    }
+
+    top = allocation_info_.top();
+    filler_size = Heap::GetFillToAlign(top, alignment);
+    aligned_size_in_bytes = size_in_bytes + filler_size;
   }
 
-  HeapObject* obj = HeapObject::FromAddress(old_top);
-  allocation_info_.set_top(allocation_info_.top() + aligned_size_in_bytes);
+  HeapObject* obj = HeapObject::FromAddress(top);
+  allocation_info_.set_top(top + aligned_size_in_bytes);
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 
   if (filler_size > 0) {
     obj = heap()->PrecedeWithFiller(obj, filler_size);
   }
 
-  // The slow path above ultimately goes through AllocateRaw, so this suffices.
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
 
   return obj;
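
The aligned fast path above computes how many filler bytes are needed in front of the object so that it ends up aligned, reserves size plus filler in one bump, and then carves the filler out with PrecedeWithFiller. A small worked example of that arithmetic follows; this is generic code, not V8's Heap::GetFillToAlign, which additionally knows about tagged pointers and the specific AllocationAlignment modes.

// Generic "fill to align" arithmetic: how many bytes of filler are needed
// before an object placed at `address` so that the object starts aligned.
// Illustrative only; V8's Heap::GetFillToAlign is the real thing.
#include <cstdint>
#include <cstdio>

static int FillToAlign(uintptr_t address, int alignment) {
  int misalignment = static_cast<int>(address & (alignment - 1));
  return misalignment == 0 ? 0 : alignment - misalignment;
}

int main() {
  uintptr_t top = 0x1004;                    // example allocation top
  int size_in_bytes = 24;                    // requested object size
  int filler_size = FillToAlign(top, 8);     // 4 bytes of filler needed
  int aligned_size_in_bytes = size_in_bytes + filler_size;
  // The bump pointer advances by the combined size; the object itself
  // starts after the filler, at an 8-byte-aligned address.
  std::printf("filler=%d, bump=%d, object at 0x%llx\n", filler_size,
              aligned_size_in_bytes,
              static_cast<unsigned long long>(top + filler_size));
  return 0;
}
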
@@ -369,17 +375,20 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
 AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
-  Address old_top = allocation_info_.top();
+  Address top = allocation_info_.top();
+  if (allocation_info_.limit() - top < size_in_bytes) {
+    // See if we can create room.
+    if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
+      return AllocationResult::Retry();
+    }
 
-  if (allocation_info_.limit() - old_top < size_in_bytes) {
-    return SlowAllocateRaw(size_in_bytes, kWordAligned);
+    top = allocation_info_.top();
   }
 
-  HeapObject* obj = HeapObject::FromAddress(old_top);
-  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
+  HeapObject* obj = HeapObject::FromAddress(top);
+  allocation_info_.set_top(top + size_in_bytes);
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 
-  // The slow path above ultimately goes through AllocateRaw, so this suffices.
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
 
   return obj;
@@ -1469,14 +1469,34 @@ bool NewSpace::AddFreshPage() {
 }
 
 
-AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes,
-                                           AllocationAlignment alignment) {
+bool NewSpace::EnsureAllocation(int size_in_bytes,
+                                AllocationAlignment alignment) {
   Address old_top = allocation_info_.top();
   Address high = to_space_.page_high();
-  if (allocation_info_.limit() < high) {
-    int alignment_size = Heap::GetFillToAlign(old_top, alignment);
-    int aligned_size_in_bytes = size_in_bytes + alignment_size;
+  int filler_size = Heap::GetFillToAlign(old_top, alignment);
+  int aligned_size_in_bytes = size_in_bytes + filler_size;
+
+  if (old_top + aligned_size_in_bytes >= high) {
+    // Not enough room in the page, try to allocate a new one.
+    if (!AddFreshPage()) {
+      return false;
+    }
+
+    // Do a step for the bytes allocated on the last page.
+    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
+    heap()->incremental_marking()->Step(bytes_allocated,
+                                        IncrementalMarking::GC_VIA_STACK_GUARD);
+    old_top = allocation_info_.top();
+    top_on_previous_step_ = old_top;
+
+    high = to_space_.page_high();
+    filler_size = Heap::GetFillToAlign(old_top, alignment);
+    aligned_size_in_bytes = size_in_bytes + filler_size;
+  }
+
+  DCHECK(old_top + aligned_size_in_bytes < high);
 
+  if (allocation_info_.limit() < high) {
     // Either the limit has been lowered because linear allocation was disabled
     // or because incremental marking wants to get a chance to do a step. Set
     // the new limit accordingly.
@@ -1486,19 +1506,8 @@ AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes,
                                         IncrementalMarking::GC_VIA_STACK_GUARD);
     UpdateInlineAllocationLimit(aligned_size_in_bytes);
     top_on_previous_step_ = new_top;
-    if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes);
-    return AllocateRawAligned(size_in_bytes, alignment);
-  } else if (AddFreshPage()) {
-    // Switched to new page. Try allocating again.
-    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(bytes_allocated,
-                                        IncrementalMarking::GC_VIA_STACK_GUARD);
-    top_on_previous_step_ = to_space_.page_low();
-    if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes);
-    return AllocateRawAligned(size_in_bytes, alignment);
-  } else {
-    return AllocationResult::Retry();
   }
+  return true;
 }
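
EnsureAllocation keeps the existing incremental-marking bookkeeping: whenever the linear allocation area changes (a fresh page is taken, or the limit is moved), it credits incremental marking with the bytes bump-allocated since the previous step and records the new watermark in top_on_previous_step_. A toy sketch of just that bookkeeping pattern follows; the names are invented and this is not V8's IncrementalMarking interface.

// Toy sketch of the "step for bytes allocated since the previous step"
// bookkeeping around top_on_previous_step_. Invented names; not V8 code.
#include <cstdint>
#include <cstdio>

struct ToyMarking {
  int64_t credited_bytes = 0;
  // Stand-in for a real incremental-marking step.
  void Step(int64_t bytes) { credited_bytes += bytes; }
};

struct ToyNewSpace {
  uintptr_t top = 0x10000;
  uintptr_t top_on_previous_step = 0x10000;
  ToyMarking marking;

  void BumpAllocate(uintptr_t bytes) { top += bytes; }

  // Called whenever the allocation area changes (new page, new limit):
  // credit everything allocated since the last step, then reset the mark.
  void StepForAllocatedBytes() {
    marking.Step(static_cast<int64_t>(top - top_on_previous_step));
    top_on_previous_step = top;
  }
};

int main() {
  ToyNewSpace space;
  space.BumpAllocate(64);
  space.BumpAllocate(32);
  space.StepForAllocatedBytes();  // credits 96 bytes to marking
  std::printf("credited %lld bytes\n",
              static_cast<long long>(space.marking.credited_bytes));
  return 0;
}
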
@@ -2658,8 +2658,7 @@ class NewSpace : public Space {
   HistogramInfo* allocated_histogram_;
   HistogramInfo* promoted_histogram_;
 
-  MUST_USE_RESULT AllocationResult
-  SlowAllocateRaw(int size_in_bytes, AllocationAlignment alignment);
+  bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
 
   friend class SemiSpaceIterator;
 };