Commit 97b2a814 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Fix black allocation.

This patch ensures that an object returned by AllocateRaw is marked
black if black allocation starts while that object is being allocated.

This fixes the following issue:
1) Generated code requests allocation of size N for folded allocation.
2) Runtime gets a free list node at address A of size N+M and sets up
   a linear allocation area with top = A+N and limit = A+N+M.
3) Runtime invokes the allocation observer that starts incremental marking
   and starts black allocation. The area [A+N, A+N+M) is marked black.
4) Runtime returns a white object at address A as the allocation result.
5) Generated code moves the top pointer to A and does bump pointer
   allocations of white objects from A to A+N+M.
6) The object allocated at A+N can have an impossible markbit pattern
   (see the sketch below).
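
The sketch below is a minimal, self-contained model of this sequence, not
V8 code: PageModel, Color, and the word-granular markbit map are invented
for illustration only. It shows why the object returned in step 4 must be
blackened once black allocation has started during the allocation.

  // Hypothetical model (invented types, not V8 internals).
  #include <cassert>
  #include <cstddef>
  #include <map>

  enum class Color { kWhite, kBlack };

  struct PageModel {
    std::map<std::size_t, Color> markbits;  // word address -> markbit
    void CreateBlackArea(std::size_t start, std::size_t end) {
      for (std::size_t a = start; a < end; a += 8) markbits[a] = Color::kBlack;
    }
    Color ColorOf(std::size_t addr) const {
      auto it = markbits.find(addr);
      return it == markbits.end() ? Color::kWhite : it->second;
    }
  };

  int main() {
    PageModel page;
    const std::size_t A = 0x1000, N = 64, M = 128;

    // Step 2: a free-list node of size N+M is found at A; the linear
    // allocation area is set up as [A+N, A+N+M).
    std::size_t top = A + N, limit = A + N + M;

    // Step 3: the allocation observer starts black allocation, which only
    // blackens the current linear allocation area [top, limit).
    page.CreateBlackArea(top, limit);

    // Step 4: the object at A is handed back while it is still white.
    assert(page.ColorOf(A) == Color::kWhite);  // the bug

    // The fix (what Observer::Step now does): blacken the returned object,
    // so generated code restarting bump-pointer allocation at A never mixes
    // white objects into a black area.
    page.CreateBlackArea(A, A + N);
    assert(page.ColorOf(A) == Color::kBlack);
    return 0;
  }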

Bug: chromium:694255
Change-Id: I09ceebc97a510fa5fe4ff20706bc46a99f8b7cf4
Reviewed-on: https://chromium-review.googlesource.com/638338
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48005}
parent 6aafc43e
@@ -23,12 +23,26 @@
 namespace v8 {
 namespace internal {
 
-void IncrementalMarking::Observer::Step(int bytes_allocated, Address, size_t) {
-  VMState<GC> state(incremental_marking_.heap()->isolate());
+void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
+                                        size_t size) {
+  Heap* heap = incremental_marking_.heap();
+  VMState<GC> state(heap->isolate());
   RuntimeCallTimerScope runtime_timer(
-      incremental_marking_.heap()->isolate(),
-      &RuntimeCallStats::GC_Custom_IncrementalMarkingObserver);
+      heap->isolate(), &RuntimeCallStats::GC_Custom_IncrementalMarkingObserver);
   incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
+  if (incremental_marking_.black_allocation() && addr != nullptr) {
+    // AdvanceIncrementalMarkingOnAllocation can start black allocation.
+    // Ensure that the new object is marked black.
+    HeapObject* object = HeapObject::FromAddress(addr);
+    if (incremental_marking_.marking_state()->IsWhite(object) &&
+        !heap->InNewSpace(object)) {
+      if (heap->lo_space()->Contains(object)) {
+        incremental_marking_.marking_state()->WhiteToBlack(object);
+      } else {
+        Page::FromAddress(addr)->CreateBlackArea(addr, addr + size);
+      }
+    }
+  }
 }
 
 IncrementalMarking::IncrementalMarking(Heap* heap)
...
@@ -280,20 +280,6 @@ bool FreeListCategory::is_linked() {
   return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
 }
 
-// Try linear allocation in the page of alloc_info's allocation top. Does
-// not contain slow case logic (e.g. move to the next page or try free list
-// allocation) so it can be used by all the allocation functions and for all
-// the paged spaces.
-HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
-  Address current_top = allocation_info_.top();
-  Address new_top = current_top + size_in_bytes;
-  if (new_top > allocation_info_.limit()) return NULL;
-  allocation_info_.set_top(new_top);
-  return HeapObject::FromAddress(current_top);
-}
-
 AllocationResult LocalAllocationBuffer::AllocateRawAligned(
     int size_in_bytes, AllocationAlignment alignment) {
   Address current_top = allocation_info_.top();
@@ -311,14 +297,28 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
   return AllocationResult(HeapObject::FromAddress(current_top));
 }
 
-HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
-                                                AllocationAlignment alignment) {
+bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes) {
+  if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit())
+    return true;
+  if (free_list_.Allocate(size_in_bytes)) return true;
+  return SlowAllocateRaw(size_in_bytes);
+}
+
+HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
+  Address current_top = allocation_info_.top();
+  Address new_top = current_top + size_in_bytes;
+  DCHECK_LE(new_top, allocation_info_.limit());
+  allocation_info_.set_top(new_top);
+  return HeapObject::FromAddress(current_top);
+}
+
+HeapObject* PagedSpace::TryAllocateLinearlyAligned(
+    int* size_in_bytes, AllocationAlignment alignment) {
   Address current_top = allocation_info_.top();
   int filler_size = Heap::GetFillToAlign(current_top, alignment);
   Address new_top = current_top + filler_size + *size_in_bytes;
-  if (new_top > allocation_info_.limit()) return NULL;
+  if (new_top > allocation_info_.limit()) return nullptr;
   allocation_info_.set_top(new_top);
 
   if (filler_size > 0) {
@@ -330,74 +330,40 @@ HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
   return HeapObject::FromAddress(current_top);
 }
 
-// Raw allocation.
 AllocationResult PagedSpace::AllocateRawUnaligned(
     int size_in_bytes, UpdateSkipList update_skip_list) {
-  HeapObject* object = AllocateLinearly(size_in_bytes);
-
-  if (object == NULL) {
-    object = free_list_.Allocate(size_in_bytes);
-    if (object == NULL) {
-      object = SlowAllocateRaw(size_in_bytes);
-    }
-    if (object != NULL && heap()->incremental_marking()->black_allocation()) {
-      Address start = object->address();
-      Address end = object->address() + size_in_bytes;
-      Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
-    }
+  if (!EnsureLinearAllocationArea(size_in_bytes)) {
+    return AllocationResult::Retry(identity());
   }
-
-  if (object != NULL) {
-    if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
-      SkipList::Update(object->address(), size_in_bytes);
-    }
-    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
-    return object;
+  HeapObject* object = AllocateLinearly(size_in_bytes);
+  DCHECK_NOT_NULL(object);
+  if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
+    SkipList::Update(object->address(), size_in_bytes);
   }
-
-  return AllocationResult::Retry(identity());
+  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
+  return object;
 }
 
-// Raw allocation.
 AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                 AllocationAlignment alignment) {
   DCHECK(identity() == OLD_SPACE);
   int allocation_size = size_in_bytes;
-  HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);
-
-  if (object == NULL) {
+  HeapObject* object = TryAllocateLinearlyAligned(&allocation_size, alignment);
+  if (object == nullptr) {
     // We don't know exactly how much filler we need to align until space is
     // allocated, so assume the worst case.
     int filler_size = Heap::GetMaximumFillToAlign(alignment);
     allocation_size += filler_size;
-    object = free_list_.Allocate(allocation_size);
-    if (object == NULL) {
-      object = SlowAllocateRaw(allocation_size);
-    }
-    if (object != NULL) {
-      if (heap()->incremental_marking()->black_allocation()) {
-        Address start = object->address();
-        Address end = object->address() + allocation_size;
-        Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
-      }
-      if (filler_size != 0) {
-        object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
-                                         alignment);
-        // Filler objects are initialized, so mark only the aligned object
-        // memory as uninitialized.
-        allocation_size = size_in_bytes;
-      }
+    if (!EnsureLinearAllocationArea(allocation_size)) {
+      return AllocationResult::Retry(identity());
     }
+    allocation_size = size_in_bytes;
+    object = TryAllocateLinearlyAligned(&allocation_size, alignment);
+    DCHECK_NOT_NULL(object);
  }
-
-  if (object != NULL) {
-    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
-    return object;
-  }
-
-  return AllocationResult::Retry(identity());
+  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
+  return object;
 }
@@ -414,6 +380,9 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
   HeapObject* heap_obj = nullptr;
   if (!result.IsRetry() && result.To(&heap_obj)) {
     AllocationStep(heap_obj->address(), size_in_bytes);
+    DCHECK_IMPLIES(
+        heap()->incremental_marking()->black_allocation(),
+        heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
   }
   return result;
 }
...
@@ -2856,11 +2856,7 @@ FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
   return node;
 }
 
-// Allocation on the old space free list. If it succeeds then a new linear
-// allocation space has been set up with the top and limit of the space. If
-// the allocation fails then NULL is returned, and the caller can perform a GC
-// or allocate a new page before retrying.
-HeapObject* FreeList::Allocate(size_t size_in_bytes) {
+bool FreeList::Allocate(size_t size_in_bytes) {
   DCHECK(size_in_bytes <= kMaxBlockSize);
   DCHECK(IsAligned(size_in_bytes, kPointerSize));
   DCHECK_LE(owner_->top(), owner_->limit());
@@ -2886,7 +2882,7 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
 
   size_t new_node_size = 0;
   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
-  if (new_node == nullptr) return nullptr;
+  if (new_node == nullptr) return false;
 
   DCHECK_GE(new_node_size, size_in_bytes);
   size_t bytes_left = new_node_size - size_in_bytes;
@@ -2911,10 +2907,10 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
             Page::FromAddress(new_node->address()));
 
   if (owner_->heap()->inline_allocation_disabled()) {
-    // Keep the linear allocation area empty if requested to do so, just
-    // return area back to the free list instead.
+    // Keep the linear allocation area to fit exactly the requested size.
+    // Return the rest to the free list.
     owner_->Free(new_node->address() + size_in_bytes, bytes_left);
-    owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
+    owner_->SetAllocationInfo(new_node->address(),
                              new_node->address() + size_in_bytes);
   } else if (bytes_left > kThreshold &&
              owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
@@ -2928,16 +2924,14 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
     owner_->Free(new_node->address() + size_in_bytes + linear_size,
                  new_node_size - size_in_bytes - linear_size);
     owner_->SetAllocationInfo(
-        new_node->address() + size_in_bytes,
-        new_node->address() + size_in_bytes + linear_size);
+        new_node->address(), new_node->address() + size_in_bytes + linear_size);
   } else {
     // Normally we give the rest of the node to the allocator as its new
     // linear allocation area.
-    owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
+    owner_->SetAllocationInfo(new_node->address(),
                              new_node->address() + new_node_size);
   }
 
-  return new_node;
+  return true;
 }
 
 size_t FreeList::EvictFreeListItems(Page* page) {
@@ -3103,7 +3097,7 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
   }
 }
 
-HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
+bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   if (collector->sweeping_in_progress()) {
     // Wait for the sweeper threads here and complete the sweeping phase.
@@ -3113,30 +3107,30 @@ HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
     // entries.
     return free_list_.Allocate(size_in_bytes);
   }
-  return nullptr;
+  return false;
 }
 
-HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
+bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   if (collector->sweeping_in_progress()) {
     collector->SweepAndRefill(this);
     return free_list_.Allocate(size_in_bytes);
   }
-  return nullptr;
+  return false;
 }
 
-HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+bool PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   VMState<GC> state(heap()->isolate());
   RuntimeCallTimerScope runtime_timer(
       heap()->isolate(), &RuntimeCallStats::GC_Custom_SlowAllocateRaw);
   return RawSlowAllocateRaw(size_in_bytes);
 }
 
-HeapObject* CompactionSpace::SlowAllocateRaw(int size_in_bytes) {
+bool CompactionSpace::SlowAllocateRaw(int size_in_bytes) {
   return RawSlowAllocateRaw(size_in_bytes);
 }
 
-HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
+bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.
   DCHECK_GE(size_in_bytes, 0);
   const int kMaxPagesToSweep = 1;
@@ -3154,17 +3148,13 @@ HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
     RefillFreeList();
 
     // Retry the free list allocation.
-    HeapObject* object =
-        free_list_.Allocate(static_cast<size_t>(size_in_bytes));
-    if (object != NULL) return object;
+    if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
 
    if (locked_page_ != nullptr) {
      DCHECK_EQ(locked_page_->owner()->identity(), identity());
      collector->sweeper().ParallelSweepPage(locked_page_, identity());
      locked_page_ = nullptr;
-      HeapObject* object =
-          free_list_.Allocate(static_cast<size_t>(size_in_bytes));
-      if (object != nullptr) return object;
+      if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
    }
 
    // If sweeping is still in progress try to sweep pages.
@@ -3172,8 +3162,7 @@ HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
           identity(), size_in_bytes, kMaxPagesToSweep);
       RefillFreeList();
       if (max_freed >= size_in_bytes) {
-        object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
-        if (object != nullptr) return object;
+        if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
       }
     } else if (is_local()) {
       // Sweeping not in progress and we are on a {CompactionSpace}. This can
@@ -3182,9 +3171,7 @@ HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
       Page* page = main_space->RemovePageSafe(size_in_bytes);
       if (page != nullptr) {
         AddPage(page);
-        HeapObject* object =
-            free_list_.Allocate(static_cast<size_t>(size_in_bytes));
-        if (object != nullptr) return object;
+        if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
       }
     }
@@ -3326,14 +3313,15 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
   heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
       Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
 
-  AllocationStep(object->address(), object_size);
-
   heap()->CreateFillerObjectAt(object->address(), object_size,
                                ClearRecordedSlots::kNo);
   if (heap()->incremental_marking()->black_allocation()) {
     heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
   }
+  AllocationStep(object->address(), object_size);
+  DCHECK_IMPLIES(
+      heap()->incremental_marking()->black_allocation(),
+      heap()->incremental_marking()->marking_state()->IsBlack(object));
   return object;
 }
...
@@ -1748,10 +1748,10 @@ class V8_EXPORT_PRIVATE FreeList {
   // and the size should be a non-zero multiple of the word size.
   size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
 
-  // Allocate a block of size {size_in_bytes} from the free list. The block is
-  // unitialized. A failure is returned if no block is available. The size
-  // should be a non-zero multiple of the word size.
-  MUST_USE_RESULT HeapObject* Allocate(size_t size_in_bytes);
+  // Finds a node of size at least size_in_bytes and sets up a linear allocation
+  // area using this node. Returns false if there is no such node and the caller
+  // has to retry allocation after collecting garbage.
+  MUST_USE_RESULT bool Allocate(size_t size_in_bytes);
 
   // Clear the free list.
   void Reset();
@@ -2200,26 +2200,33 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
   // size limit has been hit.
   bool Expand();
 
-  // Generic fast case allocation function that tries linear allocation at the
-  // address denoted by top in allocation_info_.
+  // Sets up a linear allocation area that fits the given number of bytes.
+  // Returns false if there is not enough space and the caller has to retry
+  // after collecting garbage.
+  inline bool EnsureLinearAllocationArea(int size_in_bytes);
+
+  // Allocates an object from the linear allocation area. Assumes that the
+  // linear allocation area is large enought to fit the object.
   inline HeapObject* AllocateLinearly(int size_in_bytes);
 
-  // Generic fast case allocation function that tries aligned linear allocation
-  // at the address denoted by top in allocation_info_. Writes the aligned
-  // allocation size, which includes the filler size, to size_in_bytes.
-  inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
-                                             AllocationAlignment alignment);
+  // Tries to allocate an aligned object from the linear allocation area.
+  // Returns nullptr if the linear allocation area does not fit the object.
+  // Otherwise, returns the object pointer and writes the allocation size
+  // (object size + alignment filler size) to the size_in_bytes.
+  inline HeapObject* TryAllocateLinearlyAligned(int* size_in_bytes,
+                                                AllocationAlignment alignment);
 
   // If sweeping is still in progress try to sweep unswept pages. If that is
-  // not successful, wait for the sweeper threads and re-try free-list
-  // allocation.
-  MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
-      int size_in_bytes);
+  // not successful, wait for the sweeper threads and retry free-list
+  // allocation. Returns false if there is not enough space and the caller
+  // has to retry after collecting garbage.
+  MUST_USE_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);
 
-  // Slow path of AllocateRaw. This function is space-dependent.
-  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
+  // Slow path of AllocateRaw. This function is space-dependent. Returns false
+  // if there is not enough space and the caller has to retry after
+  // collecting garbage.
+  MUST_USE_RESULT virtual bool SlowAllocateRaw(int size_in_bytes);
 
-  MUST_USE_RESULT HeapObject* RawSlowAllocateRaw(int size_in_bytes);
+  // Implementation of SlowAllocateRaw. Returns false if there is not enough
+  // space and the caller has to retry after collecting garbage.
+  MUST_USE_RESULT bool RawSlowAllocateRaw(int size_in_bytes);
 
   size_t area_size_;
@@ -2784,10 +2791,9 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
   // The space is temporary and not included in any snapshots.
   bool snapshotable() override { return false; }
 
-  MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
-      int size_in_bytes) override;
+  MUST_USE_RESULT bool SweepAndRetryAllocation(int size_in_bytes) override;
 
-  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes) override;
+  MUST_USE_RESULT bool SlowAllocateRaw(int size_in_bytes) override;
 };
...
@@ -1709,11 +1709,8 @@ TEST(TestAlignedOverAllocation) {
   if (double_misalignment) {
     start = AlignOldSpace(kDoubleAligned, 0);
     obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned);
-    // The object is aligned, and a filler object is created after.
+    // The object is aligned.
     CHECK(IsAddressAligned(obj->address(), kDoubleAlignment));
-    filler = HeapObject::FromAddress(start + kPointerSize);
-    CHECK(obj != filler && filler->IsFiller() &&
-          filler->Size() == kPointerSize);
     // Try the opposite alignment case.
     start = AlignOldSpace(kDoubleAligned, kPointerSize);
     obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned);
@@ -1728,11 +1725,8 @@ TEST(TestAlignedOverAllocation) {
   // Similarly for kDoubleUnaligned.
   start = AlignOldSpace(kDoubleUnaligned, 0);
   obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
-  // The object is aligned, and a filler object is created after.
+  // The object is aligned.
   CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize));
-  filler = HeapObject::FromAddress(start + kPointerSize);
-  CHECK(obj != filler && filler->IsFiller() &&
-        filler->Size() == kPointerSize);
   // Try the opposite alignment case.
   start = AlignOldSpace(kDoubleUnaligned, kPointerSize);
   obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
...