Commit 97b2a814 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Fix black allocation.

This patch ensures that an object returned by AllocateRaw is marked
black if black allocation starts during the object allocation.

This fixes the following issue:
1) Generated code requests allocation of size N for folded allocation.
2) Runtime gets a free list node at address A of size N+M and sets up
   a linear allocation area with top = A+N and limit = A+N+M.
3) Runtime invokes the allocation observer that starts incremental marking
   and starts black allocation. The area [A+N, A+N+M) is marked black.
4) Runtime returns a white object at address A as the allocation result.
5) Generated code moves the top pointer to A and does bump pointer
   allocations of white objects from A to A+N+M.
6) The object allocated at A+N can have an impossible markbit pattern.
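
A minimal, self-contained sketch of the runtime side of this race and of the
invariant the patch restores. This is illustrative only, not V8 code: ToyHeap,
Color, and the addresses/sizes are invented for the example.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>

enum class Color { kWhite, kBlack };

struct ToyHeap {
  std::map<std::uintptr_t, Color> markbits;  // object address -> mark bit
  bool black_allocation = false;
  std::uintptr_t top = 0;
  std::uintptr_t limit = 0;

  // Step 2: carve a free-list node [A, A+N+M) into an object of size N and a
  // linear allocation area [A+N, A+N+M); the object starts out white.
  std::uintptr_t SetUpFromFreeListNode(std::uintptr_t A, std::size_t N,
                                       std::size_t M) {
    top = A + N;
    limit = A + N + M;
    markbits[A] = Color::kWhite;
    return A;
  }

  // Step 3: the allocation observer runs and starts black allocation.
  void ObserverStep(std::uintptr_t addr) {
    black_allocation = true;
    // The fix: the object that is about to be returned must not stay white.
    if (black_allocation && markbits[addr] == Color::kWhite) {
      markbits[addr] = Color::kBlack;
    }
  }

  // Steps 2-4 of the scenario above, with the fix applied.
  std::uintptr_t AllocateRaw(std::uintptr_t A, std::size_t N, std::size_t M) {
    std::uintptr_t object = SetUpFromFreeListNode(A, N, M);
    ObserverStep(object);  // may start black allocation mid-allocation
    return object;
  }
};

int main() {
  ToyHeap heap;
  std::uintptr_t obj = heap.AllocateRaw(/*A=*/0x1000, /*N=*/64, /*M=*/64);
  // The invariant that the new DCHECK in PagedSpace::AllocateRaw verifies:
  assert(!heap.black_allocation || heap.markbits[obj] == Color::kBlack);
  return 0;
}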

Bug: chromium:694255
Change-Id: I09ceebc97a510fa5fe4ff20706bc46a99f8b7cf4
Reviewed-on: https://chromium-review.googlesource.com/638338
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48005}
parent 6aafc43e
@@ -23,12 +23,26 @@
namespace v8 {
namespace internal {
void IncrementalMarking::Observer::Step(int bytes_allocated, Address, size_t) {
VMState<GC> state(incremental_marking_.heap()->isolate());
void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
size_t size) {
Heap* heap = incremental_marking_.heap();
VMState<GC> state(heap->isolate());
RuntimeCallTimerScope runtime_timer(
incremental_marking_.heap()->isolate(),
&RuntimeCallStats::GC_Custom_IncrementalMarkingObserver);
heap->isolate(), &RuntimeCallStats::GC_Custom_IncrementalMarkingObserver);
incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
if (incremental_marking_.black_allocation() && addr != nullptr) {
// AdvanceIncrementalMarkingOnAllocation can start black allocation.
// Ensure that the new object is marked black.
HeapObject* object = HeapObject::FromAddress(addr);
if (incremental_marking_.marking_state()->IsWhite(object) &&
!heap->InNewSpace(object)) {
if (heap->lo_space()->Contains(object)) {
incremental_marking_.marking_state()->WhiteToBlack(object);
} else {
Page::FromAddress(addr)->CreateBlackArea(addr, addr + size);
}
}
}
}
IncrementalMarking::IncrementalMarking(Heap* heap)
......
@@ -280,20 +280,6 @@ bool FreeListCategory::is_linked() {
return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
}
// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
if (new_top > allocation_info_.limit()) return NULL;
allocation_info_.set_top(new_top);
return HeapObject::FromAddress(current_top);
}
AllocationResult LocalAllocationBuffer::AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
@@ -311,14 +297,28 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
return AllocationResult(HeapObject::FromAddress(current_top));
}
bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes) {
if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit())
return true;
if (free_list_.Allocate(size_in_bytes)) return true;
return SlowAllocateRaw(size_in_bytes);
}
HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
AllocationAlignment alignment) {
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
DCHECK_LE(new_top, allocation_info_.limit());
allocation_info_.set_top(new_top);
return HeapObject::FromAddress(current_top);
}
HeapObject* PagedSpace::TryAllocateLinearlyAligned(
int* size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
Address new_top = current_top + filler_size + *size_in_bytes;
if (new_top > allocation_info_.limit()) return NULL;
if (new_top > allocation_info_.limit()) return nullptr;
allocation_info_.set_top(new_top);
if (filler_size > 0) {
@@ -330,74 +330,40 @@ HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
return HeapObject::FromAddress(current_top);
}
// Raw allocation.
AllocationResult PagedSpace::AllocateRawUnaligned(
int size_in_bytes, UpdateSkipList update_skip_list) {
HeapObject* object = AllocateLinearly(size_in_bytes);
if (object == NULL) {
object = free_list_.Allocate(size_in_bytes);
if (object == NULL) {
object = SlowAllocateRaw(size_in_bytes);
}
if (object != NULL && heap()->incremental_marking()->black_allocation()) {
Address start = object->address();
Address end = object->address() + size_in_bytes;
Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
}
if (!EnsureLinearAllocationArea(size_in_bytes)) {
return AllocationResult::Retry(identity());
}
if (object != NULL) {
if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
return object;
HeapObject* object = AllocateLinearly(size_in_bytes);
DCHECK_NOT_NULL(object);
if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
return AllocationResult::Retry(identity());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
return object;
}
// Raw allocation.
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment) {
DCHECK(identity() == OLD_SPACE);
int allocation_size = size_in_bytes;
HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);
if (object == NULL) {
HeapObject* object = TryAllocateLinearlyAligned(&allocation_size, alignment);
if (object == nullptr) {
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
object = free_list_.Allocate(allocation_size);
if (object == NULL) {
object = SlowAllocateRaw(allocation_size);
}
if (object != NULL) {
if (heap()->incremental_marking()->black_allocation()) {
Address start = object->address();
Address end = object->address() + allocation_size;
Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
}
if (filler_size != 0) {
object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
alignment);
// Filler objects are initialized, so mark only the aligned object
// memory as uninitialized.
allocation_size = size_in_bytes;
}
if (!EnsureLinearAllocationArea(allocation_size)) {
return AllocationResult::Retry(identity());
}
allocation_size = size_in_bytes;
object = TryAllocateLinearlyAligned(&allocation_size, alignment);
DCHECK_NOT_NULL(object);
}
if (object != NULL) {
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
return object;
}
return AllocationResult::Retry(identity());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
return object;
}
@@ -414,6 +380,9 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
HeapObject* heap_obj = nullptr;
if (!result.IsRetry() && result.To(&heap_obj)) {
AllocationStep(heap_obj->address(), size_in_bytes);
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
}
return result;
}
......
@@ -2856,11 +2856,7 @@ FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
return node;
}
// Allocation on the old space free list. If it succeeds then a new linear
// allocation space has been set up with the top and limit of the space. If
// the allocation fails then NULL is returned, and the caller can perform a GC
// or allocate a new page before retrying.
HeapObject* FreeList::Allocate(size_t size_in_bytes) {
bool FreeList::Allocate(size_t size_in_bytes) {
DCHECK(size_in_bytes <= kMaxBlockSize);
DCHECK(IsAligned(size_in_bytes, kPointerSize));
DCHECK_LE(owner_->top(), owner_->limit());
@@ -2886,7 +2882,7 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
size_t new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == nullptr) return nullptr;
if (new_node == nullptr) return false;
DCHECK_GE(new_node_size, size_in_bytes);
size_t bytes_left = new_node_size - size_in_bytes;
@@ -2911,10 +2907,10 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
Page::FromAddress(new_node->address()));
if (owner_->heap()->inline_allocation_disabled()) {
// Keep the linear allocation area empty if requested to do so, just
// return area back to the free list instead.
// Keep the linear allocation area to fit exactly the requested size.
// Return the rest to the free list.
owner_->Free(new_node->address() + size_in_bytes, bytes_left);
owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
owner_->SetAllocationInfo(new_node->address(),
new_node->address() + size_in_bytes);
} else if (bytes_left > kThreshold &&
owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
@@ -2928,16 +2924,14 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
owner_->Free(new_node->address() + size_in_bytes + linear_size,
new_node_size - size_in_bytes - linear_size);
owner_->SetAllocationInfo(
new_node->address() + size_in_bytes,
new_node->address() + size_in_bytes + linear_size);
new_node->address(), new_node->address() + size_in_bytes + linear_size);
} else {
// Normally we give the rest of the node to the allocator as its new
// linear allocation area.
owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
owner_->SetAllocationInfo(new_node->address(),
new_node->address() + new_node_size);
}
return new_node;
return true;
}
size_t FreeList::EvictFreeListItems(Page* page) {
@@ -3103,7 +3097,7 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
}
}
HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
// Wait for the sweeper threads here and complete the sweeping phase.
@@ -3113,30 +3107,30 @@ HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
// entries.
return free_list_.Allocate(size_in_bytes);
}
return nullptr;
return false;
}
HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
collector->SweepAndRefill(this);
return free_list_.Allocate(size_in_bytes);
}
return nullptr;
return false;
}
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
bool PagedSpace::SlowAllocateRaw(int size_in_bytes) {
VMState<GC> state(heap()->isolate());
RuntimeCallTimerScope runtime_timer(
heap()->isolate(), &RuntimeCallStats::GC_Custom_SlowAllocateRaw);
return RawSlowAllocateRaw(size_in_bytes);
}
HeapObject* CompactionSpace::SlowAllocateRaw(int size_in_bytes) {
bool CompactionSpace::SlowAllocateRaw(int size_in_bytes) {
return RawSlowAllocateRaw(size_in_bytes);
}
HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
// Allocation in this space has failed.
DCHECK_GE(size_in_bytes, 0);
const int kMaxPagesToSweep = 1;
@@ -3154,17 +3148,13 @@ HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
RefillFreeList();
// Retry the free list allocation.
HeapObject* object =
free_list_.Allocate(static_cast<size_t>(size_in_bytes));
if (object != NULL) return object;
if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
if (locked_page_ != nullptr) {
DCHECK_EQ(locked_page_->owner()->identity(), identity());
collector->sweeper().ParallelSweepPage(locked_page_, identity());
locked_page_ = nullptr;
HeapObject* object =
free_list_.Allocate(static_cast<size_t>(size_in_bytes));
if (object != nullptr) return object;
if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
}
// If sweeping is still in progress try to sweep pages.
@@ -3172,8 +3162,7 @@ HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
identity(), size_in_bytes, kMaxPagesToSweep);
RefillFreeList();
if (max_freed >= size_in_bytes) {
object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
if (object != nullptr) return object;
if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
}
} else if (is_local()) {
// Sweeping not in progress and we are on a {CompactionSpace}. This can
@@ -3182,9 +3171,7 @@ HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
Page* page = main_space->RemovePageSafe(size_in_bytes);
if (page != nullptr) {
AddPage(page);
HeapObject* object =
free_list_.Allocate(static_cast<size_t>(size_in_bytes));
if (object != nullptr) return object;
if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
}
}
@@ -3326,14 +3313,15 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
AllocationStep(object->address(), object_size);
heap()->CreateFillerObjectAt(object->address(), object_size,
ClearRecordedSlots::kNo);
if (heap()->incremental_marking()->black_allocation()) {
heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
}
AllocationStep(object->address(), object_size);
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(object));
return object;
}
......
@@ -1748,10 +1748,10 @@ class V8_EXPORT_PRIVATE FreeList {
// and the size should be a non-zero multiple of the word size.
size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
// Allocate a block of size {size_in_bytes} from the free list. The block is
// uninitialized. A failure is returned if no block is available. The size
// should be a non-zero multiple of the word size.
MUST_USE_RESULT HeapObject* Allocate(size_t size_in_bytes);
// Finds a node of size at least size_in_bytes and sets up a linear allocation
// area using this node. Returns false if there is no such node and the caller
// has to retry allocation after collecting garbage.
MUST_USE_RESULT bool Allocate(size_t size_in_bytes);
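// An illustrative sketch, not part of this change: under this contract the
// caller only refills the linear allocation area and then bumps the top
// pointer itself, roughly
//   if (!free_list_.Allocate(size_in_bytes)) SlowAllocateRaw(size_in_bytes);
//   HeapObject* object = AllocateLinearly(size_in_bytes);
// see PagedSpace::EnsureLinearAllocationArea and AllocateLinearly earlier in
// this diff for the actual fast path.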
// Clear the free list.
void Reset();
@@ -2200,26 +2200,33 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// size limit has been hit.
bool Expand();
// Generic fast case allocation function that tries linear allocation at the
// address denoted by top in allocation_info_.
// Sets up a linear allocation area that fits the given number of bytes.
// Returns false if there is not enough space and the caller has to retry
// after collecting garbage.
inline bool EnsureLinearAllocationArea(int size_in_bytes);
// Allocates an object from the linear allocation area. Assumes that the
// linear allocation area is large enough to fit the object.
inline HeapObject* AllocateLinearly(int size_in_bytes);
// Generic fast case allocation function that tries aligned linear allocation
// at the address denoted by top in allocation_info_. Writes the aligned
// allocation size, which includes the filler size, to size_in_bytes.
inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
AllocationAlignment alignment);
// Tries to allocate an aligned object from the linear allocation area.
// Returns nullptr if the linear allocation area does not fit the object.
// Otherwise, returns the object pointer and writes the allocation size
// (object size + alignment filler size) to size_in_bytes.
inline HeapObject* TryAllocateLinearlyAligned(int* size_in_bytes,
AllocationAlignment alignment);
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and re-try free-list
// allocation.
MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
int size_in_bytes);
// not successful, wait for the sweeper threads and retry free-list
// allocation. Returns false if there is not enough space and the caller
// has to retry after collecting garbage.
MUST_USE_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);
// Slow path of AllocateRaw. This function is space-dependent.
MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
// Slow path of AllocateRaw. This function is space-dependent. Returns false
// if there is not enough space and the caller has to retry after
// collecting garbage.
MUST_USE_RESULT virtual bool SlowAllocateRaw(int size_in_bytes);
MUST_USE_RESULT HeapObject* RawSlowAllocateRaw(int size_in_bytes);
// Implementation of SlowAllocateRaw. Returns false if there is not enough
// space and the caller has to retry after collecting garbage.
MUST_USE_RESULT bool RawSlowAllocateRaw(int size_in_bytes);
size_t area_size_;
@@ -2784,10 +2791,9 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
// The space is temporary and not included in any snapshots.
bool snapshotable() override { return false; }
MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
int size_in_bytes) override;
MUST_USE_RESULT bool SweepAndRetryAllocation(int size_in_bytes) override;
MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes) override;
MUST_USE_RESULT bool SlowAllocateRaw(int size_in_bytes) override;
};
......
@@ -1709,11 +1709,8 @@ TEST(TestAlignedOverAllocation) {
if (double_misalignment) {
start = AlignOldSpace(kDoubleAligned, 0);
obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned);
// The object is aligned, and a filler object is created after.
// The object is aligned.
CHECK(IsAddressAligned(obj->address(), kDoubleAlignment));
filler = HeapObject::FromAddress(start + kPointerSize);
CHECK(obj != filler && filler->IsFiller() &&
filler->Size() == kPointerSize);
// Try the opposite alignment case.
start = AlignOldSpace(kDoubleAligned, kPointerSize);
obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned);
@@ -1728,11 +1725,8 @@ TEST(TestAlignedOverAllocation) {
// Similarly for kDoubleUnaligned.
start = AlignOldSpace(kDoubleUnaligned, 0);
obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
// The object is aligned, and a filler object is created after.
// The object is aligned.
CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize));
filler = HeapObject::FromAddress(start + kPointerSize);
CHECK(obj != filler && filler->IsFiller() &&
filler->Size() == kPointerSize);
// Try the opposite alignment case.
start = AlignOldSpace(kDoubleUnaligned, kPointerSize);
obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
......