Commit af5f437c authored by Ulan Degenbaev, committed by Commit Bot

[heap] Fix tracking of code pages for V8 stack unwinder

When a compaction space allocates a new code page, that page needs to
be added to the Isolate::code_pages_ array used for stack unwinding.
Since the array is owned by the main thread, the compaction thread
cannot modify it directly. Because of that, code pages are added upon
merging of the compaction space into the main space in MergeLocalSpace.

The bug was that all code pages coming from the compaction space
were added to the code_pages_ array. However, some of the pages are
not newly allocated but merely borrowed from the main space.

This CL introduces a new page flag for marking pages that are borrowed
during compaction and skips them in MergeLocalSpace.

Bug: v8:10900
Change-Id: I786dc5747bd7c785ae58dfd8b841c00774efb15e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2416500
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69992}
parent 6443a79f
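For orientation before reading the diff, here is a condensed, runnable model of the bookkeeping this change introduces. It is not V8 code: Page, Isolate, BorrowPage and MergePage below are simplified stand-ins for the real Page flags, PagedSpace::BorrowPage/AddBorrowedPage, Heap::NotifyOldGenerationExpansion and Isolate::code_pages_ that appear in the hunks that follow, and all locking, sweeping and accounting is omitted.

// Minimal model (assumed, simplified): the compaction space tags pages it
// borrows from the main space, and the merge step registers only untagged
// (freshly allocated) pages with the unwinder-visible code page list.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Page {
  uintptr_t start = 0;
  size_t size = 0;
  bool borrowed_by_compaction_space = false;  // models the new page flag
};

struct Isolate {
  std::vector<Page*> code_pages;  // models Isolate::code_pages_
  void AddCodeMemoryChunk(Page* p) { code_pages.push_back(p); }
};

// Models PagedSpace::BorrowPage: taking a swept page from the main space.
void BorrowPage(Page* p) { p->borrowed_by_compaction_space = true; }

// Models the fixed MergeLocalSpace: only fresh pages reach code_pages.
void MergePage(Isolate* isolate, Page* p) {
  if (p->borrowed_by_compaction_space) {
    p->borrowed_by_compaction_space = false;  // AddBorrowedPage clears the flag
  } else {
    isolate->AddCodeMemoryChunk(p);  // NotifyOldGenerationExpansion path
  }
}

int main() {
  Isolate isolate;
  Page fresh{0x10000, 0x1000};     // newly allocated by the compaction space
  Page borrowed{0x20000, 0x1000};  // already owned and tracked by the main space
  isolate.AddCodeMemoryChunk(&borrowed);

  BorrowPage(&borrowed);
  MergePage(&isolate, &fresh);
  MergePage(&isolate, &borrowed);

  // Without the flag both pages would be re-added on merge, duplicating the
  // borrowed one; with the flag the list stays duplicate-free.
  assert(isolate.code_pages.size() == 2);
  return 0;
}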
@@ -4583,11 +4583,33 @@ SaveAndSwitchContext::SaveAndSwitchContext(Isolate* isolate,
#ifdef DEBUG
AssertNoContextChange::AssertNoContextChange(Isolate* isolate)
: isolate_(isolate), context_(isolate->context(), isolate) {}
namespace {
bool Overlapping(const MemoryRange& a, const MemoryRange& b) {
uintptr_t a1 = reinterpret_cast<uintptr_t>(a.start);
uintptr_t a2 = a1 + a.length_in_bytes;
uintptr_t b1 = reinterpret_cast<uintptr_t>(b.start);
uintptr_t b2 = b1 + b.length_in_bytes;
// Either b1 or b2 is in the [a1, a2) range.
return (a1 <= b1 && b1 < a2) || (a1 <= b2 && b2 < a2);
}
} // anonymous namespace
#endif // DEBUG
void Isolate::AddCodeMemoryRange(MemoryRange range) {
std::vector<MemoryRange>* old_code_pages = GetCodePages();
DCHECK_NOT_NULL(old_code_pages);
#ifdef DEBUG
auto overlapping = [range](const MemoryRange& a) {
return Overlapping(range, a);
};
DCHECK_EQ(old_code_pages->end(),
std::find_if(old_code_pages->begin(), old_code_pages->end(),
overlapping));
#endif
std::vector<MemoryRange>* new_code_pages;
if (old_code_pages == &code_pages_buffer1_) {
@@ -4701,7 +4723,7 @@ void Isolate::RemoveCodeMemoryChunk(MemoryChunk* chunk) {
[removed_page_start](const MemoryRange& range) {
return range.start == removed_page_start;
});
DCHECK_EQ(old_code_pages->size(), new_code_pages->size() + 1);
// Atomically switch out the pointer
SetCodePages(new_code_pages);
#endif // !defined(V8_TARGET_ARCH_ARM)
...
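As a side note on the DEBUG-only check above: Overlapping() treats each entry as a half-open [start, start + length_in_bytes) range and reports an overlap when either endpoint of the second range falls inside the first. A standalone sketch of the same predicate, with a local MemoryRange stand-in (an assumption for this page, since the v8 headers are not included here), illustrates the behaviour; the asserts are illustrative, not part of the change.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Local stand-in exposing only the two fields the predicate reads.
struct MemoryRange {
  const void* start;
  size_t length_in_bytes;
};

// Same logic as the Overlapping() helper added above: true when either
// endpoint of b lies inside the half-open range [a1, a2).
bool Overlapping(const MemoryRange& a, const MemoryRange& b) {
  uintptr_t a1 = reinterpret_cast<uintptr_t>(a.start);
  uintptr_t a2 = a1 + a.length_in_bytes;
  uintptr_t b1 = reinterpret_cast<uintptr_t>(b.start);
  uintptr_t b2 = b1 + b.length_in_bytes;
  return (a1 <= b1 && b1 < a2) || (a1 <= b2 && b2 < a2);
}

int main() {
  static char buffer[8192];
  MemoryRange a{buffer, 2048};
  MemoryRange b{buffer + 1024, 2048};  // overlaps a in [1024, 2048)
  MemoryRange c{buffer + 4096, 2048};  // disjoint from a
  assert(Overlapping(a, b));
  assert(!Overlapping(a, c));
  return 0;
}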
@@ -106,6 +106,11 @@ class BasicMemoryChunk {
// because there exists a potential pointer to somewhere in the chunk which
// can't be updated.
PINNED = 1u << 22,
// The memory chunk was borrowed by a compaction space for evacuation.
// This bit helps to distinguish memory chunks borrowed from the main space
// from memory chunks freshly allocated by the compaction space.
BORROWED_BY_COMPACTION_SPACE = 1u << 23
};
static const intptr_t kAlignment =
...
@@ -135,7 +135,7 @@ void PagedSpace::RefillFreeList() {
PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
base::MutexGuard guard(owner->mutex());
owner->RefineAllocatedBytesAfterSweeping(p);
owner->RemovePage(p);
owner->BorrowPage(p);
added += AddPage(p);
} else {
base::MutexGuard guard(mutex());
@@ -179,8 +179,12 @@ void PagedSpace::MergeLocalSpace(LocalSpace* other) {
// Relinking requires the category to be unlinked.
other->RemovePage(p);
if (p->IsFlagSet(Page::BORROWED_BY_COMPACTION_SPACE)) {
AddBorrowedPage(p);
} else {
AddPage(p);
heap()->NotifyOldGenerationExpansion(identity(), p);
}
DCHECK_IMPLIES(
!p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());
@@ -232,14 +236,6 @@ void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
marking_state->SetLiveBytes(page, 0);
}
Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
base::MutexGuard guard(mutex());
Page* page = free_list()->GetPageForSize(size_in_bytes);
if (!page) return nullptr;
RemovePage(page);
return page;
}
size_t PagedSpace::AddPage(Page* page) {
CHECK(page->SweepingDone());
page->set_owner(this);
@@ -267,6 +263,26 @@ void PagedSpace::RemovePage(Page* page) {
}
}
size_t PagedSpace::AddBorrowedPage(Page* page) {
DCHECK(page->IsFlagSet(Page::BORROWED_BY_COMPACTION_SPACE));
page->ClearFlag(Page::BORROWED_BY_COMPACTION_SPACE);
return AddPage(page);
}
void PagedSpace::BorrowPage(Page* page) {
DCHECK(!page->IsFlagSet(Page::BORROWED_BY_COMPACTION_SPACE));
page->SetFlag(Page::BORROWED_BY_COMPACTION_SPACE);
RemovePage(page);
}
Page* PagedSpace::FindAndBorrowPage(int size_in_bytes) {
base::MutexGuard guard(mutex());
Page* page = free_list()->GetPageForSize(size_in_bytes);
if (!page) return nullptr;
BorrowPage(page);
return page;
}
void PagedSpace::SetTopAndLimit(Address top, Address limit) {
DCHECK(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
@@ -452,7 +468,9 @@ void PagedSpace::ReleasePage(Page* page) {
SetTopAndLimit(kNullAddress, kNullAddress);
}
if (identity() == CODE_SPACE) {
heap()->isolate()->RemoveCodeMemoryChunk(page);
}
AccountUncommitted(page->size());
accounting_stats_.DecreaseCapacity(page->area_size());
@@ -861,10 +879,10 @@ bool PagedSpace::RawRefillLabMain(int size_in_bytes, AllocationOrigin origin) {
}
if (is_compaction_space()) {
// The main thread may have acquired all swept pages. Try to steal from
// The main thread may have acquired all swept pages. Try to borrow from
// it. This can only happen during young generation evacuation.
PagedSpace* main_space = heap()->paged_space(identity());
Page* page = main_space->RemovePageSafe(size_in_bytes);
Page* page = main_space->FindAndBorrowPage(size_in_bytes);
if (page != nullptr) {
AddPage(page);
if (TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
...
@@ -210,9 +210,14 @@ class V8_EXPORT_PRIVATE PagedSpace
// free list of the space.
size_t AddPage(Page* page);
void RemovePage(Page* page);
// Remove a page if it has at least |size_in_bytes| bytes available that can
// be used for allocation.
Page* RemovePageSafe(int size_in_bytes);
// Adds the page that was previously borrowed to this space and returns
// the number of bytes added to the free list of the space.
size_t AddBorrowedPage(Page* page);
// Borrow a page for a compaction space.
void BorrowPage(Page* page);
// Find a page that has at least |size_in_bytes| bytes available that can
// be used for allocation and borrow it.
Page* FindAndBorrowPage(int size_in_bytes);
void SetReadable();
void SetReadAndExecutable();
...
@@ -7272,6 +7272,33 @@ HEAP_TEST(CodeLargeObjectSpace) {
heap->RemoveHeapObjectAllocationTracker(&allocation_tracker);
}
TEST(Regress10900) {
FLAG_always_compact = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
HandleScope handle_scope(isolate);
i::byte buffer[i::Assembler::kDefaultBufferSize];
MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
ExternalAssemblerBuffer(buffer, sizeof(buffer)));
masm.Push(ReadOnlyRoots(heap).undefined_value_handle());
CodeDesc desc;
masm.GetCode(isolate, &desc);
Handle<Code> code =
Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
{
// Generate multiple code pages.
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
for (int i = 0; i < 100; i++) {
factory->CopyCode(code);
}
}
// Force garbage collection that compacts code pages and triggers
// an assertion in Isolate::AddCodeMemoryRange before the bug fix.
CcTest::CollectAllAvailableGarbage();
}
} // namespace heap
} // namespace internal
} // namespace v8
...
@@ -57,6 +57,8 @@ class TestMemoryAllocatorScope {
PageAllocator* page_allocator = nullptr)
: isolate_(isolate),
old_allocator_(std::move(isolate->heap()->memory_allocator_)) {
// Save the code pages for restoring them later on; the destructor swaps
// them back after the test memory allocator is torn down.
isolate->GetCodePages()->swap(code_pages_);
isolate->heap()->memory_allocator_.reset(
new MemoryAllocator(isolate, max_capacity, code_range_size));
if (page_allocator != nullptr) {
@@ -69,12 +71,13 @@ class TestMemoryAllocatorScope {
~TestMemoryAllocatorScope() {
isolate_->heap()->memory_allocator()->TearDown();
isolate_->heap()->memory_allocator_.swap(old_allocator_);
isolate_->GetCodePages()->swap(code_pages_);
}
private:
Isolate* isolate_;
std::unique_ptr<MemoryAllocator> old_allocator_;
std::vector<MemoryRange> code_pages_;
DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};
...