Commit b24d8de1 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Fix tracking of code pages for V8 stack unwinder (attempt #2)

When a compaction space allocates a new code page, that page needs to
be added to the Isolate::code_pages_ array used for stack unwinding.
Since the array is owned by the main thread, a compaction thread cannot
modify it directly. Because of that, code pages are added upon merging
of the compaction space into the main space in MergeLocalSpace.

The bug was that all code pages coming from the compaction space
were added to the code_pages_ array, even though some of those pages
were not newly allocated but merely borrowed from the main space.

This CL keeps track of all pages newly allocated by a compaction space.

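In outline: the compaction space now remembers which pages it allocated itself, and the merge step notifies the main thread only about those. A minimal sketch of that idea with hypothetical names (not the real V8 classes):

    #include <vector>

    struct Page {};

    // Reduced model of a compaction space's page bookkeeping.
    class CompactionSpaceModel {
     public:
      // Pages the compaction space allocates itself are recorded as new.
      Page* Expand() {
        Page* page = new Page();
        pages_.push_back(page);
        new_pages_.push_back(page);
        return page;
      }

      // Pages borrowed from the main space are deliberately NOT recorded
      // as new, so merging cannot re-register them.
      void AddBorrowedPage(Page* page) { pages_.push_back(page); }

      // On merge, only the truly new pages are reported to the main
      // thread's bookkeeping (NotifyOldGenerationExpansion in the CL).
      const std::vector<Page*>& GetNewPages() const { return new_pages_; }

     private:
      std::vector<Page*> pages_;      // everything this space manages
      std::vector<Page*> new_pages_;  // only pages allocated here
    };
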
Bug: v8:10900
Change-Id: Iff3ff5d608df60fb752d2e0ffc29e51f2d967936
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2418718
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70023}
parent bdf38425
@@ -4583,11 +4583,33 @@ SaveAndSwitchContext::SaveAndSwitchContext(Isolate* isolate,
 #ifdef DEBUG
 AssertNoContextChange::AssertNoContextChange(Isolate* isolate)
     : isolate_(isolate), context_(isolate->context(), isolate) {}
+
+namespace {
+bool Overlapping(const MemoryRange& a, const MemoryRange& b) {
+  uintptr_t a1 = reinterpret_cast<uintptr_t>(a.start);
+  uintptr_t a2 = a1 + a.length_in_bytes;
+  uintptr_t b1 = reinterpret_cast<uintptr_t>(b.start);
+  uintptr_t b2 = b1 + b.length_in_bytes;
+  // Either b1 or b2 is in the [a1, a2) range.
+  return (a1 <= b1 && b1 < a2) || (a1 <= b2 && b2 < a2);
+}
+}  // anonymous namespace
 #endif  // DEBUG
 
 void Isolate::AddCodeMemoryRange(MemoryRange range) {
   std::vector<MemoryRange>* old_code_pages = GetCodePages();
   DCHECK_NOT_NULL(old_code_pages);
+#ifdef DEBUG
+  auto overlapping = [range](const MemoryRange& a) {
+    return Overlapping(range, a);
+  };
+  DCHECK_EQ(old_code_pages->end(),
+            std::find_if(old_code_pages->begin(), old_code_pages->end(),
+                         overlapping));
+#endif
   std::vector<MemoryRange>* new_code_pages;
   if (old_code_pages == &code_pages_buffer1_) {
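For reference, the new DCHECK asserts that an incoming range overlaps no page already in the list. Below is the same predicate reduced to a self-contained check over raw integer bounds (my illustration, not part of the CL). One property worth noting: the test is one-sided, so it does not flag the case where b strictly contains a; the fully symmetric test for half-open intervals would be a1 < b2 && b1 < a2.

    #include <cassert>
    #include <cstdint>

    // Stand-in for Overlapping() above; an interval is [start, start + length),
    // so the second bound is exclusive.
    bool Overlapping(uintptr_t a1, uintptr_t a2, uintptr_t b1, uintptr_t b2) {
      return (a1 <= b1 && b1 < a2) || (a1 <= b2 && b2 < a2);
    }

    int main() {
      assert(Overlapping(0, 10, 5, 15));    // b starts inside a
      assert(Overlapping(0, 10, 0, 10));    // identical ranges: the duplicate-page case
      assert(!Overlapping(0, 10, 10, 20));  // b starting exactly at a's end: not flagged
      assert(!Overlapping(5, 7, 0, 10));    // one-sided caveat: b strictly containing a is missed
    }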
@@ -4701,7 +4723,7 @@ void Isolate::RemoveCodeMemoryChunk(MemoryChunk* chunk) {
                       [removed_page_start](const MemoryRange& range) {
                         return range.start == removed_page_start;
                       });
   DCHECK_EQ(old_code_pages->size(), new_code_pages->size() + 1);
   // Atomically switch out the pointer
   SetCodePages(new_code_pages);
 #endif  // !defined(V8_TARGET_ARCH_ARM)
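The code collapsed above this hunk rebuilds the page list into whichever of the two backing vectors (code_pages_buffer1_ / code_pages_buffer2_) is not currently published, and SetCodePages then swaps a single pointer, so a profiler sampling the stack concurrently always sees a complete snapshot. A minimal sketch of that double-buffering pattern, with hypothetical names and an explicit std::atomic standing in for the Isolate's getter/setter:

    #include <atomic>
    #include <cstddef>
    #include <vector>

    struct MemoryRange {
      const void* start;
      size_t length_in_bytes;
    };

    // Hypothetical reduction of the Isolate's code-page bookkeeping.
    class CodePageTracker {
     public:
      // Readers (e.g. a profiler thread) always see a complete snapshot.
      const std::vector<MemoryRange>* GetCodePages() const {
        return current_.load(std::memory_order_acquire);
      }

      // Rebuild into the buffer that is not currently visible, then publish
      // it with a single atomic pointer store.
      void RemovePage(const void* page_start) {
        const std::vector<MemoryRange>* old_pages = GetCodePages();
        std::vector<MemoryRange>* new_pages =
            (old_pages == &buffer1_) ? &buffer2_ : &buffer1_;
        new_pages->clear();
        for (const MemoryRange& range : *old_pages) {
          if (range.start != page_start) new_pages->push_back(range);
        }
        current_.store(new_pages, std::memory_order_release);
      }

     private:
      std::vector<MemoryRange> buffer1_, buffer2_;
      std::atomic<const std::vector<MemoryRange>*> current_{&buffer1_};
    };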
......
@@ -180,7 +180,6 @@ void PagedSpace::MergeLocalSpace(LocalSpace* other) {
     // Relinking requires the category to be unlinked.
     other->RemovePage(p);
     AddPage(p);
-    heap()->NotifyOldGenerationExpansion(identity(), p);
     DCHECK_IMPLIES(
         !p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
         p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());
@@ -192,6 +191,9 @@ void PagedSpace::MergeLocalSpace(LocalSpace* other) {
     // We'll have to come up with a better solution for allocation stepping
     // before shipping, which will likely be using LocalHeap.
   }
+  for (auto p : other->GetNewPages()) {
+    heap()->NotifyOldGenerationExpansion(identity(), p);
+  }
 
   DCHECK_EQ(0u, other->Size());
   DCHECK_EQ(0u, other->Capacity());
@@ -452,7 +454,9 @@ void PagedSpace::ReleasePage(Page* page) {
     SetTopAndLimit(kNullAddress, kNullAddress);
   }
 
-  heap()->isolate()->RemoveCodeMemoryChunk(page);
+  if (identity() == CODE_SPACE) {
+    heap()->isolate()->RemoveCodeMemoryChunk(page);
+  }
 
   AccountUncommitted(page->size());
   accounting_stats_.DecreaseCapacity(page->area_size());
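Only code-space pages are ever registered in code_pages_, so ReleasePage now unregisters a page only when the space is CODE_SPACE; previously the RemoveCodeMemoryChunk call ran unconditionally for every released page, regardless of the owning space.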
@@ -828,6 +832,12 @@ bool PagedSpace::RefillLabMain(int size_in_bytes, AllocationOrigin origin) {
   return RawRefillLabMain(size_in_bytes, origin);
 }
 
+Page* LocalSpace::Expand() {
+  Page* page = PagedSpace::Expand();
+  new_pages_.push_back(page);
+  return page;
+}
+
 bool CompactionSpace::RefillLabMain(int size_in_bytes,
                                     AllocationOrigin origin) {
   return RawRefillLabMain(size_in_bytes, origin);
......
@@ -341,7 +341,7 @@ class V8_EXPORT_PRIVATE PagedSpace
   // Expands the space by allocating a fixed number of pages. Returns nullptr
   // if it cannot allocate the requested number of pages from the OS, or if
   // the hard heap size limit has been hit.
-  Page* Expand();
+  virtual Page* Expand();
   Page* ExpandBackground(LocalHeap* local_heap);
   Page* AllocatePage();
@@ -417,9 +417,15 @@ class V8_EXPORT_PRIVATE LocalSpace : public PagedSpace {
     DCHECK_NE(local_space_kind, LocalSpaceKind::kNone);
   }
 
+  const std::vector<Page*>& GetNewPages() { return new_pages_; }
+
  protected:
+  Page* Expand() override;
   // The space is temporary and not included in any snapshots.
   bool snapshotable() override { return false; }
+  // Pages that were allocated in this local space and need to be merged
+  // into the main space.
+  std::vector<Page*> new_pages_;
 };
 
 // -----------------------------------------------------------------------------
......
@@ -7272,6 +7272,33 @@ HEAP_TEST(CodeLargeObjectSpace) {
   heap->RemoveHeapObjectAllocationTracker(&allocation_tracker);
 }
 
+TEST(Regress10900) {
+  FLAG_always_compact = true;
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = isolate->heap();
+  Factory* factory = isolate->factory();
+  HandleScope handle_scope(isolate);
+  i::byte buffer[i::Assembler::kDefaultBufferSize];
+  MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+                      ExternalAssemblerBuffer(buffer, sizeof(buffer)));
+  masm.Push(ReadOnlyRoots(heap).undefined_value_handle());
+  CodeDesc desc;
+  masm.GetCode(isolate, &desc);
+  Handle<Code> code =
+      Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+  {
+    // Generate multiple code pages.
+    CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+    for (int i = 0; i < 100; i++) {
+      factory->CopyCode(code);
+    }
+  }
+  // Force a garbage collection that compacts code pages; before the bug fix
+  // this triggered the overlap assertion in Isolate::AddCodeMemoryRange.
+  CcTest::CollectAllAvailableGarbage();
+}
+
 }  // namespace heap
 }  // namespace internal
 }  // namespace v8
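The test copies a small stub one hundred times to force the allocation of fresh code pages, and FLAG_always_compact ensures the full GC actually evacuates them through a compaction space; before this fix, MergeLocalSpace then re-registered the borrowed pages and tripped the overlap DCHECK added above in Isolate::AddCodeMemoryRange. Assuming a standard debug checkout, it should be runnable on its own with something like out/x64.debug/cctest test-heap/Regress10900.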
......
@@ -57,6 +57,9 @@ class TestMemoryAllocatorScope {
                            PageAllocator* page_allocator = nullptr)
       : isolate_(isolate),
         old_allocator_(std::move(isolate->heap()->memory_allocator_)) {
+    // Save the code pages so that they can be restored later, because the
+    // MemoryAllocator constructor changes them.
+    isolate->GetCodePages()->swap(code_pages_);
     isolate->heap()->memory_allocator_.reset(
         new MemoryAllocator(isolate, max_capacity, code_range_size));
     if (page_allocator != nullptr) {
@@ -69,12 +72,13 @@ class TestMemoryAllocatorScope {
   ~TestMemoryAllocatorScope() {
     isolate_->heap()->memory_allocator()->TearDown();
     isolate_->heap()->memory_allocator_.swap(old_allocator_);
+    isolate_->GetCodePages()->swap(code_pages_);
   }
 
  private:
   Isolate* isolate_;
   std::unique_ptr<MemoryAllocator> old_allocator_;
+  std::vector<MemoryRange> code_pages_;
 
   DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
 };
......