Commit 027e5888 authored by Maya Lekova, committed by Commit Bot

Revert "[heap] Fix tracking of code pages for V8 stack unwinder"

This reverts commit af5f437c.

Reason for revert: Seems to break TSAN - https://ci.chromium.org/p/v8/builders/ci/V8%20Linux64%20TSAN/33286?

Original change's description:
> [heap] Fix tracking of code pages for V8 stack unwinder
> 
> When a compaction space allocates a new code page, that page needs to
> be added to the Isolate::code_pages_ array used for stack unwinding.
> Since the array is owned by the main thread, the compaction thread cannot
> directly modify it. Because of that, code pages are added upon merging
> of the compaction space to the main space in MergeLocalSpace.
> 
> The bug was that all code pages coming from the compaction space
> were added to the code_pages_ array. However, some of the pages are
> not newly allocated but merely borrowed from the main space.
> 
> This CL introduces a new page flag for marking pages that are borrowed
> during compaction and skips them in MergeLocalSpace.
> 
> Bug: v8:10900
> Change-Id: I786dc5747bd7c785ae58dfd8b841c00774efb15e
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2416500
> Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#69992}

TBR=ulan@chromium.org,jkummerow@chromium.org,dinfuehr@chromium.org

Change-Id: I13f8b64014750af95423166152dc9bee8cec12d0
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:10900
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2418395
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Commit-Queue: Maya Lekova <mslekova@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69993}
parent af5f437c
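
Before reading the diff, it may help to see the scheme from the reverted description as a compact sketch. This is an illustration only, with stand-in types (Page, Space, code_pages), not V8 code; it mirrors just the set-on-borrow / clear-on-merge idea:

    // Hypothetical model of the reverted borrowed-page scheme (not V8 code).
    #include <cstdint>
    #include <vector>

    struct Page {
      static constexpr uint32_t kBorrowedByCompactionSpace = 1u << 23;
      uint32_t flags = 0;
      void SetFlag(uint32_t f) { flags |= f; }
      void ClearFlag(uint32_t f) { flags &= ~f; }
      bool IsFlagSet(uint32_t f) const { return (flags & f) != 0; }
    };

    struct Space {
      std::vector<Page*> pages;
      std::vector<Page*> code_pages;  // stand-in for Isolate::code_pages_

      // A compaction space takes an existing page from the main space and
      // marks it, so the merge step can tell it apart from fresh pages.
      void BorrowPage(Page* p) { p->SetFlag(Page::kBorrowedByCompactionSpace); }

      // On merge, only pages the compaction space actually allocated are
      // registered for stack unwinding; borrowed pages were already
      // registered when the main space first allocated them.
      void MergePage(Page* p) {
        if (p->IsFlagSet(Page::kBorrowedByCompactionSpace)) {
          p->ClearFlag(Page::kBorrowedByCompactionSpace);
        } else {
          code_pages.push_back(p);  // newly allocated: track for the unwinder
        }
        pages.push_back(p);
      }
    };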
@@ -4583,33 +4583,11 @@ SaveAndSwitchContext::SaveAndSwitchContext(Isolate* isolate,
 #ifdef DEBUG
 AssertNoContextChange::AssertNoContextChange(Isolate* isolate)
     : isolate_(isolate), context_(isolate->context(), isolate) {}
-namespace {
-bool Overlapping(const MemoryRange& a, const MemoryRange& b) {
-  uintptr_t a1 = reinterpret_cast<uintptr_t>(a.start);
-  uintptr_t a2 = a1 + a.length_in_bytes;
-  uintptr_t b1 = reinterpret_cast<uintptr_t>(b.start);
-  uintptr_t b2 = b1 + b.length_in_bytes;
-  // Either b1 or b2 are in the [a1, a2) range.
-  return (a1 <= b1 && b1 < a2) || (a1 <= b2 && b2 < a2);
-}
-}  // anonymous namespace
 #endif  // DEBUG
 void Isolate::AddCodeMemoryRange(MemoryRange range) {
   std::vector<MemoryRange>* old_code_pages = GetCodePages();
   DCHECK_NOT_NULL(old_code_pages);
-#ifdef DEBUG
-  auto overlapping = [range](const MemoryRange& a) {
-    return Overlapping(range, a);
-  };
-  DCHECK_EQ(old_code_pages->end(),
-            std::find_if(old_code_pages->begin(), old_code_pages->end(),
-                         overlapping));
-#endif
   std::vector<MemoryRange>* new_code_pages;
   if (old_code_pages == &code_pages_buffer1_) {
@@ -4723,7 +4701,7 @@ void Isolate::RemoveCodeMemoryChunk(MemoryChunk* chunk) {
                       [removed_page_start](const MemoryRange& range) {
                         return range.start == removed_page_start;
                       });
   DCHECK_EQ(old_code_pages->size(), new_code_pages->size() + 1);
   // Atomically switch out the pointer
   SetCodePages(new_code_pages);
 #endif  // !defined(V8_TARGET_ARCH_ARM)
......
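
A side note on the reverted Overlapping helper above: it reports overlap only when an endpoint of b lands in [a1, a2), so it does not detect the case where b strictly contains a. A common symmetric test for non-empty half-open ranges, sketched here under the same uintptr_t representation, covers containment as well:

    #include <cstdint>

    // Symmetric overlap test for non-empty half-open ranges [a1, a2) and
    // [b1, b2): they overlap iff each range starts before the other ends.
    bool RangesOverlap(uintptr_t a1, uintptr_t a2, uintptr_t b1, uintptr_t b2) {
      return a1 < b2 && b1 < a2;
    }

For the DCHECK's purpose (each code page registered exactly once), either formulation catches the duplicate registration the CL was chasing, since identical ranges satisfy both.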
@@ -106,11 +106,6 @@ class BasicMemoryChunk {
     // because there exists a potential pointer to somewhere in the chunk which
     // can't be updated.
     PINNED = 1u << 22,
-    // The memory chunk was borrowed by a compaction space for evacuation.
-    // This bit helps to distinguish memory chunks allocated by the compaction
-    // space from freshly allocated memory chunks.
-    BORROWED_BY_COMPACTION_SPACE = 1u << 23
   };
   static const intptr_t kAlignment =
......
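
The enum above packs each chunk property into one bit of a flags word; a new flag such as the reverted BORROWED_BY_COMPACTION_SPACE must take a previously unused bit. A minimal standalone sketch (names illustrative, not V8's) with a compile-time disjointness check:

    #include <cstdint>

    // Chunk-property bits in the style of the enum above (illustrative only).
    enum MemoryChunkFlag : uint32_t {
      kPinned = 1u << 22,
      kBorrowedByCompactionSpace = 1u << 23,  // the bit this revert removes
    };

    // Flags share one word, so every flag must occupy a distinct bit.
    static_assert((kPinned & kBorrowedByCompactionSpace) == 0,
                  "chunk flags must not share bits");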
@@ -135,7 +135,7 @@ void PagedSpace::RefillFreeList() {
         PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
         base::MutexGuard guard(owner->mutex());
         owner->RefineAllocatedBytesAfterSweeping(p);
-        owner->BorrowPage(p);
+        owner->RemovePage(p);
         added += AddPage(p);
       } else {
         base::MutexGuard guard(mutex());
@@ -179,12 +179,8 @@ void PagedSpace::MergeLocalSpace(LocalSpace* other) {
     // Relinking requires the category to be unlinked.
     other->RemovePage(p);
-    if (p->IsFlagSet(Page::BORROWED_BY_COMPACTION_SPACE)) {
-      AddBorrowedPage(p);
-    } else {
-      AddPage(p);
-      heap()->NotifyOldGenerationExpansion(identity(), p);
-    }
+    AddPage(p);
+    heap()->NotifyOldGenerationExpansion(identity(), p);
     DCHECK_IMPLIES(
         !p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
         p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());
@@ -236,6 +232,14 @@ void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
   marking_state->SetLiveBytes(page, 0);
 }
+Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
+  base::MutexGuard guard(mutex());
+  Page* page = free_list()->GetPageForSize(size_in_bytes);
+  if (!page) return nullptr;
+  RemovePage(page);
+  return page;
+}
 size_t PagedSpace::AddPage(Page* page) {
   CHECK(page->SweepingDone());
   page->set_owner(this);
@@ -263,26 +267,6 @@ void PagedSpace::RemovePage(Page* page) {
   }
 }
-size_t PagedSpace::AddBorrowedPage(Page* page) {
-  DCHECK(page->IsFlagSet(Page::BORROWED_BY_COMPACTION_SPACE));
-  page->ClearFlag(Page::BORROWED_BY_COMPACTION_SPACE);
-  return AddPage(page);
-}
-void PagedSpace::BorrowPage(Page* page) {
-  DCHECK(!page->IsFlagSet(Page::BORROWED_BY_COMPACTION_SPACE));
-  page->SetFlag(Page::BORROWED_BY_COMPACTION_SPACE);
-  RemovePage(page);
-}
-Page* PagedSpace::FindAndBorrowPage(int size_in_bytes) {
-  base::MutexGuard guard(mutex());
-  Page* page = free_list()->GetPageForSize(size_in_bytes);
-  if (!page) return nullptr;
-  BorrowPage(page);
-  return page;
-}
 void PagedSpace::SetTopAndLimit(Address top, Address limit) {
   DCHECK(top == limit ||
          Page::FromAddress(top) == Page::FromAddress(limit - 1));
@@ -468,9 +452,7 @@ void PagedSpace::ReleasePage(Page* page) {
     SetTopAndLimit(kNullAddress, kNullAddress);
   }
-  if (identity() == CODE_SPACE) {
-    heap()->isolate()->RemoveCodeMemoryChunk(page);
-  }
+  heap()->isolate()->RemoveCodeMemoryChunk(page);
   AccountUncommitted(page->size());
   accounting_stats_.DecreaseCapacity(page->area_size());
@@ -879,10 +861,10 @@ bool PagedSpace::RawRefillLabMain(int size_in_bytes, AllocationOrigin origin) {
   }
   if (is_compaction_space()) {
-    // The main thread may have acquired all swept pages. Try to borrow from
+    // The main thread may have acquired all swept pages. Try to steal from
     // it. This can only happen during young generation evacuation.
     PagedSpace* main_space = heap()->paged_space(identity());
-    Page* page = main_space->FindAndBorrowPage(size_in_bytes);
+    Page* page = main_space->RemovePageSafe(size_in_bytes);
     if (page != nullptr) {
       AddPage(page);
       if (TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
......
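
The restored RemovePageSafe above, paired with its RawRefillLabMain call site, is a lock-then-transfer pattern: take the main space's mutex, detach a swept page with enough free space, and let the compaction space re-add it locally. A minimal sketch with stand-in types (the linear scan replaces V8's free-list lookup; the mutex and page types are assumptions):

    #include <cstddef>
    #include <mutex>
    #include <vector>

    struct Page {
      size_t available = 0;  // bytes usable for allocation on this page
    };

    struct SpaceModel {
      std::mutex mutex;
      std::vector<Page*> pages;

      // Detach a page with at least size_in_bytes available, under the lock,
      // so a concurrent compaction space can take ownership of it.
      Page* RemovePageSafe(size_t size_in_bytes) {
        std::lock_guard<std::mutex> guard(mutex);
        for (auto it = pages.begin(); it != pages.end(); ++it) {
          if ((*it)->available >= size_in_bytes) {
            Page* page = *it;
            pages.erase(it);
            return page;
          }
        }
        return nullptr;  // no suitable swept page; caller allocates a fresh one
      }
    };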
@@ -210,14 +210,9 @@ class V8_EXPORT_PRIVATE PagedSpace
   // free list of the space.
   size_t AddPage(Page* page);
   void RemovePage(Page* page);
-  // Adds the page that was previously borrowed to this space and returns
-  // returns the number of bytes added to the free list of the space.
-  size_t AddBorrowedPage(Page* page);
-  // Borrow a page for a compaction space.
-  void BorrowPage(Page* page);
-  // Find a page that has at least |size_in_bytes| bytes available that can
-  // be used for allocation and borrow it.
-  Page* FindAndBorrowPage(int size_in_bytes);
+  // Remove a page if it has at least |size_in_bytes| bytes available that can
+  // be used for allocation.
+  Page* RemovePageSafe(int size_in_bytes);
   void SetReadable();
   void SetReadAndExecutable();
......
@@ -7272,33 +7272,6 @@ HEAP_TEST(CodeLargeObjectSpace) {
   heap->RemoveHeapObjectAllocationTracker(&allocation_tracker);
 }
-TEST(Regress10900) {
-  FLAG_always_compact = true;
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  Heap* heap = isolate->heap();
-  Factory* factory = isolate->factory();
-  HandleScope handle_scope(isolate);
-  i::byte buffer[i::Assembler::kDefaultBufferSize];
-  MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
-                      ExternalAssemblerBuffer(buffer, sizeof(buffer)));
-  masm.Push(ReadOnlyRoots(heap).undefined_value_handle());
-  CodeDesc desc;
-  masm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
-  {
-    // Generate multiple code pages.
-    CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
-    for (int i = 0; i < 100; i++) {
-      factory->CopyCode(code);
-    }
-  }
-  // Force garbage collection that compacts code pages and triggers
-  // an assertion in Isolate::AddCodeMemoryRange before the bug fix.
-  CcTest::CollectAllAvailableGarbage();
-}
 }  // namespace heap
 }  // namespace internal
 }  // namespace v8
......
@@ -57,8 +57,6 @@ class TestMemoryAllocatorScope {
                            PageAllocator* page_allocator = nullptr)
       : isolate_(isolate),
         old_allocator_(std::move(isolate->heap()->memory_allocator_)) {
-    // Save the code pages for restoring them later on because
-    isolate->GetCodePages()->swap(code_pages_);
     isolate->heap()->memory_allocator_.reset(
         new MemoryAllocator(isolate, max_capacity, code_range_size));
     if (page_allocator != nullptr) {
@@ -71,13 +69,12 @@
   ~TestMemoryAllocatorScope() {
     isolate_->heap()->memory_allocator()->TearDown();
     isolate_->heap()->memory_allocator_.swap(old_allocator_);
-    isolate_->GetCodePages()->swap(code_pages_);
   }
  private:
   Isolate* isolate_;
   std::unique_ptr<MemoryAllocator> old_allocator_;
-  std::vector<MemoryRange> code_pages_;
   DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
 };
......
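
TestMemoryAllocatorScope above is an RAII save-and-restore helper; the revert simply drops the extra code-pages swap from it. The underlying pattern, sketched generically (ScopedSwap is illustrative, not from the CL):

    #include <utility>

    // RAII scope: installs a replacement value on construction and swaps the
    // original back on destruction, so a test cannot forget to restore state.
    template <typename T>
    class ScopedSwap {
     public:
      ScopedSwap(T& slot, T replacement)
          : slot_(slot), saved_(std::move(replacement)) {
        std::swap(slot_, saved_);  // slot now holds the replacement
      }
      ~ScopedSwap() { std::swap(slot_, saved_); }  // restore the original

      ScopedSwap(const ScopedSwap&) = delete;
      ScopedSwap& operator=(const ScopedSwap&) = delete;

     private:
      T& slot_;
      T saved_;
    };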