Commit bb0454ac authored by Hannes Payer's avatar Hannes Payer Committed by Commit Bot

[heap] Handle NEVER_ALLOCATE_ON_PAGE pages in concurrent sweepers.

This avoids accessing the page flags of all old generation PagedSpace pages when starting sweeping.


Bug: v8:9093
Change-Id: Ibdfb35f3e368107f8c364c9498312b01edce47d1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1554688
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60789}
parent cde0d18c
...@@ -3785,20 +3785,6 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) { ...@@ -3785,20 +3785,6 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
continue; continue;
} }
if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
// We need to sweep the page to get it into an iterable state again. Note
// that this adds unusable memory into the free list that is later on
// (in the free list) dropped again. Since we only use the flag for
// testing this is fine.
p->set_concurrent_sweeping_state(Page::kSweepingInProgress);
sweeper()->RawSweep(p, Sweeper::IGNORE_FREE_LIST,
Heap::ShouldZapGarbage()
? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
: FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
space->IncreaseAllocatedBytes(p->allocated_bytes(), p);
continue;
}
// One unused page is kept, all further are released before sweeping them. // One unused page is kept, all further are released before sweeping them.
if (non_atomic_marking_state()->live_bytes(p) == 0) { if (non_atomic_marking_state()->live_bytes(p) == 0) {
if (unused_page_present) { if (unused_page_present) {
......
...@@ -186,8 +186,10 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) { ...@@ -186,8 +186,10 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
added += category->available(); added += category->available();
category->Relink(); category->Relink();
}); });
DCHECK_EQ(page->AvailableInFreeList(),
page->AvailableInFreeListFromAllocatedBytes()); DCHECK_IMPLIES(!page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
page->AvailableInFreeList() ==
page->AvailableInFreeListFromAllocatedBytes());
return added; return added;
} }
......
...@@ -1543,6 +1543,12 @@ void PagedSpace::RefillFreeList() { ...@@ -1543,6 +1543,12 @@ void PagedSpace::RefillFreeList() {
{ {
Page* p = nullptr; Page* p = nullptr;
while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) { while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
// We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
// entries here to make them unavailable for allocations.
if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
p->ForAllFreeListCategories(
[](FreeListCategory* category) { category->Reset(); });
}
// Only during compaction pages can actually change ownership. This is // Only during compaction pages can actually change ownership. This is
// safe because there exists no other competing action on the page links // safe because there exists no other competing action on the page links
// during compaction. // during compaction.
...@@ -1583,8 +1589,9 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) { ...@@ -1583,8 +1589,9 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
// Relinking requires the category to be unlinked. // Relinking requires the category to be unlinked.
other->RemovePage(p); other->RemovePage(p);
AddPage(p); AddPage(p);
DCHECK_EQ(p->AvailableInFreeList(), DCHECK_IMPLIES(
p->AvailableInFreeListFromAllocatedBytes()); !p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());
} }
DCHECK_EQ(0u, other->Size()); DCHECK_EQ(0u, other->Size());
DCHECK_EQ(0u, other->Capacity()); DCHECK_EQ(0u, other->Capacity());
...@@ -2896,7 +2903,6 @@ FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size, ...@@ -2896,7 +2903,6 @@ FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
void FreeListCategory::Free(Address start, size_t size_in_bytes, void FreeListCategory::Free(Address start, size_t size_in_bytes,
FreeMode mode) { FreeMode mode) {
DCHECK(page()->CanAllocate());
FreeSpace free_space = FreeSpace::cast(HeapObject::FromAddress(start)); FreeSpace free_space = FreeSpace::cast(HeapObject::FromAddress(start));
free_space->set_next(top()); free_space->set_next(top());
set_top(free_space); set_top(free_space);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment