Commit da3b2661 authored by mlippautz, committed by Commit bot

[heap] Move to two-level free-list

Before this CL, free memory (FreeSpace) was managed through a global free list
that contained a singly-linked list of FreeSpace nodes for each size class.

We move away from this approach to a global two-level doubly-linked list that
refers to singly-linked lists of FreeSpace nodes on the corresponding pages.
This way we can refill the free list at page-level granularity. It also enables
constant-time eviction of pages from the free list.
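
A minimal sketch of the two-level structure (layout and signatures simplified;
the actual FreeListCategory/FreeList classes in this CL carry more state):

#include <cstddef>

// Second level: singly-linked list of free memory ranges on a single page.
struct FreeSpace {
  size_t size;
  FreeSpace* next;
};

// One category per size class is embedded in each page's header. Categories
// of the same size class are doubly-linked across pages into the global
// free list (the first level).
struct FreeListCategory {
  FreeSpace* top = nullptr;          // FreeSpace nodes located on this page.
  FreeListCategory* prev = nullptr;  // Neighbors in the global per-size-class
  FreeListCategory* next = nullptr;  // doubly-linked list.
};

// The doubly-linked first level is what makes page eviction constant time:
// each of the page's categories unlinks itself without a list walk.
void RemoveCategory(FreeListCategory** top_of_class, FreeListCategory* c) {
  if (c->prev != nullptr) c->prev->next = c->next;
  if (c->next != nullptr) c->next->prev = c->prev;
  if (*top_of_class == c) *top_of_class = c->next;
  c->prev = c->next = nullptr;
}

Refilling then amounts to relinking a swept page's categories instead of
concatenating thread-private free lists.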

BUG=chromium:524425
LOG=N

Review URL: https://codereview.chromium.org/1772733002

Cr-Commit-Position: refs/heads/master@{#34853}
parent 6c8fc936
@@ -254,9 +254,6 @@ void MarkCompactCollector::SetUp() {
DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
free_list_old_space_.Reset(new FreeList(heap_->old_space()));
free_list_code_space_.Reset(new FreeList(heap_->code_space()));
free_list_map_space_.Reset(new FreeList(heap_->map_space()));
EnsureMarkingDequeIsReserved();
EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
@@ -496,9 +493,6 @@ class MarkCompactCollector::SweeperTask : public v8::Task {
void MarkCompactCollector::StartSweeperThreads() {
DCHECK(free_list_old_space_.get()->IsEmpty());
DCHECK(free_list_code_space_.get()->IsEmpty());
DCHECK(free_list_map_space_.get()->IsEmpty());
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new SweeperTask(heap(), OLD_SPACE), v8::Platform::kShortRunningTask);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
@@ -3245,28 +3239,12 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
}
};
enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
template <MarkCompactCollector::SweepingParallelism mode>
static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
int size) {
if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
DCHECK(free_list == NULL);
return space->Free(start, size);
} else {
return size - free_list->Free(start, size);
}
}
// Sweeps a page. After sweeping the page can be iterated.
// Slots in live objects pointing into evacuation candidates are updated
// if requested.
@@ -3275,8 +3253,7 @@ template <SweepingMode sweeping_mode,
MarkCompactCollector::SweepingParallelism parallelism,
SkipListRebuildingMode skip_list_mode,
FreeSpaceTreatmentMode free_space_mode>
static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
ObjectVisitor* v) {
static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) {
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
@@ -3310,7 +3287,7 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
freed_bytes = Free<parallelism>(space, free_list, free_start, size);
freed_bytes = space->UnaccountedFree(free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
}
Map* map = object->synchronized_map();
@@ -3338,7 +3315,7 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
freed_bytes = Free<parallelism>(space, free_list, free_start, size);
freed_bytes = space->UnaccountedFree(free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
}
p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
@@ -3453,21 +3430,25 @@ void MarkCompactCollector::SweepAbortedPages() {
switch (space->identity()) {
case OLD_SPACE:
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
IGNORE_FREE_SPACE>(space, p, nullptr);
break;
case CODE_SPACE:
if (FLAG_zap_code_space) {
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
ZAP_FREE_SPACE>(space, NULL, p, nullptr);
ZAP_FREE_SPACE>(space, p, nullptr);
} else {
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
IGNORE_FREE_SPACE>(space, NULL, p, nullptr);
IGNORE_FREE_SPACE>(space, p, nullptr);
}
break;
default:
UNREACHABLE();
break;
}
{
base::LockGuard<base::Mutex> guard(&swept_pages_mutex_);
swept_pages(space->identity())->Add(p);
}
}
}
}
@@ -3667,10 +3648,9 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
for (Page* p : evacuation_candidates_) {
if (!p->IsEvacuationCandidate()) continue;
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
space->Free(p->area_start(), p->area_size());
p->ResetLiveBytes();
CHECK(p->SweepingDone());
space->ReleasePage(p, true);
space->ReleasePage(p);
}
evacuation_candidates_.Rewind(0);
compacting_ = false;
@@ -3709,25 +3689,20 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
return 0;
}
page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
FreeList* free_list;
FreeList private_free_list(space);
if (space->identity() == OLD_SPACE) {
free_list = free_list_old_space_.get();
max_freed =
Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, page, NULL);
} else if (space->identity() == CODE_SPACE) {
free_list = free_list_code_space_.get();
max_freed =
Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
IGNORE_FREE_SPACE>(space, page, NULL);
} else {
free_list = free_list_map_space_.get();
max_freed =
Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, page, NULL);
}
{
base::LockGuard<base::Mutex> guard(&swept_pages_mutex_);
swept_pages(space->identity())->Add(page);
}
free_list->Concatenate(&private_free_list);
page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
page->mutex()->Unlock();
}
@@ -3770,7 +3745,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
// testing this is fine.
p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
IGNORE_FREE_SPACE>(space, p, nullptr);
continue;
}
@@ -3780,7 +3755,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
if (FLAG_gc_verbose) {
PrintIsolate(isolate(), "sweeping: released page: %p", p);
}
space->ReleasePage(p, false);
space->ReleasePage(p);
continue;
}
unused_page_present = true;
@@ -572,18 +572,19 @@ class MarkCompactCollector {
// address range.
void RemoveObjectSlots(Address start_slot, Address end_slot);
//
// Free lists filled by sweeper and consumed by corresponding spaces
// (including compaction spaces).
//
base::SmartPointer<FreeList>& free_list_old_space() {
return free_list_old_space_;
}
base::SmartPointer<FreeList>& free_list_code_space() {
return free_list_code_space_;
}
base::SmartPointer<FreeList>& free_list_map_space() {
return free_list_map_space_;
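// Pages that have been swept; filled by the sweeper and consumed by the
// corresponding spaces (including compaction spaces) when refilling their
// free lists.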
base::Mutex* swept_pages_mutex() { return &swept_pages_mutex_; }
List<Page*>* swept_pages(AllocationSpace id) {
switch (id) {
case OLD_SPACE:
return &swept_old_space_pages_;
case CODE_SPACE:
return &swept_code_space_pages_;
case MAP_SPACE:
return &swept_map_space_pages_;
default:
UNREACHABLE();
}
return nullptr;
}
private:
@@ -834,9 +835,10 @@ class MarkCompactCollector {
List<Page*> evacuation_candidates_;
List<NewSpacePage*> newspace_evacuation_candidates_;
base::SmartPointer<FreeList> free_list_old_space_;
base::SmartPointer<FreeList> free_list_code_space_;
base::SmartPointer<FreeList> free_list_map_space_;
base::Mutex swept_pages_mutex_;
List<Page*> swept_old_space_pages_;
List<Page*> swept_code_space_pages_;
List<Page*> swept_map_space_pages_;
SweepingList sweeping_list_old_space_;
SweepingList sweeping_list_code_space_;
@@ -262,13 +262,21 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
DCHECK(page->area_size() <= kAllocatableMemory);
DCHECK(chunk->owner() == owner);
owner->IncreaseCapacity(page->area_size());
owner->Free(page->area_start(), page->area_size());
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
// Make sure that categories are initialized before freeing the area.
page->InitializeFreeListCategories();
owner->Free(page->area_start(), page->area_size());
return page;
}
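// Sets up the page-local free-list categories, one per size class.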
void Page::InitializeFreeListCategories() {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
}
}
void MemoryChunk::IncrementLiveBytesFromGC(HeapObject* object, int by) {
MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
}
@@ -314,6 +322,24 @@ bool PagedSpace::Contains(Object* o) {
return p->owner() == this;
}
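// Unlinks all of the page's categories from the space's free list, making
// the page's free memory unavailable for allocation.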
void PagedSpace::UnlinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
page->ForAllFreeListCategories([this](FreeListCategory* category) {
DCHECK_EQ(free_list(), category->owner());
free_list()->RemoveCategory(category);
});
}
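// Re-links all of the page's categories into the space's free list and
// returns how many bytes became available for allocation again.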
intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
intptr_t added = 0;
page->ForAllFreeListCategories([&added](FreeListCategory* category) {
added += category->available();
category->Relink();
});
return added;
}
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
uintptr_t offset = addr - chunk->address();
@@ -327,6 +353,27 @@ Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
}
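// Evicts the page's free-list items so the page is never used for
// allocation again; test-only.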
void Page::MarkNeverAllocateForTesting() {
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
SetFlag(NEVER_ALLOCATE_ON_PAGE);
reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}
void Page::MarkEvacuationCandidate() {
DCHECK(!IsFlagSet(NEVER_EVACUATE));
DCHECK_NULL(old_to_old_slots_);
DCHECK_NULL(typed_old_to_old_slots_);
SetFlag(EVACUATION_CANDIDATE);
reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}
void Page::ClearEvacuationCandidate() {
DCHECK_NULL(old_to_old_slots_);
DCHECK_NULL(typed_old_to_old_slots_);
ClearFlag(EVACUATION_CANDIDATE);
InitializeFreeListCategories();
}
MemoryChunkIterator::MemoryChunkIterator(Heap* heap, Mode mode)
: state_(kOldSpaceState),
mode_(mode),
@@ -375,18 +422,29 @@ MemoryChunk* MemoryChunkIterator::next() {
return nullptr;
}
void Page::set_next_page(Page* page) {
DCHECK(page->owner() == owner());
set_next_chunk(page);
}
void Page::set_prev_page(Page* page) {
DCHECK(page->owner() == owner());
set_prev_chunk(page);
}
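// Categories are embedded in the page header, so a category can recover
// its page from its own address.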
Page* FreeListCategory::page() {
return Page::FromAddress(reinterpret_cast<Address>(this));
}
FreeList* FreeListCategory::owner() {
return reinterpret_cast<PagedSpace*>(
Page::FromAddress(reinterpret_cast<Address>(this))->owner())
->free_list();
}
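// A category is linked if it has a neighbor in the global list or is the
// head (top) of its size class.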
bool FreeListCategory::is_linked() {
return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
}
// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow case logic (e.g. move to the next page or try free list
@@ -35,7 +35,7 @@ HEAP_TEST(CompactionFullAbortedPage) {
HandleScope scope1(isolate);
PageIterator it(heap->old_space());
while (it.has_next()) {
it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
it.next()->MarkNeverAllocateForTesting();
}
{
@@ -80,7 +80,7 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
HandleScope scope1(isolate);
PageIterator it(heap->old_space());
while (it.has_next()) {
it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
it.next()->MarkNeverAllocateForTesting();
}
{
@@ -155,7 +155,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
PageIterator it(heap->old_space());
while (it.has_next()) {
it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
it.next()->MarkNeverAllocateForTesting();
}
Page* to_be_aborted_page = nullptr;
@@ -241,7 +241,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
isolate->factory()->NewFixedArray(10, TENURED);
PageIterator it(heap->old_space());
while (it.has_next()) {
it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
it.next()->MarkNeverAllocateForTesting();
}
Page* to_be_aborted_page = nullptr;