Commit da3b2661 authored by mlippautz, committed by Commit bot

[heap] Move to two-level free-list

Before this CL, free memory (FreeSpace) was managed through a global free
list that contained a singly-linked list of FreeSpace nodes per size class.

We move away from this approach to a global two-level doubly-linked list that
refers to singly-linked lists of FreeSpace nodes on the corresponding pages.
This way we can refill at page-level granularity. Furthermore, it enables
constant-time eviction of pages from the free list.
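
A minimal sketch of the structure this CL introduces (simplified: locking,
accounting, and most members are omitted; names follow the CL):

  // Each page embeds one FreeListCategory per size class. A category owns a
  // singly-linked list of the FreeSpace nodes residing on that page.
  class FreeListCategory {
    FreeSpace* top_;          // head of the page-local FreeSpace list
    FreeListCategory* prev_;  // doubly-linked list of all categories of the
    FreeListCategory* next_;  // same size class, across pages
  };

  // The global free list only tracks the top category per size class;
  // refilling pulls in whole pages, and evicting a page reduces to
  // unlinking its categories in constant time.
  class FreeList {
    FreeListCategory* categories_[kNumberOfCategories];
  };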

BUG=chromium:524425
LOG=N

Review URL: https://codereview.chromium.org/1772733002

Cr-Commit-Position: refs/heads/master@{#34853}
parent 6c8fc936
@@ -254,9 +254,6 @@ void MarkCompactCollector::SetUp() {
DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
free_list_old_space_.Reset(new FreeList(heap_->old_space()));
free_list_code_space_.Reset(new FreeList(heap_->code_space()));
free_list_map_space_.Reset(new FreeList(heap_->map_space()));
EnsureMarkingDequeIsReserved();
EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
@@ -496,9 +493,6 @@ class MarkCompactCollector::SweeperTask : public v8::Task {
void MarkCompactCollector::StartSweeperThreads() {
DCHECK(free_list_old_space_.get()->IsEmpty());
DCHECK(free_list_code_space_.get()->IsEmpty());
DCHECK(free_list_map_space_.get()->IsEmpty());
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new SweeperTask(heap(), OLD_SPACE), v8::Platform::kShortRunningTask);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
@@ -3245,28 +3239,12 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
}
};
enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
template <MarkCompactCollector::SweepingParallelism mode>
static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
int size) {
if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
DCHECK(free_list == NULL);
return space->Free(start, size);
} else {
return size - free_list->Free(start, size);
}
}
// Sweeps a page. After sweeping, the page can be iterated.
// Slots in live objects pointing into evacuation candidates are updated
// if requested.
@@ -3275,8 +3253,7 @@ template <SweepingMode sweeping_mode,
MarkCompactCollector::SweepingParallelism parallelism,
SkipListRebuildingMode skip_list_mode,
FreeSpaceTreatmentMode free_space_mode>
static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
ObjectVisitor* v) {
static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) {
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
@@ -3310,7 +3287,7 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
freed_bytes = Free<parallelism>(space, free_list, free_start, size);
freed_bytes = space->UnaccountedFree(free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
}
Map* map = object->synchronized_map();
@@ -3338,7 +3315,7 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
freed_bytes = Free<parallelism>(space, free_list, free_start, size);
freed_bytes = space->UnaccountedFree(free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
}
p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
@@ -3453,21 +3430,25 @@ void MarkCompactCollector::SweepAbortedPages() {
switch (space->identity()) {
case OLD_SPACE:
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
IGNORE_FREE_SPACE>(space, p, nullptr);
break;
case CODE_SPACE:
if (FLAG_zap_code_space) {
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
ZAP_FREE_SPACE>(space, NULL, p, nullptr);
ZAP_FREE_SPACE>(space, p, nullptr);
} else {
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
IGNORE_FREE_SPACE>(space, NULL, p, nullptr);
IGNORE_FREE_SPACE>(space, p, nullptr);
}
break;
default:
UNREACHABLE();
break;
}
{
base::LockGuard<base::Mutex> guard(&swept_pages_mutex_);
swept_pages(space->identity())->Add(p);
}
}
}
}
@@ -3667,10 +3648,9 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
for (Page* p : evacuation_candidates_) {
if (!p->IsEvacuationCandidate()) continue;
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
space->Free(p->area_start(), p->area_size());
p->ResetLiveBytes();
CHECK(p->SweepingDone());
space->ReleasePage(p, true);
space->ReleasePage(p);
}
evacuation_candidates_.Rewind(0);
compacting_ = false;
@@ -3709,25 +3689,20 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
return 0;
}
page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
FreeList* free_list;
FreeList private_free_list(space);
if (space->identity() == OLD_SPACE) {
free_list = free_list_old_space_.get();
max_freed =
Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, page, NULL);
} else if (space->identity() == CODE_SPACE) {
free_list = free_list_code_space_.get();
max_freed =
Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
IGNORE_FREE_SPACE>(space, page, NULL);
} else {
free_list = free_list_map_space_.get();
max_freed =
Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, page, NULL);
}
{
base::LockGuard<base::Mutex> guard(&swept_pages_mutex_);
swept_pages(space->identity())->Add(page);
}
free_list->Concatenate(&private_free_list);
page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
page->mutex()->Unlock();
}
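
With the collector-owned free lists gone, a parallel sweeper no longer
concatenates a private free list into a global one; the page itself carries
the freed memory. A sketch of the new handoff (simplified from the code in
this CL; page locking and error handling omitted):

  // Sweeper: free into the page's own categories without linking them into
  // the space's free list and without touching accounting.
  space->UnaccountedFree(free_start, size);
  {
    base::LockGuard<base::Mutex> guard(collector->swept_pages_mutex());
    collector->swept_pages(space->identity())->Add(page);
  }

  // Allocator (PagedSpace::RefillFreeList): pick up swept pages, relink
  // their categories, and account the added capacity plus wasted memory.
  intptr_t added = RelinkFreeListCategories(page) + page->wasted_memory();
  accounting_stats_.IncreaseCapacity(added);
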
@@ -3770,7 +3745,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
// testing this is fine.
p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
IGNORE_FREE_SPACE>(space, p, nullptr);
continue;
}
@@ -3780,7 +3755,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
if (FLAG_gc_verbose) {
PrintIsolate(isolate(), "sweeping: released page: %p", p);
}
space->ReleasePage(p, false);
space->ReleasePage(p);
continue;
}
unused_page_present = true;
......
@@ -572,18 +572,19 @@ class MarkCompactCollector {
// address range.
void RemoveObjectSlots(Address start_slot, Address end_slot);
//
// Free lists filled by sweeper and consumed by corresponding spaces
// (including compaction spaces).
//
base::SmartPointer<FreeList>& free_list_old_space() {
return free_list_old_space_;
}
base::SmartPointer<FreeList>& free_list_code_space() {
return free_list_code_space_;
base::Mutex* swept_pages_mutex() { return &swept_pages_mutex_; }
List<Page*>* swept_pages(AllocationSpace id) {
switch (id) {
case OLD_SPACE:
return &swept_old_space_pages_;
case CODE_SPACE:
return &swept_code_space_pages_;
case MAP_SPACE:
return &swept_map_space_pages_;
default:
UNREACHABLE();
}
base::SmartPointer<FreeList>& free_list_map_space() {
return free_list_map_space_;
return nullptr;
}
private:
@@ -834,9 +835,10 @@ class MarkCompactCollector {
List<Page*> evacuation_candidates_;
List<NewSpacePage*> newspace_evacuation_candidates_;
base::SmartPointer<FreeList> free_list_old_space_;
base::SmartPointer<FreeList> free_list_code_space_;
base::SmartPointer<FreeList> free_list_map_space_;
base::Mutex swept_pages_mutex_;
List<Page*> swept_old_space_pages_;
List<Page*> swept_code_space_pages_;
List<Page*> swept_map_space_pages_;
SweepingList sweeping_list_old_space_;
SweepingList sweeping_list_code_space_;
......
@@ -262,13 +262,21 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
DCHECK(page->area_size() <= kAllocatableMemory);
DCHECK(chunk->owner() == owner);
owner->IncreaseCapacity(page->area_size());
owner->Free(page->area_start(), page->area_size());
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
// Make sure that categories are initialized before freeing the area.
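// (PagedSpace::Free hands the memory to one of the page's embedded
// categories via FreeList::Free, so categories_ must be set up first.)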
page->InitializeFreeListCategories();
owner->Free(page->area_start(), page->area_size());
return page;
}
void Page::InitializeFreeListCategories() {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
}
}
void MemoryChunk::IncrementLiveBytesFromGC(HeapObject* object, int by) {
MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
}
@@ -314,6 +322,24 @@ bool PagedSpace::Contains(Object* o) {
return p->owner() == this;
}
void PagedSpace::UnlinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
page->ForAllFreeListCategories([this](FreeListCategory* category) {
DCHECK_EQ(free_list(), category->owner());
free_list()->RemoveCategory(category);
});
}
intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
intptr_t added = 0;
page->ForAllFreeListCategories([&added](FreeListCategory* category) {
added += category->available();
category->Relink();
});
return added;
}
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
uintptr_t offset = addr - chunk->address();
@@ -327,6 +353,27 @@ Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
}
void Page::MarkNeverAllocateForTesting() {
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
SetFlag(NEVER_ALLOCATE_ON_PAGE);
reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}
void Page::MarkEvacuationCandidate() {
DCHECK(!IsFlagSet(NEVER_EVACUATE));
DCHECK_NULL(old_to_old_slots_);
DCHECK_NULL(typed_old_to_old_slots_);
SetFlag(EVACUATION_CANDIDATE);
reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}
void Page::ClearEvacuationCandidate() {
DCHECK_NULL(old_to_old_slots_);
DCHECK_NULL(typed_old_to_old_slots_);
ClearFlag(EVACUATION_CANDIDATE);
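// MarkEvacuationCandidate (above) evicted and invalidated this page's
// categories, so reinitialize them before the page can hold free memory
// again.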
InitializeFreeListCategories();
}
MemoryChunkIterator::MemoryChunkIterator(Heap* heap, Mode mode)
: state_(kOldSpaceState),
mode_(mode),
@@ -375,18 +422,29 @@ MemoryChunk* MemoryChunkIterator::next() {
return nullptr;
}
void Page::set_next_page(Page* page) {
DCHECK(page->owner() == owner());
set_next_chunk(page);
}
void Page::set_prev_page(Page* page) {
DCHECK(page->owner() == owner());
set_prev_chunk(page);
}
Page* FreeListCategory::page() {
return Page::FromAddress(reinterpret_cast<Address>(this));
}
FreeList* FreeListCategory::owner() {
return reinterpret_cast<PagedSpace*>(
Page::FromAddress(reinterpret_cast<Address>(this))->owner())
->free_list();
}
bool FreeListCategory::is_linked() {
return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
}
// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow case logic (e.g. move to the next page or try free list
......
@@ -1030,79 +1030,46 @@ void PagedSpace::TearDown() {
accounting_stats_.Clear();
}
void PagedSpace::AddMemory(Address start, intptr_t size) {
accounting_stats_.ExpandSpace(static_cast<int>(size));
Free(start, static_cast<int>(size));
}
void PagedSpace::RefillFreeList() {
MarkCompactCollector* collector = heap()->mark_compact_collector();
FreeList* free_list = nullptr;
if (this == heap()->old_space()) {
free_list = collector->free_list_old_space().get();
} else if (this == heap()->code_space()) {
free_list = collector->free_list_code_space().get();
} else if (this == heap()->map_space()) {
free_list = collector->free_list_map_space().get();
} else {
// Any PagedSpace might invoke RefillFreeList. We filter out all but the
// old generation spaces.
if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
identity() != MAP_SPACE) {
return;
}
DCHECK(free_list != nullptr);
intptr_t added = free_list_.Concatenate(free_list);
accounting_stats_.IncreaseCapacity(added);
}
void CompactionSpace::RefillFreeList() {
MarkCompactCollector* collector = heap()->mark_compact_collector();
FreeList* free_list = nullptr;
if (identity() == OLD_SPACE) {
free_list = collector->free_list_old_space().get();
} else if (identity() == CODE_SPACE) {
free_list = collector->free_list_code_space().get();
} else {
// Compaction spaces only represent old or code space.
UNREACHABLE();
List<Page*>* swept_pages = collector->swept_pages(identity());
intptr_t added = 0;
{
base::LockGuard<base::Mutex> guard(collector->swept_pages_mutex());
for (int i = swept_pages->length() - 1; i >= 0; --i) {
Page* p = (*swept_pages)[i];
// Only during compaction can pages actually change ownership. This is
// safe because there is no other competing action on the page links
// during compaction.
if (is_local() && (p->owner() != this)) {
if (added > kCompactionMemoryWanted) break;
base::LockGuard<base::Mutex> guard(
reinterpret_cast<PagedSpace*>(p->owner())->mutex());
p->Unlink();
p->set_owner(this);
p->InsertAfter(anchor_.prev_page());
}
added += RelinkFreeListCategories(p);
added += p->wasted_memory();
swept_pages->Remove(i);
}
DCHECK(free_list != nullptr);
intptr_t refilled = 0;
while (refilled < kCompactionMemoryWanted) {
FreeSpace* node =
free_list->TryRemoveMemory(kCompactionMemoryWanted - refilled);
if (node == nullptr) return;
refilled += node->size();
AddMemory(node->address(), node->size());
}
}
void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
DCHECK(identity() == other->identity());
// Destroy the linear allocation space of {other}. This is needed to
// (a) not waste the memory and
// (b) keep the rest of the chunk in an iterable state (filler is needed).
other->EmptyAllocationInfo();
// Move over the free list. Concatenate makes sure that the source free list
// gets properly reset after moving over all nodes.
intptr_t added = free_list_.Concatenate(other->free_list());
// Moved memory is not recorded as allocated memory, but rather increases and
// decreases capacity of the corresponding spaces.
other->accounting_stats_.DecreaseCapacity(added);
accounting_stats_.IncreaseCapacity(added);
}
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
DCHECK(identity() == other->identity());
// Unmerged fields:
// area_size_
// anchor_
MoveOverFreeMemory(other);
other->EmptyAllocationInfo();
// Update and clear accounting statistics.
accounting_stats_.Merge(other->accounting_stats_);
@@ -1119,9 +1086,14 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
Page* p = nullptr;
while (it.has_next()) {
p = it.next();
// Relinking requires the category to be unlinked.
other->UnlinkFreeListCategories(p);
p->Unlink();
p->set_owner(this);
p->InsertAfter(anchor_.prev_page());
RelinkFreeListCategories(p);
}
}
@@ -1238,17 +1210,12 @@ void PagedSpace::IncreaseCapacity(int size) {
accounting_stats_.ExpandSpace(size);
}
void PagedSpace::ReleasePage(Page* page) {
DCHECK_EQ(page->LiveBytes(), 0);
DCHECK_EQ(AreaSize(), page->area_size());
DCHECK_EQ(page->owner(), this);
void PagedSpace::ReleasePage(Page* page, bool evict_free_list_items) {
DCHECK(page->LiveBytes() == 0);
DCHECK(AreaSize() == page->area_size());
if (evict_free_list_items) {
intptr_t size = free_list_.EvictFreeListItems(page);
accounting_stats_.AllocateBytes(size);
DCHECK_EQ(AreaSize(), static_cast<int>(size));
}
free_list_.EvictFreeListItems(page);
DCHECK(!free_list_.ContainsPageFreeListItems(page));
if (Page::FromAllocationTop(allocation_info_.top()) == page) {
@@ -1268,7 +1235,6 @@ void PagedSpace::ReleasePage(Page* page, bool evict_free_list_items) {
accounting_stats_.ShrinkSpace(AreaSize());
}
#ifdef DEBUG
void PagedSpace::Print() {}
#endif
@@ -2175,137 +2141,54 @@ size_t NewSpace::CommittedPhysicalMemory() {
// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation
intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
intptr_t free_bytes = 0;
if (category->top() != NULL) {
DCHECK(category->end_ != NULL);
free_bytes = category->available();
if (end_ == NULL) {
end_ = category->end();
} else {
category->end()->set_next(top());
}
set_top(category->top());
available_ += category->available();
category->Reset();
}
return free_bytes;
}
void FreeListCategory::Reset() {
set_top(nullptr);
set_end(nullptr);
set_prev(nullptr);
set_next(nullptr);
available_ = 0;
}
intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
intptr_t sum = 0;
FreeSpace* prev_node = nullptr;
for (FreeSpace* cur_node = top(); cur_node != nullptr;
cur_node = cur_node->next()) {
Page* page_for_node = Page::FromAddress(cur_node->address());
if (page_for_node == p) {
// FreeSpace node on eviction page found, unlink it.
int size = cur_node->size();
sum += size;
DCHECK((prev_node != nullptr) || (top() == cur_node));
if (cur_node == top()) {
set_top(cur_node->next());
}
if (cur_node == end()) {
set_end(prev_node);
}
if (prev_node != nullptr) {
prev_node->set_next(cur_node->next());
}
continue;
}
prev_node = cur_node;
}
p->add_available_in_free_list(-sum);
available_ -= sum;
return sum;
}
bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
FreeSpace* node = top();
while (node != NULL) {
if (Page::FromAddress(node->address()) == p) return true;
node = node->next();
}
return false;
}
FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
DCHECK(page()->CanAllocate());
FreeSpace* node = top();
if (node == nullptr) return nullptr;
Page* page = Page::FromAddress(node->address());
while ((node != nullptr) && !page->CanAllocate()) {
available_ -= node->size();
page->add_available_in_free_list(-(node->Size()));
node = node->next();
}
if (node != nullptr) {
set_top(node->next());
*node_size = node->Size();
available_ -= *node_size;
} else {
set_top(nullptr);
}
if (top() == nullptr) {
set_end(nullptr);
}
return node;
}
FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
FreeSpace* FreeListCategory::TryPickNodeFromList(int minimum_size,
int* node_size) {
DCHECK(page()->CanAllocate());
FreeSpace* node = PickNodeFromList(node_size);
if ((node != nullptr) && (*node_size < size_in_bytes)) {
Free(node, *node_size);
if ((node != nullptr) && (*node_size < minimum_size)) {
Free(node, *node_size, kLinkCategory);
*node_size = 0;
return nullptr;
}
return node;
}
FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes,
FreeSpace* FreeListCategory::SearchForNodeInList(int minimum_size,
int* node_size) {
DCHECK(page()->CanAllocate());
FreeSpace* prev_non_evac_node = nullptr;
for (FreeSpace* cur_node = top(); cur_node != nullptr;
cur_node = cur_node->next()) {
int size = cur_node->size();
Page* page_for_node = Page::FromAddress(cur_node->address());
if ((size >= size_in_bytes) || !page_for_node->CanAllocate()) {
// The node is either large enough or contained in an evacuation
// candidate. In both cases we need to unlink it from the list.
if (size >= minimum_size) {
available_ -= size;
if (cur_node == top()) {
set_top(cur_node->next());
}
if (cur_node == end()) {
set_end(prev_non_evac_node);
}
if (prev_non_evac_node != nullptr) {
prev_non_evac_node->set_next(cur_node->next());
}
// For evacuation candidates we continue.
if (!page_for_node->CanAllocate()) {
page_for_node->add_available_in_free_list(-size);
continue;
}
// Otherwise we have a large enough node and can return.
*node_size = size;
return cur_node;
}
@@ -2315,14 +2198,17 @@ FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes,
return nullptr;
}
bool FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes,
FreeMode mode) {
if (!page()->CanAllocate()) return false;
void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
free_space->set_next(top());
set_top(free_space);
if (end_ == NULL) {
end_ = free_space;
}
available_ += size_in_bytes;
if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
owner()->AddCategory(this);
}
return true;
}
@@ -2339,49 +2225,35 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
}
}
FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
category_[i].Initialize(this, static_cast<FreeListCategoryType>(i));
}
Reset();
void FreeListCategory::Relink() {
DCHECK(!is_linked());
owner()->AddCategory(this);
}
void FreeListCategory::Invalidate() {
page()->add_available_in_free_list(-available());
Reset();
type_ = kInvalidCategory;
}
intptr_t FreeList::Concatenate(FreeList* other) {
intptr_t usable_bytes = 0;
intptr_t wasted_bytes = 0;
// This is safe (not going to deadlock) since Concatenate operations
// are never performed on the same free lists at the same time in
// reverse order. Furthermore, we only lock if the PagedSpace containing
// the free list is known to be globally available, i.e., not local.
if (!owner()->is_local()) mutex_.Lock();
if (!other->owner()->is_local()) other->mutex()->Lock();
wasted_bytes = other->wasted_bytes_;
wasted_bytes_ += wasted_bytes;
other->wasted_bytes_ = 0;
FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
usable_bytes += category_[i].Concatenate(
other->GetFreeListCategory(static_cast<FreeListCategoryType>(i)));
categories_[i] = nullptr;
}
if (!other->owner()->is_local()) other->mutex()->Unlock();
if (!owner()->is_local()) mutex_.Unlock();
return usable_bytes + wasted_bytes;
Reset();
}
void FreeList::Reset() {
ForAllFreeListCategories(
[](FreeListCategory* category) { category->Reset(); });
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
category_[i].Reset();
categories_[i] = nullptr;
}
ResetStats();
}
int FreeList::Free(Address start, int size_in_bytes) {
int FreeList::Free(Address start, int size_in_bytes, FreeMode mode) {
if (size_in_bytes == 0) return 0;
owner()->heap()->CreateFillerObjectAt(start, size_in_bytes,
@@ -2392,7 +2264,7 @@ int FreeList::Free(Address start, int size_in_bytes) {
// Blocks have to be a minimum size to hold free list items.
if (size_in_bytes < kMinBlockSize) {
page->add_wasted_memory(size_in_bytes);
wasted_bytes_ += size_in_bytes;
wasted_bytes_.Increment(size_in_bytes);
return size_in_bytes;
}
@@ -2400,16 +2272,34 @@ int FreeList::Free(Address start, int size_in_bytes) {
// Insert other blocks at the head of a free list of the appropriate
// magnitude.
FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
category_[type].Free(free_space, size_in_bytes);
if (page->free_list_category(type)->Free(free_space, size_in_bytes, mode)) {
page->add_available_in_free_list(size_in_bytes);
DCHECK(IsVeryLong() || Available() == SumFreeLists());
}
return 0;
}
FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) {
FreeListCategoryIterator it(this, type);
FreeSpace* node = nullptr;
while (it.HasNext()) {
FreeListCategory* current = it.Next();
node = current->PickNodeFromList(node_size);
if (node != nullptr) {
Page::FromAddress(node->address())
->add_available_in_free_list(-(*node_size));
DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
}
RemoveCategory(current);
}
return node;
}
FreeSpace* FreeList::FindNodeIn(FreeListCategoryType category, int* node_size) {
FreeSpace* node = GetFreeListCategory(category)->PickNodeFromList(node_size);
FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, int* node_size,
int minimum_size) {
if (categories_[type] == nullptr) return nullptr;
FreeSpace* node =
categories_[type]->TryPickNodeFromList(minimum_size, node_size);
if (node != nullptr) {
Page::FromAddress(node->address())
->add_available_in_free_list(-(*node_size));
@@ -2418,6 +2308,22 @@ FreeSpace* FreeList::FindNodeIn(FreeListCategoryType category, int* node_size) {
return node;
}
FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
int* node_size, int minimum_size) {
FreeListCategoryIterator it(this, type);
FreeSpace* node = nullptr;
while (it.HasNext()) {
FreeListCategory* current = it.Next();
node = current->SearchForNodeInList(minimum_size, node_size);
if (node != nullptr) {
Page::FromAddress(node->address())
->add_available_in_free_list(-(*node_size));
DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
}
}
return node;
}
FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
FreeSpace* node = nullptr;
@@ -2434,10 +2340,8 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
// Next search the huge list for free list nodes. This takes linear time in
// the number of huge elements.
node = category_[kHuge].SearchForNodeInList(size_in_bytes, node_size);
node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
if (node != nullptr) {
page = Page::FromAddress(node->address());
page->add_available_in_free_list(-(*node_size));
DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
}
@@ -2449,7 +2353,7 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
// Now search the best fitting free list for a node that has at least the
// requested size.
type = SelectFreeListCategoryType(size_in_bytes);
node = category_[type].PickNodeFromList(size_in_bytes, node_size);
node = TryFindNodeIn(type, node_size, size_in_bytes);
if (node != nullptr) {
DCHECK(size_in_bytes <= *node_size);
page = Page::FromAddress(node->address());
@@ -2460,38 +2364,6 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
return node;
}
FreeSpace* FreeList::TryRemoveMemory(intptr_t hint_size_in_bytes) {
hint_size_in_bytes = RoundDown(hint_size_in_bytes, kPointerSize);
base::LockGuard<base::Mutex> guard(&mutex_);
FreeSpace* node = nullptr;
int node_size = 0;
// Try to find a node that fits exactly.
node = FindNodeFor(static_cast<int>(hint_size_in_bytes), &node_size);
// If no node could be found get as much memory as possible.
if (node == nullptr) node = FindNodeIn(kHuge, &node_size);
if (node == nullptr) node = FindNodeIn(kLarge, &node_size);
if (node != nullptr) {
// We round up the size to (kMinBlockSize + kPointerSize) to (a) have a
// size larger than the minimum size required for FreeSpace, and (b) to get
// a block that can actually be freed into some FreeList later on.
if (hint_size_in_bytes <= kMinBlockSize) {
hint_size_in_bytes = kMinBlockSize + kPointerSize;
}
// Give back leftovers that were not required by {hint_size_in_bytes}.
intptr_t left_over = node_size - hint_size_in_bytes;
// Do not bother to return anything below {kMinBlockSize} as it would be
// immediately discarded anyway.
if (left_over > kMinBlockSize) {
Free(node->address() + hint_size_in_bytes, static_cast<int>(left_over));
node->set_size(static_cast<int>(hint_size_in_bytes));
}
}
return node;
}
// Allocation on the old space free list. If it succeeds then a new linear
// allocation space has been set up with the top and limit of the space. If
// the allocation fails then NULL is returned, and the caller can perform a GC
@@ -2565,32 +2437,76 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
return new_node;
}
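// Note: eviction is now constant time per page: each of the page's
// categories is unlinked from its doubly-linked category list and
// invalidated, instead of walking every FreeSpace node as the old
// EvictFreeListItemsInList did.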
intptr_t FreeList::EvictFreeListItems(Page* page) {
intptr_t sum = 0;
page->ForAllFreeListCategories(
[this, &sum, page](FreeListCategory* category) {
DCHECK_EQ(this, category->owner());
sum += category->available();
RemoveCategory(category);
category->Invalidate();
});
return sum;
}
intptr_t FreeList::EvictFreeListItems(Page* p) {
intptr_t sum = category_[kHuge].EvictFreeListItemsInList(p);
if (sum < p->area_size()) {
for (int i = kFirstCategory; i <= kLarge; i++) {
sum += category_[i].EvictFreeListItemsInList(p);
bool FreeList::ContainsPageFreeListItems(Page* page) {
bool contained = false;
page->ForAllFreeListCategories(
[this, &contained](FreeListCategory* category) {
if (category->owner() == this && category->is_linked()) {
contained = true;
}
}
return sum;
});
return contained;
}
void FreeList::RepairLists(Heap* heap) {
ForAllFreeListCategories(
[heap](FreeListCategory* category) { category->RepairFreeList(heap); });
}
bool FreeList::ContainsPageFreeListItems(Page* p) {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
if (category_[i].EvictFreeListItemsInList(p)) {
bool FreeList::AddCategory(FreeListCategory* category) {
FreeListCategoryType type = category->type_;
FreeListCategory* top = categories_[type];
if (category->is_empty()) return false;
if (top == category) return false;
// Common doubly-linked list insertion.
if (top != nullptr) {
top->set_prev(category);
}
category->set_next(top);
categories_[type] = category;
return true;
}
void FreeList::RemoveCategory(FreeListCategory* category) {
FreeListCategoryType type = category->type_;
FreeListCategory* top = categories_[type];
// Common doubly-linked list removal.
if (top == category) {
categories_[type] = category->next();
}
if (category->prev() != nullptr) {
category->prev()->set_next(category->next());
}
return false;
if (category->next() != nullptr) {
category->next()->set_prev(category->prev());
}
category->set_next(nullptr);
category->set_prev(nullptr);
}
void FreeList::RepairLists(Heap* heap) {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
category_[i].RepairFreeList(heap);
void FreeList::PrintCategories(FreeListCategoryType type) {
FreeListCategoryIterator it(this, type);
PrintF("FreeList[%p, top=%p, %d] ", this, categories_[type], type);
while (it.HasNext()) {
FreeListCategory* current = it.Next();
PrintF("%p -> ", current);
}
PrintF("null\n");
}
@@ -2606,7 +2522,6 @@ intptr_t FreeListCategory::SumFreeList() {
return sum;
}
int FreeListCategory::FreeListLength() {
int length = 0;
FreeSpace* cur = top();
@@ -2618,16 +2533,13 @@ int FreeListCategory::FreeListLength() {
return length;
}
bool FreeListCategory::IsVeryLong() {
return FreeListLength() == kVeryLongFreeList;
}
bool FreeList::IsVeryLong() {
int len = 0;
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
if (category_[i].IsVeryLong()) {
return true;
FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
while (it.HasNext()) {
len += it.Next()->FreeListLength();
if (len >= FreeListCategory::kVeryLongFreeList) return true;
}
}
return false;
@@ -2639,9 +2551,8 @@ bool FreeList::IsVeryLong() {
// kVeryLongFreeList.
intptr_t FreeList::SumFreeLists() {
intptr_t sum = 0;
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
sum += category_[i].SumFreeList();
}
ForAllFreeListCategories(
[&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
return sum;
}
#endif
......
@@ -27,6 +27,7 @@ class FreeList;
class Isolate;
class MemoryAllocator;
class MemoryChunk;
class Page;
class PagedSpace;
class SemiSpace;
class SkipList;
@@ -287,6 +288,113 @@ class Bitmap {
}
};
enum FreeListCategoryType {
kTiniest,
kTiny,
kSmall,
kMedium,
kLarge,
kHuge,
kFirstCategory = kTiniest,
kLastCategory = kHuge,
kNumberOfCategories = kLastCategory + 1,
kInvalidCategory
};
enum FreeMode { kLinkCategory, kDoNotLinkCategory };
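// kLinkCategory: when freeing, also link the receiving category into its
// owner's free list if it is not linked yet. kDoNotLinkCategory: only store
// the node on the page and leave the category unlinked; used by
// PagedSpace::UnaccountedFree so concurrent sweepers stay off the shared
// category lists (see FreeList::Free and FreeListCategory::Free).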
// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
public:
static const int kSize = kIntSize + // FreeListCategoryType type_
kIntSize + // int available_
kPointerSize + // FreeSpace* top_
kPointerSize + // FreeListCategory* prev_
kPointerSize; // FreeListCategory* next_
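// On a 64-bit target (kIntSize == 4, kPointerSize == 8) this comes to
// 4 + 4 + 8 + 8 + 8 = 32 bytes per category, so the embedded categories_
// array adds 32 * kNumberOfCategories (6) = 192 bytes to each MemoryChunk.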
FreeListCategory()
: type_(kInvalidCategory),
available_(0),
top_(nullptr),
prev_(nullptr),
next_(nullptr) {}
void Initialize(FreeListCategoryType type) {
type_ = type;
available_ = 0;
top_ = nullptr;
prev_ = nullptr;
next_ = nullptr;
}
void Invalidate();
void Reset();
void ResetStats() { Reset(); }
void RepairFreeList(Heap* heap);
// Relinks the category into the currently owning free list. Requires that the
// category is currently unlinked.
void Relink();
bool Free(FreeSpace* node, int size_in_bytes, FreeMode mode);
// Picks a node from the list and stores its size in |node_size|. Returns
// nullptr if the category is empty.
FreeSpace* PickNodeFromList(int* node_size);
// Performs a single try to pick a node of at least |minimum_size| from the
// category. Stores the actual size in |node_size|. Returns nullptr if no
// node is found.
FreeSpace* TryPickNodeFromList(int minimum_size, int* node_size);
// Picks a node of at least |minimum_size| from the category. Stores the
// actual size in |node_size|. Returns nullptr if no node is found.
FreeSpace* SearchForNodeInList(int minimum_size, int* node_size);
inline FreeList* owner();
inline bool is_linked();
bool is_empty() { return top() == nullptr; }
int available() const { return available_; }
#ifdef DEBUG
intptr_t SumFreeList();
int FreeListLength();
#endif
private:
// For debug builds we accurately compute free list lengths up until
// {kVeryLongFreeList} by manually walking the list.
static const int kVeryLongFreeList = 500;
inline Page* page();
FreeSpace* top() { return top_; }
void set_top(FreeSpace* top) { top_ = top; }
FreeListCategory* prev() { return prev_; }
void set_prev(FreeListCategory* prev) { prev_ = prev; }
FreeListCategory* next() { return next_; }
void set_next(FreeListCategory* next) { next_ = next; }
// |type_|: The type of this free list category.
FreeListCategoryType type_;
// |available_|: Total available bytes in all blocks of this free list
// category.
int available_;
// |top_|: Points to the top FreeSpace* in the free list category.
FreeSpace* top_;
FreeListCategory* prev_;
FreeListCategory* next_;
friend class FreeList;
friend class PagedSpace;
};
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
@@ -397,7 +505,9 @@ class MemoryChunk {
+ kPointerSize // base::AtomicWord concurrent_sweeping_
+ 2 * kPointerSize // AtomicNumber free-list statistics
+ kPointerSize // AtomicValue next_chunk_
+ kPointerSize; // AtomicValue prev_chunk_
+ kPointerSize // AtomicValue prev_chunk_
// FreeListCategory categories_[kNumberOfCategories]
+ FreeListCategory::kSize * kNumberOfCategories;
// We add some more space to the computed header size to account for missing
// alignment requirements in our computation.
@@ -583,19 +693,6 @@ class MemoryChunk {
return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
}
void MarkEvacuationCandidate() {
DCHECK(!IsFlagSet(NEVER_EVACUATE));
DCHECK_NULL(old_to_old_slots_);
DCHECK_NULL(typed_old_to_old_slots_);
SetFlag(EVACUATION_CANDIDATE);
}
void ClearEvacuationCandidate() {
DCHECK_NULL(old_to_old_slots_);
DCHECK_NULL(typed_old_to_old_slots_);
ClearFlag(EVACUATION_CANDIDATE);
}
bool ShouldSkipEvacuationSlotRecording() {
return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
}
@@ -705,6 +802,8 @@ class MemoryChunk {
// prev_chunk_ holds a pointer of type MemoryChunk
AtomicValue<MemoryChunk*> prev_chunk_;
FreeListCategory categories_[kNumberOfCategories];
private:
void InitializeReservedMemory() { reservation_.Reset(); }
@@ -712,19 +811,6 @@ class MemoryChunk {
friend class MemoryChunkValidator;
};
enum FreeListCategoryType {
kTiniest,
kTiny,
kSmall,
kMedium,
kLarge,
kHuge,
kFirstCategory = kTiniest,
kLastCategory = kHuge,
kNumberOfCategories = kLastCategory + 1
};
// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
@@ -820,6 +906,17 @@ class Page : public MemoryChunk {
available_in_free_list());
}
template <typename Callback>
inline void ForAllFreeListCategories(Callback callback) {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
callback(&categories_[i]);
}
}
FreeListCategory* free_list_category(FreeListCategoryType type) {
return &categories_[type];
}
#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
type name() { return name##_.Value(); } \
void set_##name(type name) { name##_.SetValue(name); } \
@@ -834,6 +931,13 @@ class Page : public MemoryChunk {
void Print();
#endif // DEBUG
inline void MarkNeverAllocateForTesting();
inline void MarkEvacuationCandidate();
inline void ClearEvacuationCandidate();
private:
inline void InitializeFreeListCategories();
friend class MemoryAllocator;
};
@@ -1473,12 +1577,6 @@ class AllocationStats BASE_EMBEDDED {
void ClearSize() { size_ = capacity_; }
// Reset the allocation statistics (i.e., available = capacity with no wasted
// or allocated bytes).
void Reset() {
size_ = 0;
}
// Accessors for the allocation statistics.
intptr_t Capacity() { return capacity_; }
intptr_t MaxCapacity() { return max_capacity_; }
@@ -1505,13 +1603,13 @@ class AllocationStats BASE_EMBEDDED {
void ShrinkSpace(int size_in_bytes) {
capacity_ -= size_in_bytes;
size_ -= size_in_bytes;
CHECK(size_ >= 0);
CHECK_GE(size_, 0);
}
// Allocate from available bytes (available -> size).
void AllocateBytes(intptr_t size_in_bytes) {
size_ += size_in_bytes;
CHECK(size_ >= 0);
CHECK_GE(size_, 0);
}
// Free allocated bytes, making them available (size -> available).
@@ -1550,80 +1648,6 @@ class AllocationStats BASE_EMBEDDED {
intptr_t size_;
};
// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
public:
FreeListCategory() : top_(nullptr), end_(nullptr), available_(0) {}
void Initialize(FreeList* owner, FreeListCategoryType type) {
owner_ = owner;
type_ = type;
}
// Concatenates {category} into {this}.
//
// Note: Thread-safe.
intptr_t Concatenate(FreeListCategory* category);
void Reset();
void Free(FreeSpace* node, int size_in_bytes);
// Pick a node from the list.
FreeSpace* PickNodeFromList(int* node_size);
// Pick a node from the list and compare it against {size_in_bytes}. If the
// node's size is greater than or equal, return the node; otherwise return null.
FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);
// Search for a node of size {size_in_bytes}.
FreeSpace* SearchForNodeInList(int size_in_bytes, int* node_size);
intptr_t EvictFreeListItemsInList(Page* p);
bool ContainsPageFreeListItemsInList(Page* p);
void RepairFreeList(Heap* heap);
bool IsEmpty() { return top() == nullptr; }
FreeList* owner() { return owner_; }
int available() const { return available_; }
#ifdef DEBUG
intptr_t SumFreeList();
int FreeListLength();
bool IsVeryLong();
#endif
private:
// For debug builds we accurately compute free list lengths up until
// {kVeryLongFreeList} by manually walking the list.
static const int kVeryLongFreeList = 500;
FreeSpace* top() { return top_.Value(); }
void set_top(FreeSpace* top) { top_.SetValue(top); }
FreeSpace* end() const { return end_; }
void set_end(FreeSpace* end) { end_ = end; }
// |type_|: The type of this free list category.
FreeListCategoryType type_;
// |top_|: Points to the top FreeSpace* in the free list category.
AtomicValue<FreeSpace*> top_;
// |end_|: Points to the end FreeSpace* in the free list category.
FreeSpace* end_;
// |available_|: Total available bytes in all blocks of this free list
// category.
int available_;
// |owner_|: The owning free list of this category.
FreeList* owner_;
};
// A free list maintaining free blocks of memory. The free list is organized in
// a way to encourage objects allocated around the same time to be near each
// other. The normal way to allocate is intended to be by bumping a 'top'
@@ -1668,19 +1692,13 @@ class FreeList {
explicit FreeList(PagedSpace* owner);
// The method concatenates {other} into {this} and returns the added bytes,
// including waste.
//
// Note: Thread-safe.
intptr_t Concatenate(FreeList* other);
// Adds a node on the free list. The block of size {size_in_bytes} starting
// at {start} is placed on the free list. The return value is the number of
// bytes that were not added to the free list, because they freed memory block
// was too small. Bookkeeping information will be written to the block, i.e.,
// its contents will be destroyed. The start address should be word aligned,
// and the size should be a non-zero multiple of the word size.
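// For example, on a 64-bit build (kMinBlockSize == 3 * kPointerSize == 24),
// freeing a 16-byte block returns 16: the block is too small to carry a
// free-list node and is recorded as wasted bytes instead.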
int Free(Address start, int size_in_bytes);
int Free(Address start, int size_in_bytes, FreeMode mode);
// Allocate a block of size {size_in_bytes} from the free list. The block is
// uninitialized. A failure is returned if no block is available. The size
@@ -1690,49 +1708,83 @@ class FreeList {
// Clear the free list.
void Reset();
void ResetStats() { wasted_bytes_ = 0; }
void ResetStats() {
wasted_bytes_.SetValue(0);
ForAllFreeListCategories(
[](FreeListCategory* category) { category->ResetStats(); });
}
// Return the number of bytes available on the free list.
intptr_t Available() {
intptr_t available = 0;
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
available += category_[i].available();
}
ForAllFreeListCategories([&available](FreeListCategory* category) {
available += category->available();
});
return available;
}
// The method tries to find a {FreeSpace} node of at least {size_in_bytes}
// size in the free list category exactly matching the size. If no suitable
// node could be found, the method falls back to retrieving a {FreeSpace}
// from the large or huge free list category.
//
// Can be used concurrently.
MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes);
bool IsEmpty() {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
if (!category_[i].IsEmpty()) return false;
}
return true;
bool empty = true;
ForAllFreeListCategories([&empty](FreeListCategory* category) {
if (!category->is_empty()) empty = false;
});
return empty;
}
// Used after booting the VM.
void RepairLists(Heap* heap);
intptr_t EvictFreeListItems(Page* p);
bool ContainsPageFreeListItems(Page* p);
intptr_t EvictFreeListItems(Page* page);
bool ContainsPageFreeListItems(Page* page);
PagedSpace* owner() { return owner_; }
intptr_t wasted_bytes() { return wasted_bytes_; }
base::Mutex* mutex() { return &mutex_; }
intptr_t wasted_bytes() { return wasted_bytes_.Value(); }
template <typename Callback>
void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
FreeListCategory* current = categories_[type];
while (current != nullptr) {
FreeListCategory* next = current->next();
callback(current);
current = next;
}
}
template <typename Callback>
void ForAllFreeListCategories(Callback callback) {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
}
}
bool AddCategory(FreeListCategory* category);
void RemoveCategory(FreeListCategory* category);
void PrintCategories(FreeListCategoryType type);
#ifdef DEBUG
void Zap();
intptr_t SumFreeLists();
bool IsVeryLong();
#endif
private:
class FreeListCategoryIterator {
public:
FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
: current_(free_list->categories_[type]) {}
bool HasNext() { return current_ != nullptr; }
FreeListCategory* Next() {
DCHECK(HasNext());
FreeListCategory* tmp = current_;
current_ = current_->next();
return tmp;
}
private:
FreeListCategory* current_;
};
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
static const int kMaxBlockSize = Page::kAllocatableMemory;
@@ -1748,11 +1800,19 @@ class FreeList {
static const int kLargeAllocationMax = kMediumListMax;
FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
return &category_[category];
}
// Walks all available categories for a given |type| and tries to retrieve
// a node. Returns nullptr if the category is empty.
FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size);
// Tries to retrieve a node from the first category in a given |type|.
// Returns nullptr if the category is empty.
FreeSpace* TryFindNodeIn(FreeListCategoryType type, int* node_size,
int minimum_size);
// Searches a given |type| for a node of at least |minimum_size|.
FreeSpace* SearchForNodeInList(FreeListCategoryType type, int* node_size,
int minimum_size);
FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
if (size_in_bytes <= kTiniestListMax) {
@@ -1782,10 +1842,13 @@ class FreeList {
return kHuge;
}
FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
PagedSpace* owner_;
base::Mutex mutex_;
intptr_t wasted_bytes_;
FreeListCategory category_[kNumberOfCategories];
AtomicNumber<intptr_t> wasted_bytes_;
FreeListCategory* categories_[kNumberOfCategories];
friend class FreeListCategory;
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};
@@ -1887,7 +1950,6 @@ class LocalAllocationBuffer {
AllocationInfo allocation_info_;
};
class PagedSpace : public Space {
public:
static const intptr_t kCompactionMemoryWanted = 500 * KB;
@@ -1944,11 +2006,6 @@ class PagedSpace : public Space {
ResetFreeListStatistics();
}
// Increases the number of available bytes of that space.
void AddToAccountingStats(intptr_t bytes) {
accounting_stats_.DeallocateBytes(bytes);
}
// Available bytes without growing. These are the bytes on the free list.
// The bytes in the linear allocation area are not included in this total
// because updating the stats would slow down allocation. New pages are
@@ -2004,11 +2061,16 @@ class PagedSpace : public Space {
// If add_to_freelist is false then just accounting stats are updated and
// no attempt to add area to free list is made.
int Free(Address start, int size_in_bytes) {
int wasted = free_list_.Free(start, size_in_bytes);
int wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
accounting_stats_.DeallocateBytes(size_in_bytes);
return size_in_bytes - wasted;
}
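// Same as Free(), but does no accounting: capacity is added later, when
// RefillFreeList relinks this page's categories and picks up
// page->wasted_memory().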
int UnaccountedFree(Address start, int size_in_bytes) {
int wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
return size_in_bytes - wasted;
}
void ResetFreeList() { free_list_.Reset(); }
// Set space allocation info.
@@ -2033,7 +2095,7 @@
void IncreaseCapacity(int size);
// Releases an unused page and shrinks the space.
void ReleasePage(Page* page, bool evict_free_list_items);
void ReleasePage(Page* page);
// The dummy page that anchors the linked list of pages.
Page* anchor() { return &anchor_; }
@@ -2089,17 +2151,18 @@
// sweeper.
virtual void RefillFreeList();
protected:
void AddMemory(Address start, intptr_t size);
FreeList* free_list() { return &free_list_; }
void MoveOverFreeMemory(PagedSpace* other);
base::Mutex* mutex() { return &space_mutex_; }
inline void UnlinkFreeListCategories(Page* page);
inline intptr_t RelinkFreeListCategories(Page* page);
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
virtual bool snapshotable() { return true; }
FreeList* free_list() { return &free_list_; }
bool HasPages() { return anchor_.next_page() != &anchor_; }
// Cleans up the space, frees all pages in this space except those belonging
@@ -2793,8 +2856,6 @@ class CompactionSpace : public PagedSpace {
bool is_local() override { return true; }
void RefillFreeList() override;
protected:
// The space is temporary and not included in any snapshots.
bool snapshotable() override { return false; }
......
@@ -35,7 +35,7 @@ HEAP_TEST(CompactionFullAbortedPage) {
HandleScope scope1(isolate);
PageIterator it(heap->old_space());
while (it.has_next()) {
it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
it.next()->MarkNeverAllocateForTesting();
}
{
@@ -80,7 +80,7 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
HandleScope scope1(isolate);
PageIterator it(heap->old_space());
while (it.has_next()) {
it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
it.next()->MarkNeverAllocateForTesting();
}
{
@@ -155,7 +155,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
PageIterator it(heap->old_space());
while (it.has_next()) {
it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
it.next()->MarkNeverAllocateForTesting();
}
Page* to_be_aborted_page = nullptr;
@@ -241,7 +241,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
isolate->factory()->NewFixedArray(10, TENURED);
PageIterator it(heap->old_space());
while (it.has_next()) {
it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
it.next()->MarkNeverAllocateForTesting();
}
Page* to_be_aborted_page = nullptr;
......