Commit f4583702 authored by Darius Mercadier, committed by Commit Bot

[heap] Fix perf bug on PagedSpace::Available()

PagedSpace::Available() is mostly used for counters. One might expect
it to be constant-time, or at least barely noticeable
performance-wise, but its cost is actually linear in the number of
freelists and in the number of pages in those freelists. Overall,
d8 --prof showed that it has an important runtime cost and prevents
freelists from scaling.
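
For context, this is the pre-CL implementation, simplified from the
lines this CL removes in the diff below; every call walks all linked
categories:

    // Before this CL (simplified): O(number of linked categories)
    // on every call.
    size_t Available() {
      size_t available = 0;
      ForAllFreeListCategories([&available](FreeListCategory* category) {
        available += category->available();
      });
      return available;
    }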

This CL makes this counter constant-time, and should improve
performance even with our current FreeList strategy (FreeListLegacy).
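
In outline, the fix caches the total in an atomic available_ field on
FreeList, adjusted on every link, unlink, free, and allocation, so the
read becomes constant-time (simplified from the diff below):

    // After this CL (simplified): Available() is a constant-time read
    // of a cached counter; a debug-only check verifies it against the
    // old linear walk.
    size_t Available() {
      DCHECK(available_ == SumFreeLists());
      return available_;
    }

    void IncreaseAvailableBytes(size_t bytes) { available_ += bytes; }
    void DecreaseAvailableBytes(size_t bytes) { available_ -= bytes; }

    std::atomic<size_t> available_{0};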

Bug: v8:9329
Bug: v8:9093
Change-Id: I7682c5debc78498fe46e8dbce70b2fbd540b0fd0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1746473
Commit-Queue: Darius Mercadier <dmercadier@google.com>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63197}
parent 48d56283
@@ -173,8 +173,8 @@ void PagedSpace::UnlinkFreeListCategories(Page* page) {
   DCHECK_EQ(this, page->owner());
   page->ForAllFreeListCategories([this](FreeListCategory* category) {
     DCHECK_EQ(free_list(), category->owner());
-    category->set_free_list(nullptr);
     free_list()->RemoveCategory(category);
+    category->set_free_list(nullptr);
   });
 }
@@ -322,6 +322,12 @@ bool FreeListCategory::is_linked() {
          free_list_->categories_[type_] == this;
 }
 
+void FreeListCategory::UpdateCountersAfterAllocation(size_t allocation_size) {
+  available_ -= allocation_size;
+  length_--;
+  free_list_->DecreaseAvailableBytes(allocation_size);
+}
+
 AllocationResult LocalAllocationBuffer::AllocateRawAligned(
     int size_in_bytes, AllocationAlignment alignment) {
   Address current_top = allocation_info_.top();
......
@@ -1991,7 +1991,6 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
   size_t new_node_size = 0;
   FreeSpace new_node = free_list_->Allocate(size_in_bytes, &new_node_size);
   if (new_node.is_null()) return false;
-  DCHECK_GE(new_node_size, size_in_bytes);
 
   // The old-space-step might have finished sweeping and restarted marking.
@@ -2912,6 +2911,9 @@ size_t NewSpace::CommittedPhysicalMemory() {
 void FreeListCategory::Reset() {
+  if (is_linked() && !top().is_null()) {
+    owner()->DecreaseAvailableBytes(available_);
+  }
   set_top(FreeSpace());
   set_prev(nullptr);
   set_next(nullptr);
@@ -2930,8 +2932,7 @@ FreeSpace FreeListCategory::PickNodeFromList(size_t minimum_size,
   }
   set_top(node.next());
   *node_size = node.Size();
-  available_ -= *node_size;
-  length_--;
+  UpdateCountersAfterAllocation(*node_size);
   return node;
 }
@@ -2944,8 +2945,7 @@ FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
     size_t size = cur_node.size();
     if (size >= minimum_size) {
       DCHECK_GE(available_, size);
-      available_ -= size;
-      length_--;
+      UpdateCountersAfterAllocation(size);
       if (cur_node == top()) {
         set_top(cur_node.next());
       }
@@ -2972,8 +2972,12 @@ void FreeListCategory::Free(Address start, size_t size_in_bytes,
   set_top(free_space);
   available_ += size_in_bytes;
   length_++;
-  if ((mode == kLinkCategory) && !is_linked()) {
-    owner()->AddCategory(this);
+  if (mode == kLinkCategory) {
+    if (is_linked()) {
+      owner()->IncreaseAvailableBytes(size_in_bytes);
+    } else {
+      owner()->AddCategory(this);
+    }
   }
 }
@@ -3278,6 +3282,7 @@ void FreeList::Reset() {
     categories_[i] = nullptr;
   }
   wasted_bytes_ = 0;
+  available_ = 0;
 }
 
 size_t FreeList::EvictFreeListItems(Page* page) {
@@ -3321,6 +3326,8 @@ bool FreeList::AddCategory(FreeListCategory* category) {
   }
   category->set_next(top);
   categories_[type] = category;
+
+  IncreaseAvailableBytes(category->available());
   return true;
 }
@@ -3329,6 +3336,10 @@ void FreeList::RemoveCategory(FreeListCategory* category) {
   DCHECK_LT(type, number_of_categories_);
   FreeListCategory* top = categories_[type];
+
+  if (category->is_linked()) {
+    DecreaseAvailableBytes(category->available());
+  }
 
   // Common double-linked list removal.
   if (top == category) {
     categories_[type] = category->next();
......
@@ -190,6 +190,10 @@ class FreeListCategory {
   // {kVeryLongFreeList} by manually walking the list.
   static const int kVeryLongFreeList = 500;
 
+  // Updates |available_|, |length_| and free_list_->Available() after an
+  // allocation of size |allocation_size|.
+  inline void UpdateCountersAfterAllocation(size_t allocation_size);
+
   FreeSpace top() { return top_; }
   void set_top(FreeSpace top) { top_ = top; }
   FreeListCategory* prev() { return prev_; }
@@ -266,13 +270,14 @@ class FreeList {
   // Return the number of bytes available on the free list.
   size_t Available() {
-    size_t available = 0;
-    ForAllFreeListCategories([&available](FreeListCategory* category) {
-      available += category->available();
-    });
-    return available;
+    DCHECK(available_ == SumFreeLists());
+    return available_;
   }
 
+  // Update number of available bytes on the Freelists.
+  void IncreaseAvailableBytes(size_t bytes) { available_ += bytes; }
+  void DecreaseAvailableBytes(size_t bytes) { available_ -= bytes; }
+
   bool IsEmpty() {
     bool empty = true;
     ForAllFreeListCategories([&empty](FreeListCategory* category) {
@@ -313,11 +318,6 @@
   V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
   void PrintCategories(FreeListCategoryType type);
 
-#ifdef DEBUG
-  size_t SumFreeLists();
-  bool IsVeryLong();
-#endif
-
  protected:
   class FreeListCategoryIterator final {
    public:
@@ -337,6 +337,11 @@
     FreeListCategory* current_;
   };
 
+#ifdef DEBUG
+  V8_EXPORT_PRIVATE size_t SumFreeLists();
+  bool IsVeryLong();
+#endif
+
   // Tries to retrieve a node from the first category in a given |type|.
   // Returns nullptr if the category is empty or the top entry is smaller
   // than minimum_size.
@@ -367,6 +372,9 @@
   std::atomic<size_t> wasted_bytes_{0};
   FreeListCategory** categories_ = nullptr;
 
+  // |available_|: The number of bytes in this freelist.
+  std::atomic<size_t> available_{0};
+
   friend class FreeListCategory;
   friend class Page;
   friend class MemoryChunk;
@@ -3040,6 +3048,8 @@ class ReadOnlySpace : public PagedSpace {
   // to write it into the free list nodes that were already created.
   void RepairFreeListsAfterDeserialization();
 
+  size_t Available() override { return 0; }
+
  private:
   // Unseal the space after is has been sealed, by making it writable.
   // TODO(v8:7464): Only possible if the space hasn't been detached.
......
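
To see the bookkeeping in isolation, here is a hypothetical,
self-contained model of the invariant this CL maintains (not V8 code;
all names are invented for illustration): only linked categories
contribute to the cached total, and every mutation adjusts it in O(1).

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Hypothetical standalone model (not V8 code) of the cached-counter
    // invariant: the freelist's total is kept up to date incrementally
    // instead of being recomputed by walking every category.
    class Category {
     public:
      void Add(size_t bytes) { available_ += bytes; }
      size_t available() const { return available_; }

     private:
      size_t available_ = 0;
    };

    class FreeListModel {
     public:
      // Linking a category folds its current bytes into the cached total,
      // mirroring FreeList::AddCategory -> IncreaseAvailableBytes.
      void Link(Category* c) {
        linked_.push_back(c);
        available_ += c->available();
      }
      // Freeing into an already-linked category updates the cache
      // directly, mirroring FreeListCategory::Free with kLinkCategory.
      void FreeInto(Category* c, size_t bytes) {
        c->Add(bytes);
        available_ += bytes;
      }
      // Constant-time read; no walk over the categories.
      size_t Available() const { return available_; }

     private:
      std::vector<Category*> linked_;
      size_t available_ = 0;
    };

    int main() {
      FreeListModel fl;
      Category c;
      c.Add(128);       // bytes accumulated before the category is linked
      fl.Link(&c);      // cached total picks up the pre-existing 128 bytes
      fl.FreeInto(&c, 64);
      assert(fl.Available() == 192);
      assert(fl.Available() == c.available());  // cache matches a full walk
      return 0;
    }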