Commit b0abfb4f authored by Darius Mercadier's avatar Darius Mercadier Committed by Commit Bot

[heap] Add fast allocation strategy (--gc-freelist-strategy=1)

This CL adds a new FreeList strategy, that can be turned on by using flag
`--gc-freelist-strategy=1`.  It is inspired by FreeListLegacy, and differs from
it in the following ways:
 - Only has 3 categories: Medium, Large and Huge.
 - Any block that would have belonged to tiniest, tiny or small in
   FreeListLegacy is considered wasted.
 - Allocation is done only in Huge, Large and Medium (in that order), using a
   first-fit strategy (only the first block of each freelist is ever considered
   though).
 - Performance is supposed to be better than FreeListLegacy, but memory usage
   should be higher (because fragmentation will probably be higher).

Bug: v8:9329
Change-Id: Ib399196788f1dfaa1aeddc3dc721375dd7da65f1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1697248
Commit-Queue: Darius Mercadier <dmercadier@google.com>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62667}
parent 77a17022
......@@ -785,6 +785,9 @@ DEFINE_BOOL(trace_gc_freelists_verbose, false,
DEFINE_IMPLICATION(trace_gc_freelists_verbose, trace_gc_freelists)
DEFINE_BOOL(trace_evacuation_candidates, false,
"Show statistics about the pages evacuation by the compaction")
DEFINE_INT(gc_freelist_strategy, 0,
"Freelist strategy to use: "
"1=FreeListFastAlloc. Anything else=FreeListLegacy")
DEFINE_INT(trace_allocation_stack_interval, -1,
"print stack trace after <n> free-list allocations")
......
......@@ -2983,7 +2983,13 @@ void FreeListCategory::Relink() {
owner()->AddCategory(this);
}
FreeList* FreeList::CreateFreeList() { return new FreeListLegacy(); }
// Factory for the active free-list implementation, selected via the
// --gc-freelist-strategy flag: 1 picks FreeListFastAlloc, any other value
// falls back to FreeListLegacy.
FreeList* FreeList::CreateFreeList() {
  const bool use_fast_alloc = FLAG_gc_freelist_strategy == 1;
  if (use_fast_alloc) return new FreeListFastAlloc();
  return new FreeListLegacy();
}
FreeListLegacy::FreeListLegacy() {
wasted_bytes_ = 0;
......@@ -3103,6 +3109,72 @@ FreeSpace FreeListLegacy::Allocate(size_t size_in_bytes, size_t* node_size) {
return node;
}
// Sets up the category table. Category indices run from kMedium (0) up to and
// including kHuge, hence kHuge + 1 slots. The value-initializing new[]()
// zeroes every FreeListCategory* slot before Reset() runs.
FreeListFastAlloc::FreeListFastAlloc() {
  wasted_bytes_ = 0;
  number_of_categories_ = kHuge + 1;
  last_category_ = kHuge;
  categories_ = new FreeListCategory*[number_of_categories_]();
  Reset();
}
FreeListFastAlloc::~FreeListFastAlloc() { delete[] categories_; }
// Returns a freed block to the appropriate free list, or records it as wasted
// when it is too small to be reusable. The return value is the number of
// bytes NOT added to a free list: 0 on success, size_in_bytes when wasted.
size_t FreeListFastAlloc::Free(Address start, size_t size_in_bytes,
                               FreeMode mode) {
  Page* page = Page::FromAddress(start);
  page->DecreaseAllocatedBytes(size_in_bytes);

  // Blocks have to be a minimum size to hold free list items.
  // Note: kMinBlockSize here (0xff * kTaggedSize) is much larger than
  // FreeListLegacy's (3 * kTaggedSize), so blocks the legacy list would have
  // kept in its tiniest/tiny/small categories are wasted instead.
  if (size_in_bytes < kMinBlockSize) {
    page->add_wasted_memory(size_in_bytes);
    wasted_bytes_ += size_in_bytes;
    return size_in_bytes;
  }

  // Insert other blocks at the head of a free list of the appropriate
  // magnitude.
  FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
  page->free_list_category(type)->Free(start, size_in_bytes, mode);
  DCHECK_EQ(page->AvailableInFreeList(),
            page->AvailableInFreeListFromAllocatedBytes());
  return 0;
}
// Tries to pick a node of at least |minimum_size| from the first category of
// the given |type|. Returns an empty FreeSpace when the type has no category
// or no suitable node could be picked from it.
FreeSpace FreeListFastAlloc::TryFindNodeIn(FreeListCategoryType type,
                                           size_t minimum_size,
                                           size_t* node_size) {
  FreeListCategory* category = categories_[type];
  if (category == nullptr) return FreeSpace();
  FreeSpace node = category->PickNodeFromList(minimum_size, node_size);
  if (!node.is_null()) {
    DCHECK(IsVeryLong() || Available() == SumFreeLists());
  }
  // Drop the category from the free list once it has been emptied, so it is
  // not considered again.
  if (category->is_empty()) {
    RemoveCategory(category);
  }
  return node;
}
// Allocates a node of at least |size_in_bytes| bytes. Categories are probed
// from kHuge down to the smallest category able to hold the request (i.e.
// Huge, then Large, then Medium), taking the first fit found. Returns an
// empty FreeSpace when no category can serve the request.
FreeSpace FreeListFastAlloc::Allocate(size_t size_in_bytes, size_t* node_size) {
  DCHECK_GE(kMaxBlockSize, size_in_bytes);
  FreeSpace node;
  // Try to allocate the biggest element possible (to make the most of later
  // bump-pointer allocations).
  FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
  for (int i = kHuge; i >= type && node.is_null(); i--) {
    node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
                         node_size);
  }

  if (!node.is_null()) {
    // Keep the owning page's accounting in sync with the bytes handed out.
    Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
  }

  DCHECK(IsVeryLong() || Available() == SumFreeLists());
  return node;
}
size_t FreeList::EvictFreeListItems(Page* page) {
size_t sum = 0;
page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
......
......@@ -240,7 +240,19 @@ class FreeList {
virtual ~FreeList() = default;
virtual size_t GuaranteedAllocatable(size_t maximum_freed) = 0;
  // Adds a node on the free list. The block of size {size_in_bytes} starting
  // at {start} is placed on the free list. The return value is the number of
  // bytes that were not added to the free list, because the freed memory block
  // was too small. Bookkeeping information will be written to the block, i.e.,
  // its contents will be destroyed. The start address should be word aligned,
  // and the size should be a non-zero multiple of the word size.
  virtual size_t Free(Address start, size_t size_in_bytes, FreeMode mode) = 0;

  // Allocates a free space node from the free list of at least size_in_bytes
  // bytes. Returns the actual node size in node_size which can be bigger than
  // size_in_bytes. This method returns an empty FreeSpace if the allocation
  // request cannot be handled by the free list.
  virtual V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
                                                   size_t* node_size) = 0;
......@@ -1809,22 +1821,12 @@ class V8_EXPORT_PRIVATE FreeListLegacy : public FreeList {
FreeListLegacy();
~FreeListLegacy();
  // Adds a node on the free list. The block of size {size_in_bytes} starting
  // at {start} is placed on the free list. The return value is the number of
  // bytes that were not added to the free list, because the freed memory block
  // was too small. Bookkeeping information will be written to the block, i.e.,
  // its contents will be destroyed. The start address should be word aligned,
  // and the size should be a non-zero multiple of the word size.
  size_t Free(Address start, size_t size_in_bytes, FreeMode mode) override;

  // Allocates a free space node from the free list of at least size_in_bytes
  // bytes. Returns the actual node size in node_size which can be bigger than
  // size_in_bytes. This method returns an empty FreeSpace if the allocation
  // request cannot be handled by the free list.
  V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
                                           size_t* node_size) override;
private:
protected:
enum { kTiniest, kTiny, kSmall, kMedium, kLarge, kHuge };
static const size_t kMinBlockSize = 3 * kTaggedSize;
......@@ -1874,6 +1876,81 @@ class V8_EXPORT_PRIVATE FreeListLegacy : public FreeList {
friend class heap::HeapTester;
};
// Inspired by FreeListLegacy.
// Only has 3 categories: Medium, Large and Huge.
// Any block that would have belonged to tiniest, tiny or small in
// FreeListLegacy is considered wasted.
// Allocation is done only in Huge, Large and Medium (in that order),
// using a first-fit strategy (only the first block of each freelist is ever
// considered though). Performance is supposed to be better than
// FreeListLegacy, but memory usage should be higher (because fragmentation
// will probably be higher).
class V8_EXPORT_PRIVATE FreeListFastAlloc : public FreeList {
 public:
  // Returns how many bytes a request is guaranteed to be able to allocate
  // from a free list whose largest node is |maximum_freed|. Conservative for
  // the Medium category because only list heads are ever inspected.
  size_t GuaranteedAllocatable(size_t maximum_freed) override {
    if (maximum_freed <= kMediumListMax) {
      // Since we are not iterating over all list entries, we cannot guarantee
      // that we can find the maximum freed block in that free list.
      return 0;
    } else if (maximum_freed <= kLargeListMax) {
      return kLargeAllocationMax;
    }
    return maximum_freed;
  }

  // Maps a block size to the smallest category able to hold it.
  FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
    if (size_in_bytes <= kMediumListMax) {
      return kMedium;
    } else if (size_in_bytes <= kLargeListMax) {
      return kLarge;
    }
    return kHuge;
  }

  // Returns a page holding a category able to serve |size_in_bytes|,
  // preferring larger categories (Huge, then Large, then Medium), or nullptr
  // if no suitable page exists.
  Page* GetPageForSize(size_t size_in_bytes) override {
    const int minimum_category =
        static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
    Page* page = GetPageForCategoryType(kHuge);
    if (!page && static_cast<int>(kLarge) >= minimum_category)
      page = GetPageForCategoryType(kLarge);
    if (!page && static_cast<int>(kMedium) >= minimum_category)
      page = GetPageForCategoryType(kMedium);
    return page;
  }

  FreeListFastAlloc();
  ~FreeListFastAlloc();

  size_t Free(Address start, size_t size_in_bytes, FreeMode mode) override;

  V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
                                           size_t* node_size) override;

 protected:
  // Category boundaries (inclusive upper bounds):
  //   Medium: [kMinBlockSize, kMediumListMax]
  //   Large:  (kMediumListMax, kLargeListMax]
  //   Huge:   (kLargeListMax, ...]
  enum { kMedium, kLarge, kHuge };

  // Blocks smaller than this are recorded as wasted rather than kept on a
  // free list (see Free()).
  static const size_t kMinBlockSize = 0xff * kTaggedSize;

  // This is a conservative upper bound. The actual maximum block size takes
  // padding and alignment of data and code pages into account.
  static const size_t kMaxBlockSize = Page::kPageSize;

  static const size_t kMediumListMax = 0x7ff * kTaggedSize;
  static const size_t kLargeListMax = 0x1fff * kTaggedSize;
  static const size_t kMediumAllocationMax = kMinBlockSize;
  static const size_t kLargeAllocationMax = kMediumListMax;

  // Tries to retrieve a node from the first category of the given |type|.
  // Returns an empty FreeSpace if the category is empty or its top entry is
  // smaller than minimum_size.
  FreeSpace TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
                          size_t* node_size);

  // Returns the page of the first category of |type|, or nullptr if that
  // category list is empty.
  Page* GetPageForCategoryType(FreeListCategoryType type) {
    return top(type) ? top(type)->page() : nullptr;
  }
};
// LocalAllocationBuffer represents a linear allocation area that is created
// from a given {AllocationResult} and can be used to allocate memory without
// synchronization.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment