Commit 18a2268f authored by Darius Mercadier, committed by Commit Bot

[heap] Add new freelist: FreeListManyCachedOrigin

This new FreeList should be a reasonable replacement for our old
FreeListLegacy: it is slightly less efficient (~1%), but uses much
less memory (often 5% less old_space size).

It is based on FreeListMany, with the following additions:

  - A cache to waste less time iterating empty categories

  - A fast path for allocations done in the runtime and generated code

  - A slow path (the same as FreeListMany actually) for allocations
    done in the GC.

Bug: v8:9329
Change-Id: Ifc10b88df7861266a721afd2c6e6d8357255ec4e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1762292
Commit-Queue: Darius Mercadier <dmercadier@google.com>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63345}
parent 73010d22
......@@ -830,7 +830,12 @@ DEFINE_BOOL(
"Combine with --no-inline-new to track allocations from generated code")
DEFINE_INT(gc_freelist_strategy, 0,
"Freelist strategy to use: "
"1=FreeListFastAlloc. 2=FreeListMany. Anything else=FreeListLegacy")
"0:FreeListLegacy. "
"1:FreeListFastAlloc. "
"2:FreeListMany. "
"3:FreeListManyCached. "
"4:FreeListManyCachedFastPath. "
"5:FreeListManyCachedOrigin. ")
DEFINE_INT(trace_allocation_stack_interval, -1,
"print stack trace after <n> free-list allocations")
......
......@@ -345,11 +345,12 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
return AllocationResult(HeapObject::FromAddress(current_top));
}
bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes) {
bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes,
AllocationOrigin origin) {
if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
return true;
}
return SlowRefillLinearAllocationArea(size_in_bytes);
return SlowRefillLinearAllocationArea(size_in_bytes, origin);
}
HeapObject PagedSpace::AllocateLinearly(int size_in_bytes) {
......@@ -381,7 +382,7 @@ HeapObject PagedSpace::TryAllocateLinearlyAligned(
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
if (!EnsureLinearAllocationArea(size_in_bytes)) {
if (!EnsureLinearAllocationArea(size_in_bytes, origin)) {
return AllocationResult::Retry(identity());
}
HeapObject object = AllocateLinearly(size_in_bytes);
......@@ -407,7 +408,7 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
// allocated, so assume the worst case.
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
if (!EnsureLinearAllocationArea(allocation_size)) {
if (!EnsureLinearAllocationArea(allocation_size, origin)) {
return AllocationResult::Retry(identity());
}
allocation_size = size_in_bytes;
......
This diff is collapsed.
This diff is collapsed.
......@@ -143,5 +143,146 @@ TEST_F(SpacesTest, CodeRangeAddressReuse) {
EXPECT_EQ(code_range6, code_range3);
}
// Tests that FreeListMany::SelectFreeListCategoryType returns what it should.
TEST_F(SpacesTest, FreeListManySelectFreeListCategoryType) {
  FreeListMany free_list;

  // Testing that all sizes below 256 bytes get assigned the correct category
  for (size_t size = 0; size <= FreeListMany::kPreciseCategoryMaxSize; size++) {
    FreeListCategoryType cat = free_list.SelectFreeListCategoryType(size);
    if (cat == 0) {
      // If cat == 0, then we make sure that |size| doesn't fit in the 2nd
      // category.
      EXPECT_LT(size, free_list.categories_min[1]);
    } else {
      // Otherwise, size should fit in |cat|, but not in |cat+1|.
      EXPECT_LE(free_list.categories_min[cat], size);
      EXPECT_LT(size, free_list.categories_min[cat + 1]);
    }
  }

  // Testing every size above 256 would take long time, so test only some
  // "interesting cases": picking some number in the middle of the categories,
  // as well as at the categories' bounds.
  for (int cat = kFirstCategory + 1; cat <= free_list.last_category_; cat++) {
    std::vector<size_t> sizes;
    // Adding size less than this category's minimum
    sizes.push_back(free_list.categories_min[cat] - 8);
    // Adding size equal to this category's minimum
    sizes.push_back(free_list.categories_min[cat]);
    // Adding size greater than this category's minimum
    sizes.push_back(free_list.categories_min[cat] + 8);
    // Adding size between this category's minimum and the next category
    if (cat != free_list.last_category_) {
      sizes.push_back(
          (free_list.categories_min[cat] + free_list.categories_min[cat + 1]) /
          2);
    }

    for (size_t size : sizes) {
      // Named |selected| (not |cat|) to avoid shadowing the outer loop
      // variable.
      FreeListCategoryType selected =
          free_list.SelectFreeListCategoryType(size);
      if (selected == free_list.last_category_) {
        // If |selected| == last_category, then we make sure that |size|
        // indeed fits in the last category.
        EXPECT_LE(free_list.categories_min[selected], size);
      } else {
        // Otherwise, size should fit in |selected|, but not in |selected+1|.
        EXPECT_LE(free_list.categories_min[selected], size);
        EXPECT_LT(size, free_list.categories_min[selected + 1]);
      }
    }
  }
}
// Tests that FreeListMany::GuaranteedAllocatable returns what it should.
TEST_F(SpacesTest, FreeListManyGuaranteedAllocatable) {
  FreeListMany free_list;

  // Iterate up to and including the last category (<=, like the sibling
  // tests): with a strict < bound, the "cat != last_category_" guard below
  // would be dead code and the last category would never be exercised.
  for (int cat = kFirstCategory; cat <= free_list.last_category_; cat++) {
    std::vector<size_t> sizes;
    // Adding size less than this category's minimum
    sizes.push_back(free_list.categories_min[cat] - 8);
    // Adding size equal to this category's minimum
    sizes.push_back(free_list.categories_min[cat]);
    // Adding size greater than this category's minimum
    sizes.push_back(free_list.categories_min[cat] + 8);
    if (cat != free_list.last_category_) {
      // Adding size between this category's minimum and the next category
      sizes.push_back(
          (free_list.categories_min[cat] + free_list.categories_min[cat + 1]) /
          2);
    }

    for (size_t size : sizes) {
      FreeListCategoryType cat_free =
          free_list.SelectFreeListCategoryType(size);
      size_t guaranteed_allocatable = free_list.GuaranteedAllocatable(size);
      if (cat_free == free_list.last_category_) {
        // If |cat_free| == last_category, then guaranteed_allocatable must
        // return the last category, because when allocating, the last category
        // is searched entirely.
        EXPECT_EQ(free_list.SelectFreeListCategoryType(guaranteed_allocatable),
                  free_list.last_category_);
      } else if (size < free_list.categories_min[0]) {
        // If size < free_list.categories_min[0], then the bytes are wasted, and
        // guaranteed_allocatable should return 0.
        EXPECT_EQ(guaranteed_allocatable, 0ul);
      } else {
        // Otherwise, |guaranteed_allocatable| is equal to the minimum of
        // |size|'s category (|cat_free|);
        EXPECT_EQ(free_list.categories_min[cat_free], guaranteed_allocatable);
      }
    }
  }
}
// Tests that
// FreeListManyCachedFastPath::SelectFastAllocationFreeListCategoryType returns
// what it should.
TEST_F(SpacesTest,
       FreeListManyCachedFastPathSelectFastAllocationFreeListCategoryType) {
  FreeListManyCachedFastPath free_list;

  for (int cat = kFirstCategory; cat <= free_list.last_category_; cat++) {
    std::vector<size_t> sizes;
    // Adding size less than this category's minimum
    sizes.push_back(free_list.categories_min[cat] - 8);
    // Adding size equal to this category's minimum
    sizes.push_back(free_list.categories_min[cat]);
    // Adding size greater than this category's minimum
    sizes.push_back(free_list.categories_min[cat] + 8);
    // Adding size between this category's minimum and the next category
    if (cat != free_list.last_category_) {
      sizes.push_back(
          (free_list.categories_min[cat] + free_list.categories_min[cat + 1]) /
          2);
    }

    for (size_t size : sizes) {
      // Named |selected| (not |cat|) to avoid shadowing the outer loop
      // variable.
      FreeListCategoryType selected =
          free_list.SelectFastAllocationFreeListCategoryType(size);
      if (size <= FreeListManyCachedFastPath::kTinyObjectMaxSize) {
        // For tiny objects, the first category of the fast path should be
        // chosen.
        EXPECT_EQ(selected, FreeListManyCachedFastPath::kFastPathFirstCategory);
      } else if (size >= free_list.categories_min[free_list.last_category_] -
                             FreeListManyCachedFastPath::kFastPathOffset) {
        // For objects close to the minimum of the last category, the last
        // category is chosen.
        EXPECT_EQ(selected, free_list.last_category_);
      } else {
        // For other objects, the chosen category must satisfy that its minimum
        // is at least |size|+1.85k.
        EXPECT_GE(free_list.categories_min[selected],
                  size + FreeListManyCachedFastPath::kFastPathOffset);
        // And the next smaller category's minimum is less than |size|+1.85k
        // (otherwise it would have been chosen instead).
        EXPECT_LT(free_list.categories_min[selected - 1],
                  size + FreeListManyCachedFastPath::kFastPathOffset);
      }
    }
  }
}
} // namespace internal
} // namespace v8
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment