Commit 7af731c9 authored by Omer Katz, committed by Commit Bot

cppgc: Implement lazy sweeping on allocation

Sweep page by page in the space until we find a slot big enough for the
current allocation.
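
Roughly, the new sweep-on-allocation path works like the sketch below (simplified from the patch; the stats scopes and the MutatorThreadSweepingScope guard are omitted):

  // Illustrative sketch of SweepForAllocationIfRunning (not the exact code).
  bool SweepForAllocationIfRunning(NormalPageSpace* space, size_t size) {
    if (!is_in_progress_ || is_sweeping_on_mutator_thread_) return false;
    SpaceState& state = space_states_[space->index()];
    // Finalizing an already-swept page is cheaper than sweeping a page, so
    // drain the swept-but-unfinalized pages first.
    SweepFinalizer finalizer(platform_);
    while (auto page = state.swept_unfinalized_pages.Pop()) {
      finalizer.FinalizePage(&*page);
      if (size <= finalizer.largest_new_free_list_entry()) return true;
    }
    // Otherwise sweep the space's unswept pages one by one until a free-list
    // entry of at least |size| bytes has been produced.
    MutatorThreadSweeper sweeper(&space_states_, platform_);
    while (auto page = state.unswept_pages.Pop()) {
      sweeper.SweepPage(*page);
      if (size <= sweeper.largest_new_free_list_entry()) return true;
    }
    // No big-enough slot: the allocator finishes sweeping and adds a new page.
    return false;
  }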

Bug: chromium:1056170
Change-Id: Id6dcf2d4db20268090b4626340bbed44f67d053c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2649259
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72369}
parent 3e622204
@@ -136,14 +136,15 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace* space,
// 3. Lazily sweep pages of this heap until we find a freed area for
// this allocation or we finish sweeping all pages of this heap.
// {
// StatsCollector::EnabledScope stats_scope(
// *space->raw_heap()->heap(), StatsCollector::kSweepOnAllocation);
// // TODO(chromium:1056170): Add lazy sweep.
// }
Sweeper& sweeper = raw_heap_->heap()->sweeper();
if (sweeper.SweepForAllocationIfRunning(space, size)) {
void* result = AllocateFromFreeList(space, size, gcinfo);
DCHECK_NOT_NULL(result);
return result;
}
// 4. Complete sweeping.
raw_heap_->heap()->sweeper().FinishIfRunning();
sweeper.FinishIfRunning();
// 5. Add a new page to this heap.
auto* new_page = NormalPage::Create(page_backend_, space);
@@ -104,6 +104,7 @@ struct SpaceState {
FreeList cached_free_list;
std::vector<FreeList::Block> unfinalized_free_list;
bool is_empty = false;
size_t largest_new_free_list_entry = 0;
};
ThreadSafeStack<BasePage*> unswept_pages;
@@ -122,7 +123,10 @@ void StickyUnmark(HeapObjectHeader* header) {
// Builder that finalizes objects and adds freelist entries right away.
class InlinedFinalizationBuilder final {
public:
using ResultType = bool;
struct ResultType {
bool is_empty = false;
size_t largest_new_free_list_entry = 0;
};
explicit InlinedFinalizationBuilder(BasePage* page) : page_(page) {}
@@ -136,7 +140,9 @@ class InlinedFinalizationBuilder final {
space->free_list().Add({start, size});
}
ResultType GetResult(bool is_empty) { return is_empty; }
ResultType GetResult(bool is_empty, size_t largest_new_free_list_entry) {
return {is_empty, largest_new_free_list_entry};
}
private:
BasePage* page_;
@@ -167,8 +173,9 @@ class DeferredFinalizationBuilder final {
found_finalizer_ = false;
}
ResultType&& GetResult(bool is_empty) {
ResultType&& GetResult(bool is_empty, size_t largest_new_free_list_entry) {
result_.is_empty = is_empty;
result_.largest_new_free_list_entry = largest_new_free_list_entry;
return std::move(result_);
}
@@ -185,6 +192,8 @@ typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
PlatformAwareObjectStartBitmap& bitmap = page->object_start_bitmap();
bitmap.Clear();
size_t largest_new_free_list_entry = 0;
Address start_of_gap = page->PayloadStart();
for (Address begin = page->PayloadStart(), end = page->PayloadEnd();
begin != end;) {
@@ -205,8 +214,11 @@ typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
// The object is alive.
const Address header_address = reinterpret_cast<Address>(header);
if (start_of_gap != header_address) {
builder.AddFreeListEntry(
start_of_gap, static_cast<size_t>(header_address - start_of_gap));
size_t new_free_list_entry_size =
static_cast<size_t>(header_address - start_of_gap);
builder.AddFreeListEntry(start_of_gap, new_free_list_entry_size);
largest_new_free_list_entry =
std::max(largest_new_free_list_entry, new_free_list_entry_size);
bitmap.SetBit(start_of_gap);
}
StickyUnmark(header);
@@ -223,7 +235,7 @@ typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
}
const bool is_empty = (start_of_gap == page->PayloadStart());
return builder.GetResult(is_empty);
return builder.GetResult(is_empty, largest_new_free_list_entry);
}
// SweepFinalizer is responsible for heap/space/page finalization. Finalization
@@ -297,12 +309,20 @@ class SweepFinalizer final {
space_freelist.Add(std::move(entry));
}
largest_new_free_list_entry_ = std::max(
page_state->largest_new_free_list_entry, largest_new_free_list_entry_);
// Add the page to the space.
page->space()->AddPage(page);
}
size_t largest_new_free_list_entry() const {
return largest_new_free_list_entry_;
}
private:
cppgc::Platform* platform_;
size_t largest_new_free_list_entry_ = 0;
};
class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
@@ -315,11 +335,13 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
void Sweep() {
for (SpaceState& state : *states_) {
while (auto page = state.unswept_pages.Pop()) {
Traverse(*page);
SweepPage(*page);
}
}
}
void SweepPage(BasePage* page) { Traverse(page); }
bool SweepWithDeadline(double deadline_in_seconds) {
DCHECK(platform_);
static constexpr double kSlackInSeconds = 0.001;
@@ -345,6 +367,10 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
return true;
}
size_t largest_new_free_list_entry() const {
return largest_new_free_list_entry_;
}
private:
bool SweepSpaceWithDeadline(SpaceState* state, double deadline_in_seconds) {
static constexpr size_t kDeadlineCheckInterval = 8;
@@ -362,11 +388,14 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
}
bool VisitNormalPage(NormalPage* page) {
const bool is_empty = SweepNormalPage<InlinedFinalizationBuilder>(page);
if (is_empty) {
const InlinedFinalizationBuilder::ResultType result =
SweepNormalPage<InlinedFinalizationBuilder>(page);
if (result.is_empty) {
NormalPage::Destroy(page);
} else {
page->space()->AddPage(page);
largest_new_free_list_entry_ = std::max(
result.largest_new_free_list_entry, largest_new_free_list_entry_);
}
return true;
}
@@ -385,6 +414,7 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
SpaceStates* states_;
cppgc::Platform* platform_;
size_t largest_new_free_list_entry_ = 0;
};
class ConcurrentSweepTask final : public cppgc::JobTask,
@@ -522,6 +552,43 @@ class Sweeper::SweeperImpl final {
}
}
bool SweepForAllocationIfRunning(NormalPageSpace* space, size_t size) {
if (!is_in_progress_) return false;
// Bail out for recursive sweeping calls. This can happen when finalizers
// allocate new memory.
if (is_sweeping_on_mutator_thread_) return false;
StatsCollector::EnabledScope stats_scope(*heap_->heap(),
StatsCollector::kIncrementalSweep);
StatsCollector::EnabledScope inner_scope(
*heap_->heap(), StatsCollector::kSweepOnAllocation);
MutatorThreadSweepingScope sweeping_in_progresss(*this);
SpaceState& space_state = space_states_[space->index()];
{
// First, process unfinalized pages as finalizing a page is faster than
// sweeping.
SweepFinalizer finalizer(platform_);
while (auto page = space_state.swept_unfinalized_pages.Pop()) {
finalizer.FinalizePage(&*page);
if (size <= finalizer.largest_new_free_list_entry()) return true;
}
}
{
// Then, if no matching slot is found in the unfinalized pages, search the
// unswept pages. This also helps out the concurrent sweeper.
MutatorThreadSweeper sweeper(&space_states_, platform_);
while (auto page = space_state.unswept_pages.Pop()) {
sweeper.SweepPage(*page);
if (size <= sweeper.largest_new_free_list_entry()) return true;
}
}
return false;
}
void FinishIfRunning() {
if (!is_in_progress_) return;
@@ -717,6 +784,9 @@ void Sweeper::WaitForConcurrentSweepingForTesting() {
impl_->WaitForConcurrentSweepingForTesting();
}
void Sweeper::NotifyDoneIfNeeded() { impl_->NotifyDoneIfNeeded(); }
bool Sweeper::SweepForAllocationIfRunning(NormalPageSpace* space, size_t size) {
return impl_->SweepForAllocationIfRunning(space, size);
}
bool Sweeper::IsSweepingOnMutatorThread() const {
return impl_->IsSweepingOnMutatorThread();
}
@@ -19,6 +19,7 @@ namespace internal {
class StatsCollector;
class RawHeap;
class ConcurrentSweeperTest;
class NormalPageSpace;
class V8_EXPORT_PRIVATE Sweeper final {
public:
@@ -41,6 +42,10 @@ class V8_EXPORT_PRIVATE Sweeper final {
void Start(SweepingConfig);
void FinishIfRunning();
void NotifyDoneIfNeeded();
// SweepForAllocationIfRunning sweeps the given |space| until a slot that can
// fit an allocation of size |size| is found. Returns true if a slot was
// found.
bool SweepForAllocationIfRunning(NormalPageSpace* space, size_t size);
bool IsSweepingOnMutatorThread() const;
@@ -269,5 +269,62 @@ TEST_F(SweeperTest, UnmarkObjects) {
#endif
}
TEST_F(SweeperTest, LazySweepingDuringAllocation) {
using GCedObject = GCed<256>;
static const size_t kObjectsPerPage =
NormalPage::PayloadSize() /
(sizeof(GCedObject) + sizeof(HeapObjectHeader));
// This test expects each page to contain at least 2 objects.
DCHECK_LT(2u, kObjectsPerPage);
PreciseGC();
std::vector<Persistent<GCedObject>> first_page;
first_page.push_back(MakeGarbageCollected<GCedObject>(GetAllocationHandle()));
GCedObject* expected_address_on_first_page =
MakeGarbageCollected<GCedObject>(GetAllocationHandle());
for (size_t i = 2; i < kObjectsPerPage; ++i) {
first_page.push_back(
MakeGarbageCollected<GCedObject>(GetAllocationHandle()));
}
std::vector<Persistent<GCedObject>> second_page;
second_page.push_back(
MakeGarbageCollected<GCedObject>(GetAllocationHandle()));
GCedObject* expected_address_on_second_page =
MakeGarbageCollected<GCedObject>(GetAllocationHandle());
for (size_t i = 2; i < kObjectsPerPage; ++i) {
second_page.push_back(
MakeGarbageCollected<GCedObject>(GetAllocationHandle()));
}
testing::TestPlatform::DisableBackgroundTasksScope no_concurrent_sweep_scope(
GetPlatformHandle().get());
g_destructor_callcount = 0;
static constexpr Heap::Config config = {
Heap::Config::CollectionType::kMajor,
Heap::Config::StackState::kNoHeapPointers,
Heap::Config::MarkingType::kAtomic,
Heap::Config::SweepingType::kIncrementalAndConcurrent};
Heap::From(GetHeap())->CollectGarbage(config);
// Incremental sweeping is active and the space should have two pages with
// no room for an additional GCedObject. Allocating a new GCedObject should
// trigger sweeping. All objects other than the 2nd object on each page are
// marked. Lazy sweeping on allocation should reclaim the object on one of
// the pages and reuse its memory. The object on the other page should remain
// unreclaimed. To confirm: the new object will be allocated at one of the
// expected addresses and the GCedObject destructor is only called once.
GCedObject* new_object1 =
MakeGarbageCollected<GCedObject>(GetAllocationHandle());
EXPECT_EQ(1u, g_destructor_callcount);
EXPECT_TRUE((new_object1 == expected_address_on_first_page) ||
(new_object1 == expected_address_on_second_page));
// Allocating again should reclaim the other unmarked object and reuse its
// memory. The destructor will be called again and the new object will be
// allocated at one of the expected addresses, but not the same one as before.
GCedObject* new_object2 =
MakeGarbageCollected<GCedObject>(GetAllocationHandle());
EXPECT_EQ(2u, g_destructor_callcount);
EXPECT_TRUE((new_object2 == expected_address_on_first_page) ||
(new_object2 == expected_address_on_second_page));
EXPECT_NE(new_object1, new_object2);
}
} // namespace internal
} // namespace cppgc