Commit 490a9ead authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Add methods for freeing all LABs

This CL adds Heap::FreeLinearAllocationAreas to free all local
allocation buffers (LABs). We use this to give up LABs for a local GC.
The second method, Heap::FreeSharedLinearAllocationAreas, is used to free
all LABs in the shared heap; it runs for shared GCs and in the future will
also run on isolate shutdown.

Bug: v8:10315
Change-Id: Ie5cbb68c95fae027055aeaf4458473b04b15b18c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3279681
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77900}
parent e8db01af
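
For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of what the two new methods do. Every class and member below is a simplified stand-in for illustration, not V8's actual implementation: a LAB (linear allocation buffer) is modeled as a bump-pointer range [top, limit), and "freeing" it means giving the unused tail back to the owning space so the heap stays iterable.

// Illustrative sketch only; names mirror V8's but the types are stand-ins.
#include <cstddef>
#include <vector>

// One linear allocation buffer: objects are bump-allocated in [top, limit).
struct Lab {
  std::size_t top = 0;
  std::size_t limit = 0;
  // Return the unused tail to the owning space; the buffer is then empty.
  void Free() { top = limit = 0; }
};

// A background thread's view of the heap, owning its own LAB.
struct LocalHeap {
  Lab lab;
  void FreeLinearAllocationArea() { lab.Free(); }
};

// A paged space with a main-thread LAB.
struct PagedSpace {
  Lab lab;
  void FreeLinearAllocationArea() { lab.Free(); }
};

class Heap {
 public:
  // Mirrors the intent of Heap::FreeLinearAllocationAreas in this CL: drop
  // every background-thread LAB, then the main-thread LAB of each paged
  // space. New space is bump-pointer only and iterable up to top(), so it
  // needs no work here.
  void FreeLinearAllocationAreas() {
    for (LocalHeap& local_heap : local_heaps_) {
      local_heap.FreeLinearAllocationArea();
    }
    for (PagedSpace& space : paged_spaces_) {
      space.FreeLinearAllocationArea();
    }
  }

  // Mirrors Heap::FreeSharedLinearAllocationAreas: only isolates attached
  // to a shared isolate own LABs that point into the shared heap.
  void FreeSharedLinearAllocationAreas() {
    if (!has_shared_isolate_) return;
    shared_old_lab_.Free();
    shared_map_lab_.Free();
  }

 private:
  std::vector<LocalHeap> local_heaps_;
  std::vector<PagedSpace> paged_spaces_;
  Lab shared_old_lab_;
  Lab shared_map_lab_;
  bool has_shared_isolate_ = false;
};

int main() {
  Heap heap;
  heap.FreeLinearAllocationAreas();        // e.g. when giving up LABs for a local GC
  heap.FreeSharedLinearAllocationAreas();  // no-op without a shared isolate
}
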
@@ -2273,9 +2273,7 @@ void Heap::PerformSharedGarbageCollection(Isolate* initiator,
   DCHECK_NOT_NULL(isolate()->global_safepoint());
 
   isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
-    Heap* client_heap = client->heap();
-    client_heap->shared_old_allocator_->FreeLinearAllocationArea();
-    client_heap->shared_map_allocator_->FreeLinearAllocationArea();
+    client->heap()->FreeSharedLinearAllocationAreas();
   });
 
   PerformGarbageCollection(GarbageCollector::MARK_COMPACTOR);
@@ -3536,6 +3534,26 @@ void Heap::MakeHeapIterable() {
   // iterable up to top().
 }
 
+void Heap::FreeLinearAllocationAreas() {
+  safepoint()->IterateLocalHeaps(
+      [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
+
+  PagedSpaceIterator spaces(this);
+  for (PagedSpace* space = spaces.Next(); space != nullptr;
+       space = spaces.Next()) {
+    space->FreeLinearAllocationArea();
+  }
+
+  // New space is bump-pointer allocation only and therefore guaranteed to be
+  // iterable up to top().
+}
+
+void Heap::FreeSharedLinearAllocationAreas() {
+  if (!isolate()->shared_isolate()) return;
+
+  shared_old_allocator_->FreeLinearAllocationArea();
+  shared_map_allocator_->FreeLinearAllocationArea();
+}
+
 namespace {
 
 double ComputeMutatorUtilizationImpl(double mutator_speed, double gc_speed) {
......
@@ -1785,6 +1785,12 @@ class Heap {
   // with the allocation memento of the object at the top
   void EnsureFillerObjectAtTop();
 
+  // Free all LABs in the heap.
+  void FreeLinearAllocationAreas();
+
+  // Free all shared LABs.
+  void FreeSharedLinearAllocationAreas();
+
   // Performs garbage collection in a safepoint.
   // Returns the number of freed global handles.
   size_t PerformGarbageCollection(
......
@@ -919,16 +919,14 @@ void MarkCompactCollector::Prepare() {
     StartMarking();
   }
 
+  heap_->FreeLinearAllocationAreas();
+
   PagedSpaceIterator spaces(heap());
   for (PagedSpace* space = spaces.Next(); space != nullptr;
        space = spaces.Next()) {
     space->PrepareForMarkCompact();
   }
 
-  // Fill and reset all background thread LABs
-  heap_->safepoint()->IterateLocalHeaps(
-      [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
-
   // All objects are guaranteed to be initialized in atomic pause
   if (heap()->new_lo_space()) {
     heap()->new_lo_space()->ResetPendingObject();
......
@@ -860,10 +860,6 @@ void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
 // OldSpace implementation
 
 void PagedSpace::PrepareForMarkCompact() {
-  // We don't have a linear allocation area while sweeping. It will be restored
-  // on the first allocation after the sweep.
-  FreeLinearAllocationArea();
-
   // Clear the free list before a full GC---it will be rebuilt afterward.
   free_list_->Reset();
 }
......
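
Continuing the sketch above the diff, the call sites this CL establishes look roughly like this. The safepoint and client-isolate iteration machinery is elided, and both function names here are simplified stand-ins for the real methods touched in the hunks above:

// Rough shape of the call sites after this CL, reusing the Heap sketch above.
// A shared GC asks every client heap to give up its shared-heap LABs; the
// mark-compactor gives up all LABs once in Prepare(), so per-space
// PrepareForMarkCompact() only has to reset free lists.
void PerformSharedGarbageCollection(std::vector<Heap*>& client_heaps) {
  for (Heap* client_heap : client_heaps) {
    client_heap->FreeSharedLinearAllocationAreas();
  }
  // ... run the shared-heap mark-compact ...
}

void MarkCompactPrepare(Heap* heap) {
  heap->FreeLinearAllocationAreas();
  // ... per-space preparation, free-list resets, marking setup ...
}

The commit message also anticipates calling FreeSharedLinearAllocationAreas on isolate shutdown, so a client isolate detaching from the shared heap would not leave stale LABs behind.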