Commit d5245542 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Remove unused sweeping for iterability mechanism

Since we now promote all young objects into old space on full GCs, we
don't need to sweep pages for iterability anymore in new space.

Minor MC doesn't need to make a page iterable when promoting the full
page into the new space. This is because maps are not reclaimed during
a minor GC.

Bug: v8:12760
Change-Id: I16d666e417d00ebf450453864cbd87afd6606afc
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3635723
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80447}
parent 7e233997
......@@ -818,7 +818,6 @@ void GCTracer::PrintNVP() const {
"gc=%s "
"reduce_memory=%d "
"minor_mc=%.2f "
"finish_sweeping=%.2f "
"time_to_safepoint=%.2f "
"mark=%.2f "
"mark.seed=%.2f "
......@@ -842,7 +841,6 @@ void GCTracer::PrintNVP() const {
"reset_liveness=%.2f\n",
duration, spent_in_mutator, "mmc", current_.reduce_memory,
current_scope(Scope::MINOR_MC),
current_scope(Scope::MINOR_MC_SWEEPING),
current_scope(Scope::TIME_TO_SAFEPOINT),
current_scope(Scope::MINOR_MC_MARK),
current_scope(Scope::MINOR_MC_MARK_SEED),
......
......@@ -2694,8 +2694,6 @@ void Heap::EvacuateYoungGeneration() {
DCHECK(CanPromoteYoungAndExpandOldGeneration(0));
}
mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
// Move pages from new->old generation.
PageRange range(new_space()->first_allocatable_address(), new_space()->top());
for (auto it = range.begin(); it != range.end();) {
......@@ -2776,8 +2774,6 @@ void Heap::Scavenge() {
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
SetGCState(SCAVENGE);
// Flip the semispaces. After flipping, to space is empty, from space has
......@@ -3350,11 +3346,11 @@ void Heap::CreateFillerObjectAtBackground(Address addr, int size) {
ClearRecordedSlots::kNo, VerifyNoSlotsRecorded::kNo);
}
void Heap::CreateFillerObjectAtSweeper(Address addr, int size,
ClearFreedMemoryMode clear_memory_mode) {
void Heap::CreateFillerObjectAtSweeper(Address addr, int size) {
// Do not verify whether slots are cleared here: the concurrent sweeper is not
// allowed to access the main thread's remembered set.
CreateFillerObjectAtRaw(addr, size, clear_memory_mode,
CreateFillerObjectAtRaw(addr, size,
ClearFreedMemoryMode::kDontClearFreedMemory,
ClearRecordedSlots::kNo, VerifyNoSlotsRecorded::kNo);
}
......
......@@ -1831,10 +1831,8 @@ class Heap {
// This method is used by the sweeper on free memory ranges to make the page
// iterable again. Unlike `CreateFillerObjectAt` this method will not verify
// slots since the sweeper can run concurrently. The sweeper can also
// optionally clear the object payload.
void CreateFillerObjectAtSweeper(Address addr, int size,
ClearFreedMemoryMode clear_memory_mode);
// slots since the sweeper can run concurrently.
void CreateFillerObjectAtSweeper(Address addr, int size);
// Creates a filler object in the specificed memory area. This method is the
// internal method used by all CreateFillerObjectAtXXX-methods.
......
......@@ -1097,7 +1097,6 @@ void MarkCompactCollector::Finish() {
}
sweeper()->StartSweeperTasks();
sweeper()->StartIterabilityTasks();
// Give pages that are queued to be freed back to the OS.
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
......@@ -5515,11 +5514,6 @@ void MinorMarkCompactCollector::CollectGarbage() {
// Minor MC does not support processing the ephemeron remembered set.
DCHECK(heap()->ephemeron_remembered_set_.empty());
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
}
heap()->array_buffer_sweeper()->EnsureFinished();
MarkLiveObjects();
......@@ -5590,7 +5584,7 @@ void MinorMarkCompactCollector::MakeIterable(
full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
p->AddressToMarkbitIndex(free_start),
p->AddressToMarkbitIndex(free_end));
if (free_space_mode == ZAP_FREE_SPACE) {
if (free_space_mode == FreeSpaceTreatmentMode::kZapFreeSpace) {
ZapCode(free_start, size);
}
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size));
......@@ -5607,7 +5601,7 @@ void MinorMarkCompactCollector::MakeIterable(
full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
p->AddressToMarkbitIndex(free_start),
p->AddressToMarkbitIndex(p->area_end()));
if (free_space_mode == ZAP_FREE_SPACE) {
if (free_space_mode == FreeSpaceTreatmentMode::kZapFreeSpace) {
ZapCode(free_start, size);
}
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size));
......@@ -6171,13 +6165,14 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
marking_state->live_bytes(chunk));
if (!chunk->IsLargePage()) {
if (heap()->ShouldZapGarbage()) {
collector_->MakeIterable(static_cast<Page*>(chunk), ZAP_FREE_SPACE);
collector_->MakeIterable(static_cast<Page*>(chunk),
FreeSpaceTreatmentMode::kZapFreeSpace);
} else if (heap()->incremental_marking()->IsMarking()) {
// When incremental marking is on, we need to clear the mark bits of
// the full collector. We cannot yet discard the young generation mark
// bits as they are still relevant for pointers updating.
collector_->MakeIterable(static_cast<Page*>(chunk),
IGNORE_FREE_SPACE);
FreeSpaceTreatmentMode::kIgnoreFreeSpace);
}
}
break;
......@@ -6189,12 +6184,14 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
marking_state->live_bytes(chunk));
DCHECK(!chunk->IsLargePage());
if (heap()->ShouldZapGarbage()) {
collector_->MakeIterable(static_cast<Page*>(chunk), ZAP_FREE_SPACE);
collector_->MakeIterable(static_cast<Page*>(chunk),
FreeSpaceTreatmentMode::kZapFreeSpace);
} else if (heap()->incremental_marking()->IsMarking()) {
// When incremental marking is on, we need to clear the mark bits of
// the full collector. We cannot yet discard the young generation mark
// bits as they are still relevant for pointers updating.
collector_->MakeIterable(static_cast<Page*>(chunk), IGNORE_FREE_SPACE);
collector_->MakeIterable(static_cast<Page*>(chunk),
FreeSpaceTreatmentMode::kIgnoreFreeSpace);
}
break;
case kObjectsOldToOld:
......
......@@ -24,9 +24,6 @@ Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
marking_state_(marking_state),
incremental_sweeper_pending_(false),
sweeping_in_progress_(false),
iterability_task_semaphore_(0),
iterability_in_progress_(false),
iterability_task_started_(false),
should_reduce_memory_(false) {}
Sweeper::PauseScope::PauseScope(Sweeper* sweeper) : sweeper_(sweeper) {
......@@ -147,7 +144,6 @@ void Sweeper::TearDown() {
void Sweeper::StartSweeping() {
sweeping_in_progress_ = true;
iterability_in_progress_ = true;
should_reduce_memory_ = heap_->ShouldReduceMemory();
MajorNonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
......@@ -193,8 +189,6 @@ Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
void Sweeper::EnsureCompleted() {
if (!sweeping_in_progress_) return;
EnsureIterabilityCompleted();
// If sweeping is not completed or not running at all, we try to complete it
// here.
ForAllSweepingSpaces([this](AllocationSpace space) {
......@@ -228,24 +222,16 @@ bool Sweeper::AreSweeperTasksRunning() {
V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
Address free_start, Address free_end, Page* page, Space* space,
FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode) {
FreeSpaceTreatmentMode free_space_treatment_mode) {
CHECK_GT(free_end, free_start);
size_t freed_bytes = 0;
size_t size = static_cast<size_t>(free_end - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
if (free_space_treatment_mode == FreeSpaceTreatmentMode::kZapFreeSpace) {
ZapCode(free_start, size);
}
ClearFreedMemoryMode clear_memory_mode =
(free_list_mode == REBUILD_FREE_LIST)
? ClearFreedMemoryMode::kDontClearFreedMemory
: ClearFreedMemoryMode::kClearFreedMemory;
page->heap()->CreateFillerObjectAtSweeper(free_start, static_cast<int>(size),
clear_memory_mode);
if (free_list_mode == REBUILD_FREE_LIST) {
page->heap()->CreateFillerObjectAtSweeper(free_start, static_cast<int>(size));
freed_bytes =
reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(free_start, size);
}
if (should_reduce_memory_) page->DiscardUnusedMemory(free_start, size);
return freed_bytes;
......@@ -313,30 +299,22 @@ void Sweeper::CleanupInvalidTypedSlotsOfFreeRanges(
page->ClearInvalidTypedSlots<OLD_TO_SHARED>(free_ranges_map);
}
void Sweeper::ClearMarkBitsAndHandleLivenessStatistics(
Page* page, size_t live_bytes, FreeListRebuildingMode free_list_mode) {
void Sweeper::ClearMarkBitsAndHandleLivenessStatistics(Page* page,
size_t live_bytes) {
marking_state_->bitmap(page)->Clear();
if (free_list_mode == IGNORE_FREE_LIST) {
marking_state_->SetLiveBytes(page, 0);
// We did not free memory, so have to adjust allocated bytes here.
intptr_t freed_bytes = page->area_size() - live_bytes;
page->DecreaseAllocatedBytes(freed_bytes);
} else {
// Keep the old live bytes counter of the page until RefillFreeList, where
// the space size is refined.
// The allocated_bytes() counter is precisely the total size of objects.
DCHECK_EQ(live_bytes, page->allocated_bytes());
}
}
int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode,
int Sweeper::RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
SweepingMode sweeping_mode,
const base::MutexGuard& page_guard) {
Space* space = p->owner();
DCHECK_NOT_NULL(space);
DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE ||
space->identity() == MAP_SPACE);
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
// Phase 1: Prepare the page for sweeping.
......@@ -405,7 +383,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
max_freed_bytes =
std::max(max_freed_bytes,
FreeAndProcessFreedMemory(free_start, free_end, p, space,
free_list_mode, free_space_mode));
free_space_treatment_mode));
CleanupRememberedSetEntriesForFreedMemory(
free_start, free_end, p, record_free_ranges, &free_ranges_map,
sweeping_mode, &invalidated_old_to_new_cleanup,
......@@ -434,7 +412,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
max_freed_bytes =
std::max(max_freed_bytes,
FreeAndProcessFreedMemory(free_start, free_end, p, space,
free_list_mode, free_space_mode));
free_space_treatment_mode));
CleanupRememberedSetEntriesForFreedMemory(
free_start, free_end, p, record_free_ranges, &free_ranges_map,
sweeping_mode, &invalidated_old_to_new_cleanup,
......@@ -443,7 +421,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
// Phase 3: Post process the page.
CleanupInvalidTypedSlotsOfFreeRanges(p, free_ranges_map, sweeping_mode);
ClearMarkBitsAndHandleLivenessStatistics(p, live_bytes, free_list_mode);
ClearMarkBitsAndHandleLivenessStatistics(p, live_bytes);
if (active_system_pages_after_sweeping) {
// Decrement accounted memory for discarded memory.
......@@ -454,7 +432,6 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
if (code_object_registry) code_object_registry->Finalize();
p->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kDone);
if (free_list_mode == IGNORE_FREE_LIST) return 0;
return static_cast<int>(
p->owner()->free_list()->GuaranteedAllocatable(max_freed_bytes));
......@@ -533,10 +510,10 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity,
page->concurrent_sweeping_state());
page->set_concurrent_sweeping_state(
Page::ConcurrentSweepingState::kInProgress);
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode,
sweeping_mode, guard);
const FreeSpaceTreatmentMode free_space_treatment_mode =
Heap::ShouldZapGarbage() ? FreeSpaceTreatmentMode::kZapFreeSpace
: FreeSpaceTreatmentMode::kIgnoreFreeSpace;
max_freed = RawSweep(page, free_space_treatment_mode, sweeping_mode, guard);
DCHECK(page->SweepingDone());
}
......@@ -566,7 +543,6 @@ void Sweeper::EnsurePageIsSwept(Page* page) {
}
} else {
DCHECK(page->InNewSpace());
EnsureIterabilityCompleted();
}
CHECK(page->SweepingDone());
......@@ -639,87 +615,5 @@ Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
return page;
}
void Sweeper::EnsureIterabilityCompleted() {
if (!iterability_in_progress_) return;
if (FLAG_concurrent_sweeping && iterability_task_started_) {
if (heap_->isolate()->cancelable_task_manager()->TryAbort(
iterability_task_id_) != TryAbortResult::kTaskAborted) {
iterability_task_semaphore_.Wait();
}
iterability_task_started_ = false;
}
for (Page* page : iterability_list_) {
MakeIterable(page);
}
iterability_list_.clear();
iterability_in_progress_ = false;
}
class Sweeper::IterabilityTask final : public CancelableTask {
public:
IterabilityTask(Isolate* isolate, Sweeper* sweeper,
base::Semaphore* pending_iterability_task)
: CancelableTask(isolate),
sweeper_(sweeper),
pending_iterability_task_(pending_iterability_task),
tracer_(isolate->heap()->tracer()) {}
~IterabilityTask() override = default;
IterabilityTask(const IterabilityTask&) = delete;
IterabilityTask& operator=(const IterabilityTask&) = delete;
private:
void RunInternal() final {
TRACE_GC_EPOCH(tracer_, GCTracer::Scope::MC_BACKGROUND_SWEEPING,
ThreadKind::kBackground);
for (Page* page : sweeper_->iterability_list_) {
sweeper_->MakeIterable(page);
}
sweeper_->iterability_list_.clear();
pending_iterability_task_->Signal();
}
Sweeper* const sweeper_;
base::Semaphore* const pending_iterability_task_;
GCTracer* const tracer_;
};
void Sweeper::StartIterabilityTasks() {
if (!iterability_in_progress_) return;
DCHECK(!iterability_task_started_);
if (FLAG_concurrent_sweeping && !iterability_list_.empty()) {
auto task = std::make_unique<IterabilityTask>(heap_->isolate(), this,
&iterability_task_semaphore_);
iterability_task_id_ = task->id();
iterability_task_started_ = true;
V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
}
}
void Sweeper::AddPageForIterability(Page* page) {
DCHECK(sweeping_in_progress_);
DCHECK(iterability_in_progress_);
DCHECK(!iterability_task_started_);
DCHECK(IsValidIterabilitySpace(page->owner_identity()));
DCHECK_EQ(Page::ConcurrentSweepingState::kDone,
page->concurrent_sweeping_state());
iterability_list_.push_back(page);
page->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kPending);
}
void Sweeper::MakeIterable(Page* page) {
base::MutexGuard guard(page->mutex());
DCHECK(IsValidIterabilitySpace(page->owner_identity()));
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
RawSweep(page, IGNORE_FREE_LIST, free_space_mode,
SweepingMode::kLazyOrConcurrent, guard);
}
} // namespace internal
} // namespace v8
......@@ -23,11 +23,10 @@ class Page;
class PagedSpace;
class Space;
enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
enum class FreeSpaceTreatmentMode { kIgnoreFreeSpace, kZapFreeSpace };
class Sweeper {
public:
using IterabilityList = std::vector<Page*>;
using SweepingList = std::vector<Page*>;
using SweptList = std::vector<Page*>;
......@@ -92,8 +91,7 @@ class Sweeper {
void ScheduleIncrementalSweepingTask();
int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode,
int RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
SweepingMode sweeping_mode, const base::MutexGuard& page_guard);
// After calling this function sweeping is considered to be in progress
......@@ -110,13 +108,8 @@ class Sweeper {
Page* GetSweptPageSafe(PagedSpace* space);
void AddPageForIterability(Page* page);
void StartIterabilityTasks();
void EnsureIterabilityCompleted();
private:
class IncrementalSweeperTask;
class IterabilityTask;
class SweeperJob;
static const int kNumberOfSweepingSpaces =
......@@ -134,10 +127,9 @@ class Sweeper {
// FreeSpaceTreatmentMode this function may add the free memory to a free
// list, make the memory iterable, clear it, and return the free memory to
// the operating system.
size_t FreeAndProcessFreedMemory(Address free_start, Address free_end,
Page* page, Space* space,
FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode);
size_t FreeAndProcessFreedMemory(
Address free_start, Address free_end, Page* page, Space* space,
FreeSpaceTreatmentMode free_space_treatment_mode);
// Helper function for RawSweep. Handle remembered set entries in the freed
// memory which require clearing.
......@@ -155,8 +147,7 @@ class Sweeper {
// Helper function for RawSweep. Clears the mark bits and ensures consistency
// of live bytes.
void ClearMarkBitsAndHandleLivenessStatistics(
Page* page, size_t live_bytes, FreeListRebuildingMode free_list_mode);
void ClearMarkBitsAndHandleLivenessStatistics(Page* page, size_t live_bytes);
// Can only be called on the main thread when no tasks are running.
bool IsDoneSweeping() const {
......@@ -182,12 +173,6 @@ class Sweeper {
void PrepareToBeSweptPage(AllocationSpace space, Page* page);
void MakeIterable(Page* page);
bool IsValidIterabilitySpace(AllocationSpace space) {
return space == NEW_SPACE || space == RO_SPACE;
}
static bool IsValidSweepingSpace(AllocationSpace space) {
return space >= FIRST_GROWABLE_PAGED_SPACE &&
space <= LAST_GROWABLE_PAGED_SPACE;
......@@ -209,13 +194,6 @@ class Sweeper {
// Main thread can finalize sweeping, while background threads allocation slow
// path checks this flag to see whether it could support concurrent sweeping.
std::atomic<bool> sweeping_in_progress_;
// Pages that are only made iterable but have their free lists ignored.
IterabilityList iterability_list_;
CancelableTaskManager::Id iterability_task_id_;
base::Semaphore iterability_task_semaphore_;
bool iterability_in_progress_;
bool iterability_task_started_;
bool should_reduce_memory_;
};
......
......@@ -607,7 +607,6 @@
F(MINOR_MC_MARK_WEAK) \
F(MINOR_MC_MARKING_DEQUE) \
F(MINOR_MC_RESET_LIVENESS) \
F(MINOR_MC_SWEEPING) \
F(SAFEPOINT) \
F(SCAVENGER) \
F(SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS) \
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.