Commit d5245542 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Remove unused sweeping-for-iterability mechanism

Since we now promote all young objects into old space on full GCs, we
no longer need to sweep new-space pages for iterability.

Minor MC also doesn't need to make a page iterable when promoting a
full page into the new space, because maps are not reclaimed during a
minor GC.
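
For context on why that is safe: iterating a heap page means starting at
the first object and repeatedly advancing by each object's size, which is
read through the object's map, so every freed gap must be plugged with a
filler object whose map encodes its size. The sketch below is a minimal
illustration of that invariant; Map, HeapObject, and WalkPage are
hypothetical stand-ins, not V8's actual classes.

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical stand-ins for illustration; V8's real Map/HeapObject
// layouts are far more involved.
struct Map {
  std::size_t instance_size;  // byte size of objects that use this map
};

struct HeapObject {
  const Map* map;  // every heap object begins with a pointer to its map
  std::size_t Size() const { return map->instance_size; }
};

// Walk every object in [start, end). This only works if each byte of the
// range belongs to a live object or a filler object, i.e. the page is
// "iterable". If a dead object's map had already been reclaimed, Size()
// would read garbage and the walk would derail. A minor GC never reclaims
// maps, so pages it promotes wholesale stay iterable without sweeping.
inline void WalkPage(std::uintptr_t start, std::uintptr_t end,
                     void (*visit)(HeapObject*)) {
  std::uintptr_t cur = start;
  while (cur < end) {
    HeapObject* obj = reinterpret_cast<HeapObject*>(cur);
    visit(obj);
    cur += obj->Size();  // advance by the size derived from the map
  }
}
```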

Bug: v8:12760
Change-Id: I16d666e417d00ebf450453864cbd87afd6606afc
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3635723
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80447}
parent 7e233997
@@ -818,7 +818,6 @@ void GCTracer::PrintNVP() const {
           "gc=%s "
           "reduce_memory=%d "
           "minor_mc=%.2f "
-          "finish_sweeping=%.2f "
           "time_to_safepoint=%.2f "
           "mark=%.2f "
           "mark.seed=%.2f "
@@ -842,7 +841,6 @@ void GCTracer::PrintNVP() const {
           "reset_liveness=%.2f\n",
           duration, spent_in_mutator, "mmc", current_.reduce_memory,
           current_scope(Scope::MINOR_MC),
-          current_scope(Scope::MINOR_MC_SWEEPING),
           current_scope(Scope::TIME_TO_SAFEPOINT),
           current_scope(Scope::MINOR_MC_MARK),
           current_scope(Scope::MINOR_MC_MARK_SEED),
@@ -2694,8 +2694,6 @@ void Heap::EvacuateYoungGeneration() {
     DCHECK(CanPromoteYoungAndExpandOldGeneration(0));
   }
 
-  mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
-
   // Move pages from new->old generation.
   PageRange range(new_space()->first_allocatable_address(), new_space()->top());
   for (auto it = range.begin(); it != range.end();) {
@@ -2776,8 +2774,6 @@ void Heap::Scavenge() {
   IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
       incremental_marking());
 
-  mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
-
   SetGCState(SCAVENGE);
 
   // Flip the semispaces. After flipping, to space is empty, from space has
@@ -3350,11 +3346,11 @@ void Heap::CreateFillerObjectAtBackground(Address addr, int size) {
                           ClearRecordedSlots::kNo, VerifyNoSlotsRecorded::kNo);
 }
 
-void Heap::CreateFillerObjectAtSweeper(Address addr, int size,
-                                       ClearFreedMemoryMode clear_memory_mode) {
+void Heap::CreateFillerObjectAtSweeper(Address addr, int size) {
   // Do not verify whether slots are cleared here: the concurrent sweeper is not
   // allowed to access the main thread's remembered set.
-  CreateFillerObjectAtRaw(addr, size, clear_memory_mode,
+  CreateFillerObjectAtRaw(addr, size,
+                          ClearFreedMemoryMode::kDontClearFreedMemory,
                           ClearRecordedSlots::kNo, VerifyNoSlotsRecorded::kNo);
 }
@@ -1831,10 +1831,8 @@ class Heap {
   // This method is used by the sweeper on free memory ranges to make the page
   // iterable again. Unlike `CreateFillerObjectAt` this method will not verify
-  // slots since the sweeper can run concurrently. The sweeper can also
-  // optionally clear the object payload.
-  void CreateFillerObjectAtSweeper(Address addr, int size,
-                                   ClearFreedMemoryMode clear_memory_mode);
+  // slots since the sweeper can run concurrently.
+  void CreateFillerObjectAtSweeper(Address addr, int size);
 
   // Creates a filler object in the specificed memory area. This method is the
   // internal method used by all CreateFillerObjectAtXXX-methods.
@@ -1097,7 +1097,6 @@ void MarkCompactCollector::Finish() {
   }
 
   sweeper()->StartSweeperTasks();
-  sweeper()->StartIterabilityTasks();
 
   // Give pages that are queued to be freed back to the OS.
   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
@@ -5515,11 +5514,6 @@ void MinorMarkCompactCollector::CollectGarbage() {
   // Minor MC does not support processing the ephemeron remembered set.
   DCHECK(heap()->ephemeron_remembered_set_.empty());
 
-  {
-    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
-    heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
-  }
-
   heap()->array_buffer_sweeper()->EnsureFinished();
 
   MarkLiveObjects();
@@ -5590,7 +5584,7 @@ void MinorMarkCompactCollector::MakeIterable(
     full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
         p->AddressToMarkbitIndex(free_start),
         p->AddressToMarkbitIndex(free_end));
-    if (free_space_mode == ZAP_FREE_SPACE) {
+    if (free_space_mode == FreeSpaceTreatmentMode::kZapFreeSpace) {
       ZapCode(free_start, size);
     }
     p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size));
@@ -5607,7 +5601,7 @@ void MinorMarkCompactCollector::MakeIterable(
     full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
         p->AddressToMarkbitIndex(free_start),
         p->AddressToMarkbitIndex(p->area_end()));
-    if (free_space_mode == ZAP_FREE_SPACE) {
+    if (free_space_mode == FreeSpaceTreatmentMode::kZapFreeSpace) {
       ZapCode(free_start, size);
     }
     p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size));
@@ -6171,13 +6165,14 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
                                  marking_state->live_bytes(chunk));
       if (!chunk->IsLargePage()) {
         if (heap()->ShouldZapGarbage()) {
-          collector_->MakeIterable(static_cast<Page*>(chunk), ZAP_FREE_SPACE);
+          collector_->MakeIterable(static_cast<Page*>(chunk),
+                                   FreeSpaceTreatmentMode::kZapFreeSpace);
         } else if (heap()->incremental_marking()->IsMarking()) {
           // When incremental marking is on, we need to clear the mark bits of
           // the full collector. We cannot yet discard the young generation mark
           // bits as they are still relevant for pointers updating.
           collector_->MakeIterable(static_cast<Page*>(chunk),
-                                   IGNORE_FREE_SPACE);
+                                   FreeSpaceTreatmentMode::kIgnoreFreeSpace);
         }
       }
       break;
@@ -6189,12 +6184,14 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
                                  marking_state->live_bytes(chunk));
       DCHECK(!chunk->IsLargePage());
       if (heap()->ShouldZapGarbage()) {
-        collector_->MakeIterable(static_cast<Page*>(chunk), ZAP_FREE_SPACE);
+        collector_->MakeIterable(static_cast<Page*>(chunk),
+                                 FreeSpaceTreatmentMode::kZapFreeSpace);
       } else if (heap()->incremental_marking()->IsMarking()) {
         // When incremental marking is on, we need to clear the mark bits of
         // the full collector. We cannot yet discard the young generation mark
         // bits as they are still relevant for pointers updating.
-        collector_->MakeIterable(static_cast<Page*>(chunk), IGNORE_FREE_SPACE);
+        collector_->MakeIterable(static_cast<Page*>(chunk),
+                                 FreeSpaceTreatmentMode::kIgnoreFreeSpace);
       }
       break;
     case kObjectsOldToOld:
@@ -23,11 +23,10 @@ class Page;
 class PagedSpace;
 class Space;
 
-enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
+enum class FreeSpaceTreatmentMode { kIgnoreFreeSpace, kZapFreeSpace };
 
 class Sweeper {
  public:
-  using IterabilityList = std::vector<Page*>;
   using SweepingList = std::vector<Page*>;
   using SweptList = std::vector<Page*>;
@@ -92,8 +91,7 @@ class Sweeper {
   void ScheduleIncrementalSweepingTask();
 
-  int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
-               FreeSpaceTreatmentMode free_space_mode,
+  int RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
                SweepingMode sweeping_mode, const base::MutexGuard& page_guard);
 
   // After calling this function sweeping is considered to be in progress
@@ -110,13 +108,8 @@ class Sweeper {
   Page* GetSweptPageSafe(PagedSpace* space);
 
-  void AddPageForIterability(Page* page);
-  void StartIterabilityTasks();
-  void EnsureIterabilityCompleted();
-
  private:
   class IncrementalSweeperTask;
-  class IterabilityTask;
   class SweeperJob;
 
   static const int kNumberOfSweepingSpaces =
@@ -134,10 +127,9 @@ class Sweeper {
   // FreeSpaceTreatmentMode this function may add the free memory to a free
   // list, make the memory iterable, clear it, and return the free memory to
   // the operating system.
-  size_t FreeAndProcessFreedMemory(Address free_start, Address free_end,
-                                   Page* page, Space* space,
-                                   FreeListRebuildingMode free_list_mode,
-                                   FreeSpaceTreatmentMode free_space_mode);
+  size_t FreeAndProcessFreedMemory(
+      Address free_start, Address free_end, Page* page, Space* space,
+      FreeSpaceTreatmentMode free_space_treatment_mode);
 
   // Helper function for RawSweep. Handle remembered set entries in the freed
   // memory which require clearing.
@@ -155,8 +147,7 @@ class Sweeper {
   // Helper function for RawSweep. Clears the mark bits and ensures consistency
   // of live bytes.
-  void ClearMarkBitsAndHandleLivenessStatistics(
-      Page* page, size_t live_bytes, FreeListRebuildingMode free_list_mode);
+  void ClearMarkBitsAndHandleLivenessStatistics(Page* page, size_t live_bytes);
 
   // Can only be called on the main thread when no tasks are running.
   bool IsDoneSweeping() const {
@@ -182,12 +173,6 @@ class Sweeper {
   void PrepareToBeSweptPage(AllocationSpace space, Page* page);
 
-  void MakeIterable(Page* page);
-
-  bool IsValidIterabilitySpace(AllocationSpace space) {
-    return space == NEW_SPACE || space == RO_SPACE;
-  }
-
   static bool IsValidSweepingSpace(AllocationSpace space) {
     return space >= FIRST_GROWABLE_PAGED_SPACE &&
            space <= LAST_GROWABLE_PAGED_SPACE;
@@ -209,13 +194,6 @@ class Sweeper {
   // Main thread can finalize sweeping, while background threads allocation slow
   // path checks this flag to see whether it could support concurrent sweeping.
   std::atomic<bool> sweeping_in_progress_;
-  // Pages that are only made iterable but have their free lists ignored.
-  IterabilityList iterability_list_;
-  CancelableTaskManager::Id iterability_task_id_;
-  base::Semaphore iterability_task_semaphore_;
-  bool iterability_in_progress_;
-  bool iterability_task_started_;
   bool should_reduce_memory_;
 };
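Note that the sweeper.h hunks above also convert FreeSpaceTreatmentMode
from an unscoped enum to an enum class, which is why every call site
changes from ZAP_FREE_SPACE to FreeSpaceTreatmentMode::kZapFreeSpace. A
minimal sketch of the difference, with names invented for illustration:

```cpp
// Unscoped enums inject their enumerators into the enclosing scope and
// implicitly convert to int; scoped enums do neither.
enum LegacyMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };      // old style
enum class ScopedMode { kIgnoreFreeSpace, kZapFreeSpace };  // new style

int Demo() {
  int n = ZAP_FREE_SPACE;                    // OK: implicit conversion
  ScopedMode m = ScopedMode::kZapFreeSpace;  // must qualify with the type
  // int bad = ScopedMode::kZapFreeSpace;    // error: no implicit conversion
  return n + static_cast<int>(m);            // conversion must be explicit
}
```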
@@ -607,7 +607,6 @@
   F(MINOR_MC_MARK_WEAK)                       \
   F(MINOR_MC_MARKING_DEQUE)                   \
   F(MINOR_MC_RESET_LIVENESS)                  \
-  F(MINOR_MC_SWEEPING)                        \
   F(SAFEPOINT)                                \
   F(SCAVENGER)                                \
   F(SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS)   \