Commit ca505562 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Remove sweeping_slot_set_ from MemoryChunk

Since the new space is always empty after a full GC, the old-to-new
remembered set is also always empty after a full GC. This means we can
get rid of the sweeping_slot_set_.

This slot set was used to allow the main thread to insert into the
old-to-new remembered set non-atomically. The sweeping slot set was
owned by the sweeper, which deleted slots in free memory from it, while
the main thread started with an empty old-to-new remembered set. After
sweeping, both slot sets were merged again.
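
For context, a condensed copy of the merge step this change removes
(taken from Page::MergeOldToNewRememberedSets in the diff below, with
explanatory comments added; slightly simplified):

void Page::MergeOldToNewRememberedSets() {
  // Nothing to do if no slot set was handed to the sweeper.
  if (sweeping_slot_set_ == nullptr) return;
  if (slot_set_[OLD_TO_NEW]) {
    // Copy every slot the main thread recorded while sweeping was running
    // into the sweeper-owned set ...
    RememberedSet<OLD_TO_NEW>::Iterate(
        this,
        [this](MaybeObjectSlot slot) {
          RememberedSetSweeping::Insert<AccessMode::NON_ATOMIC>(
              this, slot.address());
          return KEEP_SLOT;
        },
        SlotSet::KEEP_EMPTY_BUCKETS);
    // ... and drop the now redundant main-thread set.
    ReleaseSlotSet<OLD_TO_NEW>();
  }
  // The merged set becomes the regular old-to-new remembered set again.
  slot_set_[OLD_TO_NEW] = sweeping_slot_set_;
  sweeping_slot_set_ = nullptr;
}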

The sweeper now needs to behave differently during a GC: when sweeping
a page during a full GC, it has to delete old-to-new slots in free
memory.
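
A minimal sketch of the new behavior, based on
Sweeper::CleanupRememberedSetEntriesForFreedMemory in the diff below:
old-to-new slots covering freed memory are removed only when sweeping
runs eagerly inside the full-GC pause.

// Inside Sweeper::CleanupRememberedSetEntriesForFreedMemory (simplified):
if (sweeping_mode == SweepingMode::kEagerDuringGC) {
  // Only during the atomic pause may the sweeper touch the old-to-new set,
  // so stale slots in freed memory are dropped here.
  RememberedSet<OLD_TO_NEW>::RemoveRange(page, free_start, free_end,
                                         SlotSet::KEEP_EMPTY_BUCKETS);
}
// Old-to-old slots are cleaned up in both modes, as before.
RememberedSet<OLD_TO_OLD>::RemoveRange(page, free_start, free_end,
                                       SlotSet::KEEP_EMPTY_BUCKETS);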

Outside of a GC the sweeper is no longer allowed to remove slots from
the old-to-new remembered set, since that would race with the main
thread, which adds slots to the set while the sweeper is running.
However, there should be no recorded slots in free memory anyway.
DCHECKing this is tricky though, because we would need to synchronize
with the main thread right-trimming objects, and at least
String::MakeThin only deletes slots after the map release-store.
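
How callers pick the mode (simplified from
PagedSpace::ContributeToSweepingMain in the diff below): only sweeping
done for compaction spaces, i.e. as part of the mark-compact pause, uses
the eager mode that may touch the old-to-new set; lazy and concurrent
sweeping leaves that set to the main thread.

Sweeper::SweepingMode sweeping_mode =
    is_compaction_space() ? Sweeper::SweepingMode::kEagerDuringGC
                          : Sweeper::SweepingMode::kLazyOrConcurrent;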

Bug: v8:12760
Change-Id: Ic0301851a714e894c3040595f456ab93b5875c81
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3560638
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79713}
parent c3ed607d
......@@ -4724,18 +4724,6 @@ void CollectSlots(MemoryChunk* chunk, Address start, Address end,
return KEEP_SLOT;
},
SlotSet::FREE_EMPTY_BUCKETS);
if (direction == OLD_TO_NEW) {
CHECK(chunk->SweepingDone());
RememberedSetSweeping::Iterate(
chunk,
[start, end, untyped](MaybeObjectSlot slot) {
if (start <= slot.address() && slot.address() < end) {
untyped->insert(slot.address());
}
return KEEP_SLOT;
},
SlotSet::FREE_EMPTY_BUCKETS);
}
RememberedSet<direction>::IterateTyped(
chunk, [=](SlotType type, Address slot) {
if (start <= slot && slot < end) {
......
......@@ -80,7 +80,6 @@ Address LargePage::GetAddressToShrink(Address object_address,
}
void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
DCHECK_NULL(this->sweeping_slot_set());
RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
......
......@@ -1517,7 +1517,6 @@ class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
DCHECK(chunk->SweepingDone());
DCHECK_NULL(chunk->sweeping_slot_set<AccessMode::NON_ATOMIC>());
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
} else if (p->IsEvacuationCandidate()) {
if (V8_EXTERNAL_CODE_SPACE_BOOL &&
......@@ -2634,7 +2633,6 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start);
// Clear any recorded slots for the compiled data as being invalid.
DCHECK_NULL(chunk->sweeping_slot_set());
RememberedSet<OLD_TO_NEW>::RemoveRange(
chunk, compiled_data_start, compiled_data_start + compiled_data_size,
SlotSet::FREE_EMPTY_BUCKETS);
......@@ -2887,7 +2885,6 @@ void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array,
Address start = array.GetDescriptorSlot(new_nof_all_descriptors).address();
Address end = array.GetDescriptorSlot(old_nof_all_descriptors).address();
MemoryChunk* chunk = MemoryChunk::FromHeapObject(array);
DCHECK_NULL(chunk->sweeping_slot_set());
RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start, end,
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(chunk, start, end,
......@@ -3978,13 +3975,6 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
this, std::move(evacuation_items), nullptr);
}
// After evacuation there might still be swept pages that weren't
// added to one of the compaction space but still reside in the
// sweeper's swept_list_. Merge remembered sets for those pages as
// well such that after mark-compact all pages either store slots
// in the sweeping or old-to-new remembered set.
sweeper()->MergeOldToNewRememberedSetsForSweptPages();
const size_t aborted_pages = PostProcessEvacuationCandidates();
if (FLAG_trace_evacuation) {
......@@ -4375,11 +4365,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
void UpdateUntypedPointers() {
if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
DCHECK_IMPLIES(
collector == GarbageCollector::MARK_COMPACTOR,
chunk_->SweepingDone() &&
chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>() == nullptr);
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
int slots = RememberedSet<OLD_TO_NEW>::Iterate(
chunk_,
......@@ -4396,30 +4381,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
}
}
if (chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>()) {
DCHECK_IMPLIES(
collector == GarbageCollector::MARK_COMPACTOR,
!chunk_->SweepingDone() &&
(chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>()) ==
nullptr);
DCHECK(!chunk_->IsLargePage());
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
int slots = RememberedSetSweeping::Iterate(
chunk_,
[this, &filter](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return CheckAndUpdateOldToNewSlot(slot);
},
SlotSet::FREE_EMPTY_BUCKETS);
DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR, slots == 0);
if (slots == 0) {
chunk_->ReleaseSweepingSlotSet();
}
}
if (chunk_->invalidated_slots<OLD_TO_NEW>() != nullptr) {
// The invalidated slots are not needed after old-to-new slots were
// processed.
......@@ -4534,18 +4495,15 @@ int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
const bool contains_old_to_new_slots =
chunk->slot_set<OLD_TO_NEW>() != nullptr ||
chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
const bool contains_old_to_new_sweeping_slots =
chunk->sweeping_slot_set() != nullptr;
const bool contains_old_to_old_invalidated_slots =
chunk->invalidated_slots<OLD_TO_OLD>() != nullptr;
const bool contains_old_to_new_invalidated_slots =
chunk->invalidated_slots<OLD_TO_NEW>() != nullptr;
if (!contains_old_to_new_slots && !contains_old_to_new_sweeping_slots &&
!contains_old_to_old_slots && !contains_old_to_old_invalidated_slots &&
if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
!contains_old_to_old_invalidated_slots &&
!contains_old_to_new_invalidated_slots && !contains_old_to_code_slots)
continue;
if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
contains_old_to_new_sweeping_slots ||
contains_old_to_old_invalidated_slots ||
contains_old_to_new_invalidated_slots) {
items->emplace_back(CreateRememberedSetUpdatingItem(chunk, mode));
......@@ -4751,8 +4709,6 @@ void ReRecordPage(
// might not have recorded them in first place.
// Remove outdated slots.
RememberedSetSweeping::RemoveRange(page, page->address(), failed_start,
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(), failed_start,
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
......@@ -5629,14 +5585,6 @@ class PageMarkingItem : public ParallelWorkItem {
return CheckAndMarkObject(task, slot);
},
SlotSet::FREE_EMPTY_BUCKETS);
filter = InvalidatedSlotsFilter::OldToNew(chunk_);
RememberedSetSweeping::Iterate(
chunk_,
[this, task, &filter](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return CheckAndMarkObject(task, slot);
},
SlotSet::FREE_EMPTY_BUCKETS);
}
void MarkTypedPointers(YoungGenerationMarkingTask* task) {
......
......@@ -57,7 +57,6 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
FIELD(SlotSet* [kNumSets], SlotSet),
FIELD(ProgressBar, ProgressBar),
FIELD(std::atomic<intptr_t>, LiveByteCount),
FIELD(SlotSet*, SweepingSlotSet),
FIELD(TypedSlotsSet* [kNumSets], TypedSlotSet),
FIELD(void* [kNumSets], InvalidatedSlots),
FIELD(base::Mutex*, Mutex),
......
......@@ -131,14 +131,12 @@ MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_CODE], nullptr);
}
base::AsAtomicPointer::Release_Store(&sweeping_slot_set_, nullptr);
base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_NEW], nullptr);
base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_OLD], nullptr);
base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_SHARED],
nullptr);
invalidated_slots_[OLD_TO_NEW] = nullptr;
invalidated_slots_[OLD_TO_OLD] = nullptr;
invalidated_slots_[OLD_TO_SHARED] = nullptr;
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
// Not actually used but initialize anyway for predictability.
invalidated_slots_[OLD_TO_CODE] = nullptr;
......@@ -245,7 +243,6 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
possibly_empty_buckets_.Release();
ReleaseSlotSet<OLD_TO_NEW>();
ReleaseSweepingSlotSet();
ReleaseSlotSet<OLD_TO_OLD>();
if (V8_EXTERNAL_CODE_SPACE_BOOL) ReleaseSlotSet<OLD_TO_CODE>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
......@@ -278,10 +275,6 @@ SlotSet* MemoryChunk::AllocateSlotSet() {
return AllocateSlotSet(&slot_set_[type]);
}
SlotSet* MemoryChunk::AllocateSweepingSlotSet() {
return AllocateSlotSet(&sweeping_slot_set_);
}
SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
SlotSet* new_slot_set = SlotSet::Allocate(buckets());
SlotSet* old_slot_set = base::AsAtomicPointer::AcquireRelease_CompareAndSwap(
......@@ -306,10 +299,6 @@ void MemoryChunk::ReleaseSlotSet() {
ReleaseSlotSet(&slot_set_[type]);
}
void MemoryChunk::ReleaseSweepingSlotSet() {
ReleaseSlotSet(&sweeping_slot_set_);
}
void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
if (*slot_set) {
SlotSet::Delete(*slot_set, buckets());
......@@ -442,9 +431,6 @@ void MemoryChunk::ValidateOffsets(MemoryChunk* chunk) {
DCHECK_EQ(
reinterpret_cast<Address>(&chunk->live_byte_count_) - chunk->address(),
MemoryChunkLayout::kLiveByteCountOffset);
DCHECK_EQ(
reinterpret_cast<Address>(&chunk->sweeping_slot_set_) - chunk->address(),
MemoryChunkLayout::kSweepingSlotSetOffset);
DCHECK_EQ(
reinterpret_cast<Address>(&chunk->typed_slot_set_) - chunk->address(),
MemoryChunkLayout::kTypedSlotSetOffset);
......
......@@ -115,13 +115,6 @@ class MemoryChunk : public BasicMemoryChunk {
return slot_set_[type];
}
template <AccessMode access_mode = AccessMode::ATOMIC>
SlotSet* sweeping_slot_set() {
if (access_mode == AccessMode::ATOMIC)
return base::AsAtomicPointer::Acquire_Load(&sweeping_slot_set_);
return sweeping_slot_set_;
}
template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
TypedSlotSet* typed_slot_set() {
if (access_mode == AccessMode::ATOMIC)
......@@ -138,7 +131,7 @@ class MemoryChunk : public BasicMemoryChunk {
template <RememberedSetType type>
void ReleaseSlotSet();
void ReleaseSlotSet(SlotSet** slot_set);
void ReleaseSweepingSlotSet();
template <RememberedSetType type>
TypedSlotSet* AllocateTypedSlotSet();
// Not safe to be called concurrently.
......@@ -255,7 +248,6 @@ class MemoryChunk : public BasicMemoryChunk {
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
SlotSet* sweeping_slot_set_;
TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];
......
......@@ -134,14 +134,6 @@ void PagedSpace::RefillFreeList() {
});
}
// Also merge old-to-new remembered sets if not scavenging because of
// data races: One thread might iterate remembered set, while another
// thread merges them.
if (compaction_space_kind() !=
CompactionSpaceKind::kCompactionSpaceForScavenge) {
p->MergeOldToNewRememberedSets();
}
// Only during compaction pages can actually change ownership. This is
// safe because there exists no other competing action on the page links
// during compaction.
......@@ -187,8 +179,6 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
for (auto it = other->begin(); it != other->end();) {
Page* p = *(it++);
p->MergeOldToNewRememberedSets();
// Ensure that pages are initialized before objects on it are discovered by
// concurrent markers.
p->InitializationMemoryFence();
......@@ -642,14 +632,10 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
if (IsSweepingAllowedOnThread(local_heap)) {
// Now contribute to sweeping from background thread and then try to
// reallocate.
Sweeper::FreeSpaceMayContainInvalidatedSlots
invalidated_slots_in_free_space =
Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
const int kMaxPagesToSweep = 1;
int max_freed = collector->sweeper()->ParallelSweepSpace(
identity(), static_cast<int>(min_size_in_bytes), kMaxPagesToSweep,
invalidated_slots_in_free_space);
identity(), Sweeper::SweepingMode::kLazyOrConcurrent,
static_cast<int>(min_size_in_bytes), kMaxPagesToSweep);
RefillFreeList();
......@@ -1008,15 +994,14 @@ bool PagedSpace::ContributeToSweepingMain(int required_freed_bytes,
AllocationOrigin origin) {
// Cleanup invalidated old-to-new refs for compaction space in the
// final atomic pause.
Sweeper::FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
is_compaction_space() ? Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes
: Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
Sweeper::SweepingMode sweeping_mode =
is_compaction_space() ? Sweeper::SweepingMode::kEagerDuringGC
: Sweeper::SweepingMode::kLazyOrConcurrent;
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
collector->sweeper()->ParallelSweepSpace(identity(), required_freed_bytes,
max_pages,
invalidated_slots_in_free_space);
collector->sweeper()->ParallelSweepSpace(identity(), sweeping_mode,
required_freed_bytes, max_pages);
RefillFreeList();
return TryAllocationFromFreeListMain(size_in_bytes, origin);
}
......
......@@ -153,11 +153,8 @@ class RememberedSet : public AllStatic {
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
SlotSet* slot_set = chunk->slot_set<type>();
SlotSet* sweeping_slot_set =
type == OLD_TO_NEW ? chunk->sweeping_slot_set() : nullptr;
TypedSlotSet* typed_slot_set = chunk->typed_slot_set<type>();
if (slot_set != nullptr || sweeping_slot_set != nullptr ||
typed_slot_set != nullptr ||
if (slot_set != nullptr || typed_slot_set != nullptr ||
chunk->invalidated_slots<type>() != nullptr) {
callback(chunk);
}
......@@ -351,46 +348,6 @@ class UpdateTypedSlotHelper {
}
};
class RememberedSetSweeping {
public:
template <AccessMode access_mode>
static void Insert(MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr));
SlotSet* slot_set = chunk->sweeping_slot_set<access_mode>();
if (slot_set == nullptr) {
slot_set = chunk->AllocateSweepingSlotSet();
}
RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
}
static void Remove(MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr));
SlotSet* slot_set = chunk->sweeping_slot_set<AccessMode::ATOMIC>();
RememberedSetOperations::Remove(slot_set, chunk, slot_addr);
}
// Given a page and a range of slots in that page, this function removes the
// slots from the remembered set.
static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
SlotSet::EmptyBucketMode mode) {
SlotSet* slot_set = chunk->sweeping_slot_set();
RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode);
}
// Iterates and filters the remembered set in the given memory chunk with
// the given callback. The callback should take (Address slot) and return
// SlotCallbackResult.
//
// Notice that |mode| can only be of FREE* or PREFREE* if there are no other
// threads concurrently inserting slots.
template <typename Callback>
static int Iterate(MemoryChunk* chunk, Callback callback,
SlotSet::EmptyBucketMode mode) {
SlotSet* slot_set = chunk->sweeping_slot_set();
return RememberedSetOperations::Iterate(slot_set, chunk, callback, mode);
}
};
} // namespace internal
} // namespace v8
......
......@@ -124,13 +124,8 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
// Sweeper is stopped during scavenge, so we can directly
// insert into its remembered set here.
if (chunk->sweeping_slot_set()) {
RememberedSetSweeping::Insert<AccessMode::ATOMIC>(chunk,
slot.address());
} else {
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(chunk,
slot.address());
}
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(chunk,
slot.address());
}
SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
} else if (record_slots_ &&
......@@ -302,9 +297,8 @@ void ScavengerCollector::CollectGarbage() {
// access to the slots of a page and can completely avoid any locks on
// the page itself.
Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
filter_scope.FilterOldSpaceSweepingPages([](Page* page) {
return !page->ContainsSlots<OLD_TO_NEW>() && !page->sweeping_slot_set();
});
filter_scope.FilterOldSpaceSweepingPages(
[](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });
const bool is_logging = isolate_->LogObjectRelocation();
for (int i = 0; i < num_scavenge_tasks; ++i) {
......@@ -639,17 +633,6 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
&empty_chunks_local_);
}
if (page->sweeping_slot_set<AccessMode::NON_ATOMIC>() != nullptr) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
RememberedSetSweeping::Iterate(
page,
[this, &filter](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return CheckAndScavengeObject(heap_, slot);
},
SlotSet::KEEP_EMPTY_BUCKETS);
}
if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) {
// The invalidated slots are not needed after old-to-new slots were
// processed.
......
......@@ -99,33 +99,6 @@ Page* Page::ConvertNewToOld(Page* old_page) {
return new_page;
}
void Page::MoveOldToNewRememberedSetForSweeping() {
CHECK_NULL(sweeping_slot_set_);
sweeping_slot_set_ = slot_set_[OLD_TO_NEW];
slot_set_[OLD_TO_NEW] = nullptr;
}
void Page::MergeOldToNewRememberedSets() {
if (sweeping_slot_set_ == nullptr) return;
if (slot_set_[OLD_TO_NEW]) {
RememberedSet<OLD_TO_NEW>::Iterate(
this,
[this](MaybeObjectSlot slot) {
Address address = slot.address();
RememberedSetSweeping::Insert<AccessMode::NON_ATOMIC>(this, address);
return KEEP_SLOT;
},
SlotSet::KEEP_EMPTY_BUCKETS);
ReleaseSlotSet<OLD_TO_NEW>();
}
CHECK_NULL(slot_set_[OLD_TO_NEW]);
slot_set_[OLD_TO_NEW] = sweeping_slot_set_;
sweeping_slot_set_ = nullptr;
}
size_t Page::AvailableInFreeList() {
size_t sum = 0;
ForAllFreeListCategories([&sum](FreeListCategory* category) {
......@@ -172,7 +145,6 @@ size_t Page::ShrinkToHighWaterMark() {
// area would not be freed when deallocating this page.
DCHECK_NULL(slot_set<OLD_TO_NEW>());
DCHECK_NULL(slot_set<OLD_TO_OLD>());
DCHECK_NULL(sweeping_slot_set());
size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()),
MemoryAllocator::GetCommitPageSize());
......
......@@ -309,9 +309,6 @@ class Page : public MemoryChunk {
void AllocateFreeListCategories();
void ReleaseFreeListCategories();
void MoveOldToNewRememberedSetForSweeping();
void MergeOldToNewRememberedSets();
ActiveSystemPages* active_system_pages() { return &active_system_pages_; }
private:
......
......@@ -189,15 +189,6 @@ Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
return nullptr;
}
void Sweeper::MergeOldToNewRememberedSetsForSweptPages() {
base::MutexGuard guard(&mutex_);
ForAllSweepingSpaces([this](AllocationSpace space) {
SweptList& swept_list = swept_list_[GetSweepSpaceIndex(space)];
for (Page* p : swept_list) p->MergeOldToNewRememberedSets();
});
}
void Sweeper::EnsureCompleted() {
if (!sweeping_in_progress_) return;
......@@ -205,8 +196,9 @@ void Sweeper::EnsureCompleted() {
// If sweeping is not completed or not running at all, we try to complete it
// here.
ForAllSweepingSpaces(
[this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
ForAllSweepingSpaces([this](AllocationSpace space) {
ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0);
});
if (job_handle_ && job_handle_->IsValid()) job_handle_->Join();
......@@ -218,13 +210,14 @@ void Sweeper::EnsureCompleted() {
void Sweeper::DrainSweepingWorklistForSpace(AllocationSpace space) {
if (!sweeping_in_progress_) return;
ParallelSweepSpace(space, 0);
ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0);
}
void Sweeper::SupportConcurrentSweeping() {
ForAllSweepingSpaces([this](AllocationSpace space) {
const int kMaxPagesToSweepPerSpace = 1;
ParallelSweepSpace(space, 0, kMaxPagesToSweepPerSpace);
ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0,
kMaxPagesToSweepPerSpace);
});
}
......@@ -260,10 +253,17 @@ V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
V8_INLINE void Sweeper::CleanupRememberedSetEntriesForFreedMemory(
Address free_start, Address free_end, Page* page,
bool non_empty_typed_slots, FreeRangesMap* free_ranges_map,
InvalidatedSlotsCleanup* old_to_new_cleanup) {
SweepingMode sweeping_mode, InvalidatedSlotsCleanup* old_to_new_cleanup) {
DCHECK_LE(free_start, free_end);
RememberedSetSweeping::RemoveRange(page, free_start, free_end,
SlotSet::KEEP_EMPTY_BUCKETS);
if (sweeping_mode == SweepingMode::kEagerDuringGC) {
// New space and in consequence the old-to-new remembered set is always
// empty after a full GC, so we do not need to remove from it after the full
// GC. However, we wouldn't even be allowed to do that, since the main
// thread then owns the old-to-new remembered set. Removing from it from a
// sweeper thread would race with the main thread.
RememberedSet<OLD_TO_NEW>::RemoveRange(page, free_start, free_end,
SlotSet::KEEP_EMPTY_BUCKETS);
}
RememberedSet<OLD_TO_OLD>::RemoveRange(page, free_start, free_end,
SlotSet::KEEP_EMPTY_BUCKETS);
if (non_empty_typed_slots) {
......@@ -305,11 +305,10 @@ void Sweeper::ClearMarkBitsAndHandleLivenessStatistics(
}
}
int Sweeper::RawSweep(
Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode,
FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space,
const base::MutexGuard& page_guard) {
int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode,
SweepingMode sweeping_mode,
const base::MutexGuard& page_guard) {
Space* space = p->owner();
DCHECK_NOT_NULL(space);
DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
......@@ -350,8 +349,7 @@ int Sweeper::RawSweep(
// removed by mark compact's update pointers phase.
InvalidatedSlotsCleanup old_to_new_cleanup =
InvalidatedSlotsCleanup::NoCleanup(p);
if (invalidated_slots_in_free_space ==
FreeSpaceMayContainInvalidatedSlots::kYes)
if (sweeping_mode == SweepingMode::kEagerDuringGC)
old_to_new_cleanup = InvalidatedSlotsCleanup::OldToNew(p);
// The free ranges map is used for filtering typed slots.
......@@ -380,7 +378,7 @@ int Sweeper::RawSweep(
free_list_mode, free_space_mode));
CleanupRememberedSetEntriesForFreedMemory(
free_start, free_end, p, non_empty_typed_slots, &free_ranges_map,
&old_to_new_cleanup);
sweeping_mode, &old_to_new_cleanup);
}
Map map = object.map(cage_base, kAcquireLoad);
// Map might be forwarded during GC.
......@@ -410,7 +408,7 @@ int Sweeper::RawSweep(
free_list_mode, free_space_mode));
CleanupRememberedSetEntriesForFreedMemory(
free_start, free_end, p, non_empty_typed_slots, &free_ranges_map,
&old_to_new_cleanup);
sweeping_mode, &old_to_new_cleanup);
}
// Phase 3: Post process the page.
......@@ -445,9 +443,9 @@ bool Sweeper::ConcurrentSweepSpace(AllocationSpace identity,
if (page == nullptr) return true;
// Typed slot sets are only recorded on code pages. Code pages
// are not swept concurrently to the application to ensure W^X.
DCHECK(!page->typed_slot_set<OLD_TO_NEW>() &&
!page->typed_slot_set<OLD_TO_OLD>());
ParallelSweepPage(page, identity);
DCHECK_NULL((page->typed_slot_set<OLD_TO_NEW>()));
DCHECK_NULL((page->typed_slot_set<OLD_TO_OLD>()));
ParallelSweepPage(page, identity, SweepingMode::kLazyOrConcurrent);
}
return false;
}
......@@ -457,22 +455,21 @@ bool Sweeper::IncrementalSweepSpace(AllocationSpace identity) {
ThreadKind::kMain);
const double start = heap_->MonotonicallyIncreasingTimeInMs();
if (Page* page = GetSweepingPageSafe(identity)) {
ParallelSweepPage(page, identity);
ParallelSweepPage(page, identity, SweepingMode::kLazyOrConcurrent);
}
const double duration = heap_->MonotonicallyIncreasingTimeInMs() - start;
heap_->tracer()->AddIncrementalSweepingStep(duration);
return sweeping_list_[GetSweepSpaceIndex(identity)].empty();
}
int Sweeper::ParallelSweepSpace(
AllocationSpace identity, int required_freed_bytes, int max_pages,
FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
int Sweeper::ParallelSweepSpace(AllocationSpace identity,
SweepingMode sweeping_mode,
int required_freed_bytes, int max_pages) {
int max_freed = 0;
int pages_freed = 0;
Page* page = nullptr;
while ((page = GetSweepingPageSafe(identity)) != nullptr) {
int freed =
ParallelSweepPage(page, identity, invalidated_slots_in_free_space);
int freed = ParallelSweepPage(page, identity, sweeping_mode);
++pages_freed;
if (page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
// Free list of a never-allocate page will be dropped later on.
......@@ -487,9 +484,8 @@ int Sweeper::ParallelSweepSpace(
return max_freed;
}
int Sweeper::ParallelSweepPage(
Page* page, AllocationSpace identity,
FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity,
SweepingMode sweeping_mode) {
DCHECK(IsValidSweepingSpace(identity));
// The Scavenger may add already swept pages back.
......@@ -510,7 +506,7 @@ int Sweeper::ParallelSweepPage(
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode,
invalidated_slots_in_free_space, guard);
sweeping_mode, guard);
DCHECK(page->SweepingDone());
}
......@@ -529,7 +525,7 @@ void Sweeper::EnsurePageIsSwept(Page* page) {
if (IsValidSweepingSpace(space)) {
if (TryRemoveSweepingPageSafe(space, page)) {
// Page was successfully removed and can now be swept.
ParallelSweepPage(page, space);
ParallelSweepPage(page, space, SweepingMode::kLazyOrConcurrent);
} else {
// Some sweeper task already took ownership of that page, wait until
// sweeping is finished.
......@@ -596,7 +592,6 @@ void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
DCHECK(!category->is_linked(page->owner()->free_list()));
});
#endif // DEBUG
page->MoveOldToNewRememberedSetForSweeping();
page->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kPending);
heap_->paged_space(space)->IncreaseAllocatedBytes(
marking_state_->live_bytes(page), page);
......@@ -693,7 +688,7 @@ void Sweeper::MakeIterable(Page* page) {
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
RawSweep(page, IGNORE_FREE_LIST, free_space_mode,
FreeSpaceMayContainInvalidatedSlots::kNo, guard);
SweepingMode::kLazyOrConcurrent, guard);
}
} // namespace internal
......
......@@ -73,7 +73,7 @@ class Sweeper {
enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
enum class FreeSpaceMayContainInvalidatedSlots { kYes, kNo };
enum class SweepingMode { kEagerDuringGC, kLazyOrConcurrent };
Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state);
......@@ -83,24 +83,18 @@ class Sweeper {
void AddPage(AllocationSpace space, Page* page, AddPageMode mode);
int ParallelSweepSpace(
AllocationSpace identity, int required_freed_bytes, int max_pages = 0,
FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
FreeSpaceMayContainInvalidatedSlots::kNo);
int ParallelSweepPage(
Page* page, AllocationSpace identity,
FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
FreeSpaceMayContainInvalidatedSlots::kNo);
int ParallelSweepSpace(AllocationSpace identity, SweepingMode sweeping_mode,
int required_freed_bytes, int max_pages = 0);
int ParallelSweepPage(Page* page, AllocationSpace identity,
SweepingMode sweeping_mode);
void EnsurePageIsSwept(Page* page);
void ScheduleIncrementalSweepingTask();
int RawSweep(
Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode,
FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space,
const base::MutexGuard& page_guard);
int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode,
SweepingMode sweeping_mode, const base::MutexGuard& page_guard);
// After calling this function sweeping is considered to be in progress
// and the main thread can sweep lazily, but the background sweeper tasks
......@@ -119,7 +113,6 @@ class Sweeper {
void AddPageForIterability(Page* page);
void StartIterabilityTasks();
void EnsureIterabilityCompleted();
void MergeOldToNewRememberedSetsForSweptPages();
private:
class IncrementalSweeperTask;
......@@ -152,7 +145,7 @@ class Sweeper {
void CleanupRememberedSetEntriesForFreedMemory(
Address free_start, Address free_end, Page* page,
bool non_empty_typed_slots, FreeRangesMap* free_ranges_map,
InvalidatedSlotsCleanup* old_to_new_cleanup);
SweepingMode sweeping_mode, InvalidatedSlotsCleanup* old_to_new_cleanup);
// Helper function for RawSweep. Clears invalid typed slots in the given free
// ranges.
......