Commit ca505562 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Remove sweeping_slot_set_ from MemoryChunk

Since the new space is always empty after a full GC, the old-to-new
remembered set is also always empty after a full GC. This means we can
get rid of the sweeping_slot_set_.

This slot set was used to allow the main thread to insert into the
old-to-new remembered set non-atomically. The sweeping slot set was
owned by the sweeper, which deleted slots located in freed memory from
it, while the main thread started out with an empty old-to-new
remembered set. After sweeping, the two slot sets were merged again.
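
For illustration, a simplified sketch of the two-slot-set scheme that is
being removed (stand-in types only; the real implementation is the
Page::MoveOldToNewRememberedSetForSweeping and
Page::MergeOldToNewRememberedSets code deleted further down in this diff):

  // Stand-ins for SlotSet/Page; illustrative only, not the V8 types.
  #include <cstdint>
  #include <set>
  #include <utility>

  using SlotSet = std::set<uintptr_t>;

  struct PageSketch {
    SlotSet old_to_new;  // main thread inserts here, non-atomically
    SlotSet sweeping;    // owned by the sweeper while sweeping runs

    // Before sweeping: hand the current old-to-new slots to the sweeper
    // and leave the main thread with a fresh, empty set.
    void MoveOldToNewForSweeping() {
      sweeping = std::move(old_to_new);
      old_to_new.clear();
    }

    // After sweeping: fold the slots the main thread recorded in the
    // meantime back in; the result becomes the old-to-new set again.
    void MergeAfterSweeping() {
      sweeping.insert(old_to_new.begin(), old_to_new.end());
      old_to_new = std::move(sweeping);
      sweeping.clear();
    }
  };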

The sweeper now needs to behave differently during a GC: when sweeping
a page during the full GC, it needs to delete old-to-new slots located
in free memory.

Outside of the GC the sweeper is no longer allowed to remove slots
from the old-to-new remembered set, since this would race with the
main thread, which adds slots to that set while the sweeper is
running. However, there should be no recorded slots in free memory
anyway. DCHECKing this is tricky though, because we would need to
synchronize with the main thread right-trimming objects, and at least
String::MakeThin only deletes slots after the map release-store.
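
As a rough sketch (simplified stand-in types, not the real V8
signatures), the sweeper's free-memory cleanup now hinges on a sweeping
mode, mirroring the Sweeper::SweepingMode changes below:

  // Illustrative only: SlotSet/Address stand-ins instead of V8's types.
  #include <cstdint>
  #include <set>

  using Address = uintptr_t;
  using SlotSet = std::set<Address>;

  enum class SweepingMode { kEagerDuringGC, kLazyOrConcurrent };

  struct PageSketch {
    SlotSet old_to_new;  // owned by the main thread outside the GC pause
    SlotSet old_to_old;  // always maintained by the sweeper
  };

  void RemoveRange(SlotSet* set, Address start, Address end) {
    set->erase(set->lower_bound(start), set->lower_bound(end));
  }

  // Old-to-new entries in freed memory are only dropped while sweeping
  // runs inside the full-GC pause; during lazy or concurrent sweeping the
  // set belongs to the main thread and must be left alone.
  void CleanupFreedRange(PageSketch* page, Address free_start,
                         Address free_end, SweepingMode mode) {
    if (mode == SweepingMode::kEagerDuringGC) {
      RemoveRange(&page->old_to_new, free_start, free_end);
    }
    RemoveRange(&page->old_to_old, free_start, free_end);
  }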

Bug: v8:12760
Change-Id: Ic0301851a714e894c3040595f456ab93b5875c81
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3560638
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79713}
parent c3ed607d
@@ -4724,18 +4724,6 @@ void CollectSlots(MemoryChunk* chunk, Address start, Address end,
         return KEEP_SLOT;
       },
       SlotSet::FREE_EMPTY_BUCKETS);
-  if (direction == OLD_TO_NEW) {
-    CHECK(chunk->SweepingDone());
-    RememberedSetSweeping::Iterate(
-        chunk,
-        [start, end, untyped](MaybeObjectSlot slot) {
-          if (start <= slot.address() && slot.address() < end) {
-            untyped->insert(slot.address());
-          }
-          return KEEP_SLOT;
-        },
-        SlotSet::FREE_EMPTY_BUCKETS);
-  }
   RememberedSet<direction>::IterateTyped(
       chunk, [=](SlotType type, Address slot) {
         if (start <= slot && slot < end) {
......
@@ -80,7 +80,6 @@ Address LargePage::GetAddressToShrink(Address object_address,
 }
 
 void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
-  DCHECK_NULL(this->sweeping_slot_set());
   RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
                                          SlotSet::FREE_EMPTY_BUCKETS);
   RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
......
@@ -1517,7 +1517,6 @@ class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
       MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
       DCHECK(chunk->SweepingDone());
-      DCHECK_NULL(chunk->sweeping_slot_set<AccessMode::NON_ATOMIC>());
       RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
     } else if (p->IsEvacuationCandidate()) {
       if (V8_EXTERNAL_CODE_SPACE_BOOL &&
@@ -2634,7 +2633,6 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
     MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start);
     // Clear any recorded slots for the compiled data as being invalid.
-    DCHECK_NULL(chunk->sweeping_slot_set());
     RememberedSet<OLD_TO_NEW>::RemoveRange(
         chunk, compiled_data_start, compiled_data_start + compiled_data_size,
         SlotSet::FREE_EMPTY_BUCKETS);
@@ -2887,7 +2885,6 @@ void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array,
   Address start = array.GetDescriptorSlot(new_nof_all_descriptors).address();
   Address end = array.GetDescriptorSlot(old_nof_all_descriptors).address();
   MemoryChunk* chunk = MemoryChunk::FromHeapObject(array);
-  DCHECK_NULL(chunk->sweeping_slot_set());
   RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start, end,
                                          SlotSet::FREE_EMPTY_BUCKETS);
   RememberedSet<OLD_TO_OLD>::RemoveRange(chunk, start, end,
@@ -3978,13 +3975,6 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
         this, std::move(evacuation_items), nullptr);
   }
 
-  // After evacuation there might still be swept pages that weren't
-  // added to one of the compaction space but still reside in the
-  // sweeper's swept_list_. Merge remembered sets for those pages as
-  // well such that after mark-compact all pages either store slots
-  // in the sweeping or old-to-new remembered set.
-  sweeper()->MergeOldToNewRememberedSetsForSweptPages();
-
   const size_t aborted_pages = PostProcessEvacuationCandidates();
 
   if (FLAG_trace_evacuation) {
@@ -4375,11 +4365,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
 
   void UpdateUntypedPointers() {
     if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
-      DCHECK_IMPLIES(
-          collector == GarbageCollector::MARK_COMPACTOR,
-          chunk_->SweepingDone() &&
-              chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>() == nullptr);
       InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
       int slots = RememberedSet<OLD_TO_NEW>::Iterate(
           chunk_,
@@ -4396,30 +4381,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
       }
     }
 
-    if (chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>()) {
-      DCHECK_IMPLIES(
-          collector == GarbageCollector::MARK_COMPACTOR,
-          !chunk_->SweepingDone() &&
-              (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>()) ==
-                  nullptr);
-      DCHECK(!chunk_->IsLargePage());
-
-      InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
-      int slots = RememberedSetSweeping::Iterate(
-          chunk_,
-          [this, &filter](MaybeObjectSlot slot) {
-            if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
-            return CheckAndUpdateOldToNewSlot(slot);
-          },
-          SlotSet::FREE_EMPTY_BUCKETS);
-      DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR, slots == 0);
-      if (slots == 0) {
-        chunk_->ReleaseSweepingSlotSet();
-      }
-    }
-
     if (chunk_->invalidated_slots<OLD_TO_NEW>() != nullptr) {
       // The invalidated slots are not needed after old-to-new slots were
       // processed.
@@ -4534,18 +4495,15 @@ int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
     const bool contains_old_to_new_slots =
         chunk->slot_set<OLD_TO_NEW>() != nullptr ||
         chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
-    const bool contains_old_to_new_sweeping_slots =
-        chunk->sweeping_slot_set() != nullptr;
     const bool contains_old_to_old_invalidated_slots =
         chunk->invalidated_slots<OLD_TO_OLD>() != nullptr;
     const bool contains_old_to_new_invalidated_slots =
         chunk->invalidated_slots<OLD_TO_NEW>() != nullptr;
-    if (!contains_old_to_new_slots && !contains_old_to_new_sweeping_slots &&
-        !contains_old_to_old_slots && !contains_old_to_old_invalidated_slots &&
+    if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
+        !contains_old_to_old_invalidated_slots &&
         !contains_old_to_new_invalidated_slots && !contains_old_to_code_slots)
       continue;
     if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
-        contains_old_to_new_sweeping_slots ||
         contains_old_to_old_invalidated_slots ||
         contains_old_to_new_invalidated_slots) {
       items->emplace_back(CreateRememberedSetUpdatingItem(chunk, mode));
@@ -4751,8 +4709,6 @@ void ReRecordPage(
   // might not have recorded them in first place.
 
   // Remove outdated slots.
-  RememberedSetSweeping::RemoveRange(page, page->address(), failed_start,
-                                     SlotSet::FREE_EMPTY_BUCKETS);
   RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(), failed_start,
                                          SlotSet::FREE_EMPTY_BUCKETS);
   RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
@@ -5629,14 +5585,6 @@ class PageMarkingItem : public ParallelWorkItem {
           return CheckAndMarkObject(task, slot);
         },
         SlotSet::FREE_EMPTY_BUCKETS);
-    filter = InvalidatedSlotsFilter::OldToNew(chunk_);
-    RememberedSetSweeping::Iterate(
-        chunk_,
-        [this, task, &filter](MaybeObjectSlot slot) {
-          if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
-          return CheckAndMarkObject(task, slot);
-        },
-        SlotSet::FREE_EMPTY_BUCKETS);
   }
 
   void MarkTypedPointers(YoungGenerationMarkingTask* task) {
......
@@ -57,7 +57,6 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
       FIELD(SlotSet* [kNumSets], SlotSet),
       FIELD(ProgressBar, ProgressBar),
       FIELD(std::atomic<intptr_t>, LiveByteCount),
-      FIELD(SlotSet*, SweepingSlotSet),
       FIELD(TypedSlotsSet* [kNumSets], TypedSlotSet),
       FIELD(void* [kNumSets], InvalidatedSlots),
       FIELD(base::Mutex*, Mutex),
......
@@ -131,14 +131,12 @@ MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
   if (V8_EXTERNAL_CODE_SPACE_BOOL) {
     base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_CODE], nullptr);
   }
-  base::AsAtomicPointer::Release_Store(&sweeping_slot_set_, nullptr);
   base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_NEW], nullptr);
   base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_OLD], nullptr);
   base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_SHARED],
                                        nullptr);
   invalidated_slots_[OLD_TO_NEW] = nullptr;
   invalidated_slots_[OLD_TO_OLD] = nullptr;
-  invalidated_slots_[OLD_TO_SHARED] = nullptr;
   if (V8_EXTERNAL_CODE_SPACE_BOOL) {
     // Not actually used but initialize anyway for predictability.
     invalidated_slots_[OLD_TO_CODE] = nullptr;
@@ -245,7 +243,6 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
   possibly_empty_buckets_.Release();
   ReleaseSlotSet<OLD_TO_NEW>();
-  ReleaseSweepingSlotSet();
   ReleaseSlotSet<OLD_TO_OLD>();
   if (V8_EXTERNAL_CODE_SPACE_BOOL) ReleaseSlotSet<OLD_TO_CODE>();
   ReleaseTypedSlotSet<OLD_TO_NEW>();
@@ -278,10 +275,6 @@ SlotSet* MemoryChunk::AllocateSlotSet() {
   return AllocateSlotSet(&slot_set_[type]);
 }
 
-SlotSet* MemoryChunk::AllocateSweepingSlotSet() {
-  return AllocateSlotSet(&sweeping_slot_set_);
-}
-
 SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
   SlotSet* new_slot_set = SlotSet::Allocate(buckets());
   SlotSet* old_slot_set = base::AsAtomicPointer::AcquireRelease_CompareAndSwap(
@@ -306,10 +299,6 @@ void MemoryChunk::ReleaseSlotSet() {
   ReleaseSlotSet(&slot_set_[type]);
 }
 
-void MemoryChunk::ReleaseSweepingSlotSet() {
-  ReleaseSlotSet(&sweeping_slot_set_);
-}
-
 void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
   if (*slot_set) {
     SlotSet::Delete(*slot_set, buckets());
@@ -442,9 +431,6 @@ void MemoryChunk::ValidateOffsets(MemoryChunk* chunk) {
   DCHECK_EQ(
       reinterpret_cast<Address>(&chunk->live_byte_count_) - chunk->address(),
       MemoryChunkLayout::kLiveByteCountOffset);
-  DCHECK_EQ(
-      reinterpret_cast<Address>(&chunk->sweeping_slot_set_) - chunk->address(),
-      MemoryChunkLayout::kSweepingSlotSetOffset);
   DCHECK_EQ(
       reinterpret_cast<Address>(&chunk->typed_slot_set_) - chunk->address(),
       MemoryChunkLayout::kTypedSlotSetOffset);
......
@@ -115,13 +115,6 @@ class MemoryChunk : public BasicMemoryChunk {
     return slot_set_[type];
   }
 
-  template <AccessMode access_mode = AccessMode::ATOMIC>
-  SlotSet* sweeping_slot_set() {
-    if (access_mode == AccessMode::ATOMIC)
-      return base::AsAtomicPointer::Acquire_Load(&sweeping_slot_set_);
-    return sweeping_slot_set_;
-  }
-
   template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
   TypedSlotSet* typed_slot_set() {
     if (access_mode == AccessMode::ATOMIC)
@@ -138,7 +131,7 @@ class MemoryChunk : public BasicMemoryChunk {
   template <RememberedSetType type>
   void ReleaseSlotSet();
   void ReleaseSlotSet(SlotSet** slot_set);
-  void ReleaseSweepingSlotSet();
   template <RememberedSetType type>
   TypedSlotSet* AllocateTypedSlotSet();
   // Not safe to be called concurrently.
@@ -255,7 +248,6 @@ class MemoryChunk : public BasicMemoryChunk {
   // A single slot set for small pages (of size kPageSize) or an array of slot
   // set for large pages. In the latter case the number of entries in the array
   // is ceil(size() / kPageSize).
-  SlotSet* sweeping_slot_set_;
   TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
   InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];
......
@@ -134,14 +134,6 @@ void PagedSpace::RefillFreeList() {
       });
     }
 
-    // Also merge old-to-new remembered sets if not scavenging because of
-    // data races: One thread might iterate remembered set, while another
-    // thread merges them.
-    if (compaction_space_kind() !=
-        CompactionSpaceKind::kCompactionSpaceForScavenge) {
-      p->MergeOldToNewRememberedSets();
-    }
-
     // Only during compaction pages can actually change ownership. This is
     // safe because there exists no other competing action on the page links
     // during compaction.
@@ -187,8 +179,6 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
   for (auto it = other->begin(); it != other->end();) {
     Page* p = *(it++);
 
-    p->MergeOldToNewRememberedSets();
-
     // Ensure that pages are initialized before objects on it are discovered by
     // concurrent markers.
     p->InitializationMemoryFence();
@@ -642,14 +632,10 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
     if (IsSweepingAllowedOnThread(local_heap)) {
       // Now contribute to sweeping from background thread and then try to
      // reallocate.
-      Sweeper::FreeSpaceMayContainInvalidatedSlots
-          invalidated_slots_in_free_space =
-              Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
-
       const int kMaxPagesToSweep = 1;
       int max_freed = collector->sweeper()->ParallelSweepSpace(
-          identity(), static_cast<int>(min_size_in_bytes), kMaxPagesToSweep,
-          invalidated_slots_in_free_space);
+          identity(), Sweeper::SweepingMode::kLazyOrConcurrent,
+          static_cast<int>(min_size_in_bytes), kMaxPagesToSweep);
 
       RefillFreeList();
@@ -1008,15 +994,14 @@ bool PagedSpace::ContributeToSweepingMain(int required_freed_bytes,
                                           AllocationOrigin origin) {
   // Cleanup invalidated old-to-new refs for compaction space in the
   // final atomic pause.
-  Sweeper::FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
-      is_compaction_space() ? Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes
-                            : Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
+  Sweeper::SweepingMode sweeping_mode =
+      is_compaction_space() ? Sweeper::SweepingMode::kEagerDuringGC
+                            : Sweeper::SweepingMode::kLazyOrConcurrent;
 
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   if (collector->sweeping_in_progress()) {
-    collector->sweeper()->ParallelSweepSpace(identity(), required_freed_bytes,
-                                             max_pages,
-                                             invalidated_slots_in_free_space);
+    collector->sweeper()->ParallelSweepSpace(identity(), sweeping_mode,
+                                             required_freed_bytes, max_pages);
     RefillFreeList();
     return TryAllocationFromFreeListMain(size_in_bytes, origin);
   }
......
@@ -153,11 +153,8 @@ class RememberedSet : public AllStatic {
     MemoryChunk* chunk;
     while ((chunk = it.next()) != nullptr) {
       SlotSet* slot_set = chunk->slot_set<type>();
-      SlotSet* sweeping_slot_set =
-          type == OLD_TO_NEW ? chunk->sweeping_slot_set() : nullptr;
       TypedSlotSet* typed_slot_set = chunk->typed_slot_set<type>();
-      if (slot_set != nullptr || sweeping_slot_set != nullptr ||
-          typed_slot_set != nullptr ||
+      if (slot_set != nullptr || typed_slot_set != nullptr ||
           chunk->invalidated_slots<type>() != nullptr) {
         callback(chunk);
       }
@@ -351,46 +348,6 @@ class UpdateTypedSlotHelper {
   }
 };
 
-class RememberedSetSweeping {
- public:
-  template <AccessMode access_mode>
-  static void Insert(MemoryChunk* chunk, Address slot_addr) {
-    DCHECK(chunk->Contains(slot_addr));
-    SlotSet* slot_set = chunk->sweeping_slot_set<access_mode>();
-    if (slot_set == nullptr) {
-      slot_set = chunk->AllocateSweepingSlotSet();
-    }
-    RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
-  }
-
-  static void Remove(MemoryChunk* chunk, Address slot_addr) {
-    DCHECK(chunk->Contains(slot_addr));
-    SlotSet* slot_set = chunk->sweeping_slot_set<AccessMode::ATOMIC>();
-    RememberedSetOperations::Remove(slot_set, chunk, slot_addr);
-  }
-
-  // Given a page and a range of slots in that page, this function removes the
-  // slots from the remembered set.
-  static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
-                          SlotSet::EmptyBucketMode mode) {
-    SlotSet* slot_set = chunk->sweeping_slot_set();
-    RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode);
-  }
-
-  // Iterates and filters the remembered set in the given memory chunk with
-  // the given callback. The callback should take (Address slot) and return
-  // SlotCallbackResult.
-  //
-  // Notice that |mode| can only be of FREE* or PREFREE* if there are no other
-  // threads concurrently inserting slots.
-  template <typename Callback>
-  static int Iterate(MemoryChunk* chunk, Callback callback,
-                     SlotSet::EmptyBucketMode mode) {
-    SlotSet* slot_set = chunk->sweeping_slot_set();
-    return RememberedSetOperations::Iterate(slot_set, chunk, callback, mode);
-  }
-};
-
 }  // namespace internal
 }  // namespace v8
......
@@ -124,13 +124,8 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
       // Sweeper is stopped during scavenge, so we can directly
      // insert into its remembered set here.
-      if (chunk->sweeping_slot_set()) {
-        RememberedSetSweeping::Insert<AccessMode::ATOMIC>(chunk,
-                                                          slot.address());
-      } else {
-        RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(chunk,
-                                                              slot.address());
-      }
+      RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(chunk,
+                                                            slot.address());
     }
     SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
   } else if (record_slots_ &&
@@ -302,9 +297,8 @@ void ScavengerCollector::CollectGarbage() {
     // access to the slots of a page and can completely avoid any locks on
     // the page itself.
     Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
-    filter_scope.FilterOldSpaceSweepingPages([](Page* page) {
-      return !page->ContainsSlots<OLD_TO_NEW>() && !page->sweeping_slot_set();
-    });
+    filter_scope.FilterOldSpaceSweepingPages(
+        [](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });
 
     const bool is_logging = isolate_->LogObjectRelocation();
     for (int i = 0; i < num_scavenge_tasks; ++i) {
@@ -639,17 +633,6 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
         &empty_chunks_local_);
   }
 
-  if (page->sweeping_slot_set<AccessMode::NON_ATOMIC>() != nullptr) {
-    InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
-    RememberedSetSweeping::Iterate(
-        page,
-        [this, &filter](MaybeObjectSlot slot) {
-          if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
-          return CheckAndScavengeObject(heap_, slot);
-        },
-        SlotSet::KEEP_EMPTY_BUCKETS);
-  }
-
   if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) {
     // The invalidated slots are not needed after old-to-new slots were
     // processed.
......
@@ -99,33 +99,6 @@ Page* Page::ConvertNewToOld(Page* old_page) {
   return new_page;
 }
 
-void Page::MoveOldToNewRememberedSetForSweeping() {
-  CHECK_NULL(sweeping_slot_set_);
-  sweeping_slot_set_ = slot_set_[OLD_TO_NEW];
-  slot_set_[OLD_TO_NEW] = nullptr;
-}
-
-void Page::MergeOldToNewRememberedSets() {
-  if (sweeping_slot_set_ == nullptr) return;
-
-  if (slot_set_[OLD_TO_NEW]) {
-    RememberedSet<OLD_TO_NEW>::Iterate(
-        this,
-        [this](MaybeObjectSlot slot) {
-          Address address = slot.address();
-          RememberedSetSweeping::Insert<AccessMode::NON_ATOMIC>(this, address);
-          return KEEP_SLOT;
-        },
-        SlotSet::KEEP_EMPTY_BUCKETS);
-
-    ReleaseSlotSet<OLD_TO_NEW>();
-  }
-
-  CHECK_NULL(slot_set_[OLD_TO_NEW]);
-  slot_set_[OLD_TO_NEW] = sweeping_slot_set_;
-  sweeping_slot_set_ = nullptr;
-}
-
 size_t Page::AvailableInFreeList() {
   size_t sum = 0;
   ForAllFreeListCategories([&sum](FreeListCategory* category) {
@@ -172,7 +145,6 @@ size_t Page::ShrinkToHighWaterMark() {
   // area would not be freed when deallocating this page.
   DCHECK_NULL(slot_set<OLD_TO_NEW>());
   DCHECK_NULL(slot_set<OLD_TO_OLD>());
-  DCHECK_NULL(sweeping_slot_set());
   size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()),
                             MemoryAllocator::GetCommitPageSize());
......
@@ -309,9 +309,6 @@ class Page : public MemoryChunk {
   void AllocateFreeListCategories();
   void ReleaseFreeListCategories();
 
-  void MoveOldToNewRememberedSetForSweeping();
-  void MergeOldToNewRememberedSets();
-
   ActiveSystemPages* active_system_pages() { return &active_system_pages_; }
 
  private:
......
@@ -189,15 +189,6 @@ Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
   return nullptr;
 }
 
-void Sweeper::MergeOldToNewRememberedSetsForSweptPages() {
-  base::MutexGuard guard(&mutex_);
-  ForAllSweepingSpaces([this](AllocationSpace space) {
-    SweptList& swept_list = swept_list_[GetSweepSpaceIndex(space)];
-    for (Page* p : swept_list) p->MergeOldToNewRememberedSets();
-  });
-}
-
 void Sweeper::EnsureCompleted() {
   if (!sweeping_in_progress_) return;
@@ -205,8 +196,9 @@ void Sweeper::EnsureCompleted() {
   // If sweeping is not completed or not running at all, we try to complete it
   // here.
-  ForAllSweepingSpaces(
-      [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
+  ForAllSweepingSpaces([this](AllocationSpace space) {
+    ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0);
+  });
 
   if (job_handle_ && job_handle_->IsValid()) job_handle_->Join();
@@ -218,13 +210,14 @@ void Sweeper::EnsureCompleted() {
 
 void Sweeper::DrainSweepingWorklistForSpace(AllocationSpace space) {
   if (!sweeping_in_progress_) return;
-  ParallelSweepSpace(space, 0);
+  ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0);
 }
 
 void Sweeper::SupportConcurrentSweeping() {
   ForAllSweepingSpaces([this](AllocationSpace space) {
     const int kMaxPagesToSweepPerSpace = 1;
-    ParallelSweepSpace(space, 0, kMaxPagesToSweepPerSpace);
+    ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0,
+                       kMaxPagesToSweepPerSpace);
   });
 }
@@ -260,10 +253,17 @@ V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
 V8_INLINE void Sweeper::CleanupRememberedSetEntriesForFreedMemory(
     Address free_start, Address free_end, Page* page,
     bool non_empty_typed_slots, FreeRangesMap* free_ranges_map,
-    InvalidatedSlotsCleanup* old_to_new_cleanup) {
+    SweepingMode sweeping_mode, InvalidatedSlotsCleanup* old_to_new_cleanup) {
   DCHECK_LE(free_start, free_end);
-  RememberedSetSweeping::RemoveRange(page, free_start, free_end,
-                                     SlotSet::KEEP_EMPTY_BUCKETS);
+  if (sweeping_mode == SweepingMode::kEagerDuringGC) {
+    // New space and in consequence the old-to-new remembered set is always
+    // empty after a full GC, so we do not need to remove from it after the
+    // full GC. However, we wouldn't even be allowed to do that, since the main
+    // thread then owns the old-to-new remembered set. Removing from it from a
+    // sweeper thread would race with the main thread.
+    RememberedSet<OLD_TO_NEW>::RemoveRange(page, free_start, free_end,
+                                           SlotSet::KEEP_EMPTY_BUCKETS);
+  }
   RememberedSet<OLD_TO_OLD>::RemoveRange(page, free_start, free_end,
                                          SlotSet::KEEP_EMPTY_BUCKETS);
   if (non_empty_typed_slots) {
@@ -305,11 +305,10 @@ void Sweeper::ClearMarkBitsAndHandleLivenessStatistics(
   }
 }
 
-int Sweeper::RawSweep(
-    Page* p, FreeListRebuildingMode free_list_mode,
-    FreeSpaceTreatmentMode free_space_mode,
-    FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space,
-    const base::MutexGuard& page_guard) {
+int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
+                      FreeSpaceTreatmentMode free_space_mode,
+                      SweepingMode sweeping_mode,
+                      const base::MutexGuard& page_guard) {
   Space* space = p->owner();
   DCHECK_NOT_NULL(space);
   DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
@@ -350,8 +349,7 @@ int Sweeper::RawSweep(
   // removed by mark compact's update pointers phase.
   InvalidatedSlotsCleanup old_to_new_cleanup =
       InvalidatedSlotsCleanup::NoCleanup(p);
-  if (invalidated_slots_in_free_space ==
-      FreeSpaceMayContainInvalidatedSlots::kYes)
+  if (sweeping_mode == SweepingMode::kEagerDuringGC)
     old_to_new_cleanup = InvalidatedSlotsCleanup::OldToNew(p);
 
   // The free ranges map is used for filtering typed slots.
@@ -380,7 +378,7 @@ int Sweeper::RawSweep(
                               free_list_mode, free_space_mode));
       CleanupRememberedSetEntriesForFreedMemory(
           free_start, free_end, p, non_empty_typed_slots, &free_ranges_map,
-          &old_to_new_cleanup);
+          sweeping_mode, &old_to_new_cleanup);
     }
     Map map = object.map(cage_base, kAcquireLoad);
     // Map might be forwarded during GC.
@@ -410,7 +408,7 @@ int Sweeper::RawSweep(
                             free_list_mode, free_space_mode));
     CleanupRememberedSetEntriesForFreedMemory(
         free_start, free_end, p, non_empty_typed_slots, &free_ranges_map,
-        &old_to_new_cleanup);
+        sweeping_mode, &old_to_new_cleanup);
   }
 
   // Phase 3: Post process the page.
@@ -445,9 +443,9 @@ bool Sweeper::ConcurrentSweepSpace(AllocationSpace identity,
     if (page == nullptr) return true;
     // Typed slot sets are only recorded on code pages. Code pages
     // are not swept concurrently to the application to ensure W^X.
-    DCHECK(!page->typed_slot_set<OLD_TO_NEW>() &&
-           !page->typed_slot_set<OLD_TO_OLD>());
-    ParallelSweepPage(page, identity);
+    DCHECK_NULL((page->typed_slot_set<OLD_TO_NEW>()));
+    DCHECK_NULL((page->typed_slot_set<OLD_TO_OLD>()));
+    ParallelSweepPage(page, identity, SweepingMode::kLazyOrConcurrent);
   }
   return false;
 }
@@ -457,22 +455,21 @@ bool Sweeper::IncrementalSweepSpace(AllocationSpace identity) {
                  ThreadKind::kMain);
   const double start = heap_->MonotonicallyIncreasingTimeInMs();
   if (Page* page = GetSweepingPageSafe(identity)) {
-    ParallelSweepPage(page, identity);
+    ParallelSweepPage(page, identity, SweepingMode::kLazyOrConcurrent);
   }
   const double duration = heap_->MonotonicallyIncreasingTimeInMs() - start;
   heap_->tracer()->AddIncrementalSweepingStep(duration);
   return sweeping_list_[GetSweepSpaceIndex(identity)].empty();
 }
 
-int Sweeper::ParallelSweepSpace(
-    AllocationSpace identity, int required_freed_bytes, int max_pages,
-    FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
+int Sweeper::ParallelSweepSpace(AllocationSpace identity,
+                                SweepingMode sweeping_mode,
+                                int required_freed_bytes, int max_pages) {
   int max_freed = 0;
   int pages_freed = 0;
   Page* page = nullptr;
   while ((page = GetSweepingPageSafe(identity)) != nullptr) {
-    int freed =
-        ParallelSweepPage(page, identity, invalidated_slots_in_free_space);
+    int freed = ParallelSweepPage(page, identity, sweeping_mode);
     ++pages_freed;
     if (page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
       // Free list of a never-allocate page will be dropped later on.
@@ -487,9 +484,8 @@ int Sweeper::ParallelSweepSpace(
   return max_freed;
 }
 
-int Sweeper::ParallelSweepPage(
-    Page* page, AllocationSpace identity,
-    FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
+int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity,
+                               SweepingMode sweeping_mode) {
   DCHECK(IsValidSweepingSpace(identity));
 
   // The Scavenger may add already swept pages back.
@@ -510,7 +506,7 @@ int Sweeper::ParallelSweepPage(
     const FreeSpaceTreatmentMode free_space_mode =
         Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
     max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode,
-                         invalidated_slots_in_free_space, guard);
+                         sweeping_mode, guard);
     DCHECK(page->SweepingDone());
   }
@@ -529,7 +525,7 @@ void Sweeper::EnsurePageIsSwept(Page* page) {
   if (IsValidSweepingSpace(space)) {
     if (TryRemoveSweepingPageSafe(space, page)) {
       // Page was successfully removed and can now be swept.
-      ParallelSweepPage(page, space);
+      ParallelSweepPage(page, space, SweepingMode::kLazyOrConcurrent);
     } else {
       // Some sweeper task already took ownership of that page, wait until
       // sweeping is finished.
@@ -596,7 +592,6 @@ void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
     DCHECK(!category->is_linked(page->owner()->free_list()));
   });
 #endif  // DEBUG
-  page->MoveOldToNewRememberedSetForSweeping();
   page->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kPending);
   heap_->paged_space(space)->IncreaseAllocatedBytes(
       marking_state_->live_bytes(page), page);
@@ -693,7 +688,7 @@ void Sweeper::MakeIterable(Page* page) {
   const FreeSpaceTreatmentMode free_space_mode =
       Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
   RawSweep(page, IGNORE_FREE_LIST, free_space_mode,
-           FreeSpaceMayContainInvalidatedSlots::kNo, guard);
+           SweepingMode::kLazyOrConcurrent, guard);
 }
 
 }  // namespace internal
......
@@ -73,7 +73,7 @@ class Sweeper {
   enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
   enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
-  enum class FreeSpaceMayContainInvalidatedSlots { kYes, kNo };
+  enum class SweepingMode { kEagerDuringGC, kLazyOrConcurrent };
 
   Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state);
@@ -83,24 +83,18 @@ class Sweeper {
   void AddPage(AllocationSpace space, Page* page, AddPageMode mode);
 
-  int ParallelSweepSpace(
-      AllocationSpace identity, int required_freed_bytes, int max_pages = 0,
-      FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
-          FreeSpaceMayContainInvalidatedSlots::kNo);
-  int ParallelSweepPage(
-      Page* page, AllocationSpace identity,
-      FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
-          FreeSpaceMayContainInvalidatedSlots::kNo);
+  int ParallelSweepSpace(AllocationSpace identity, SweepingMode sweeping_mode,
+                         int required_freed_bytes, int max_pages = 0);
+  int ParallelSweepPage(Page* page, AllocationSpace identity,
+                        SweepingMode sweeping_mode);
 
   void EnsurePageIsSwept(Page* page);
   void ScheduleIncrementalSweepingTask();
 
-  int RawSweep(
-      Page* p, FreeListRebuildingMode free_list_mode,
-      FreeSpaceTreatmentMode free_space_mode,
-      FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space,
-      const base::MutexGuard& page_guard);
+  int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
+               FreeSpaceTreatmentMode free_space_mode,
+               SweepingMode sweeping_mode, const base::MutexGuard& page_guard);
 
   // After calling this function sweeping is considered to be in progress
   // and the main thread can sweep lazily, but the background sweeper tasks
@@ -119,7 +113,6 @@ class Sweeper {
   void AddPageForIterability(Page* page);
   void StartIterabilityTasks();
   void EnsureIterabilityCompleted();
-  void MergeOldToNewRememberedSetsForSweptPages();
 
  private:
  class IncrementalSweeperTask;
@@ -152,7 +145,7 @@ class Sweeper {
   void CleanupRememberedSetEntriesForFreedMemory(
       Address free_start, Address free_end, Page* page,
       bool non_empty_typed_slots, FreeRangesMap* free_ranges_map,
-      InvalidatedSlotsCleanup* old_to_new_cleanup);
+      SweepingMode sweeping_mode, InvalidatedSlotsCleanup* old_to_new_cleanup);
 
   // Helper function for RawSweep. Clears invalid typed slots in the given free
   // ranges.
......