Commit 4dd04c0b authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Clean up invalidated OLD_TO_SHARED slots

With this CL, OLD_TO_SHARED slots are no longer removed at the end of a
full GC. To allow this, invalidated slots need to be filtered out when
iterating the OLD_TO_SHARED remembered set (a sketch of that filtering
pattern follows the list below).

* When invalidating slots in an object, that object also needs to be
  recorded in the OLD_TO_SHARED invalidated set.
* The sweeper has to remove invalidated objects that end up in free
  memory when sweeping during a full GC.
* OLD_TO_SHARED slots need to be removed from the evacuated start of
  a page when evacuation fails.
* While local GCs don't need the OLD_TO_SHARED set, its slots still
  need to be filtered so that the set of invalidated objects can be
  deleted during a GC.
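
For context, here is a minimal sketch of that filtering idea, deliberately
not V8's actual implementation: a page records the address range of every
invalidated object, and a slot survives remembered-set iteration only if it
lies outside all of those ranges. ToyInvalidatedSlotsFilter and its map
layout are hypothetical simplifications.

#include <cstddef>
#include <cstdint>
#include <map>
#include <utility>

using Address = uintptr_t;

// Hypothetical stand-in for V8's InvalidatedSlotsFilter: maps the start
// address of each invalidated object to its size in bytes.
class ToyInvalidatedSlotsFilter {
 public:
  explicit ToyInvalidatedSlotsFilter(std::map<Address, std::size_t> invalidated)
      : invalidated_(std::move(invalidated)) {}

  // A slot is valid if it does not point into any invalidated object.
  bool IsValid(Address slot) const {
    auto it = invalidated_.upper_bound(slot);  // First object starting after slot.
    if (it == invalidated_.begin()) return true;
    --it;  // Last object starting at or before the slot.
    return slot >= it->first + it->second;  // Valid if past that object's end.
  }

 private:
  std::map<Address, std::size_t> invalidated_;
};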

Bug: v8:11708
Change-Id: I594307289a797bc0d68edf6793b914805d1285df
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3584113
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80018}
parent a7fcaa5a
src/heap/invalidated-slots.cc
@@ -22,6 +22,11 @@ InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToNew(MemoryChunk* chunk) {
OLD_TO_NEW);
}
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToShared(MemoryChunk* chunk) {
return InvalidatedSlotsFilter(
chunk, chunk->invalidated_slots<OLD_TO_SHARED>(), OLD_TO_SHARED);
}
InvalidatedSlotsFilter::InvalidatedSlotsFilter(
MemoryChunk* chunk, InvalidatedSlots* invalidated_slots,
RememberedSetType remembered_set_type) {
@@ -48,6 +53,12 @@ InvalidatedSlotsCleanup InvalidatedSlotsCleanup::OldToNew(MemoryChunk* chunk) {
return InvalidatedSlotsCleanup(chunk, chunk->invalidated_slots<OLD_TO_NEW>());
}
InvalidatedSlotsCleanup InvalidatedSlotsCleanup::OldToShared(
MemoryChunk* chunk) {
return InvalidatedSlotsCleanup(chunk,
chunk->invalidated_slots<OLD_TO_SHARED>());
}
InvalidatedSlotsCleanup InvalidatedSlotsCleanup::NoCleanup(MemoryChunk* chunk) {
return InvalidatedSlotsCleanup(chunk, nullptr);
}
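A rough usage sketch of the two helpers this file now builds for
OLD_TO_SHARED: the filter answers per-slot validity queries during
iteration, while the cleanup drops entries for invalidated objects whose
memory is freed. This is not a verbatim V8 call site; `chunk` and
`during_full_gc` are assumed to be in scope.

// The filter is always safe to construct; cleanup is only needed while
// sweeping during the atomic pause of a full GC (see the sweeper change
// further down), otherwise NoCleanup is used.
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToShared(chunk);

InvalidatedSlotsCleanup cleanup = InvalidatedSlotsCleanup::NoCleanup(chunk);
if (during_full_gc) {
  cleanup = InvalidatedSlotsCleanup::OldToShared(chunk);
}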
src/heap/invalidated-slots.h
@@ -33,6 +33,7 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
public:
static InvalidatedSlotsFilter OldToOld(MemoryChunk* chunk);
static InvalidatedSlotsFilter OldToNew(MemoryChunk* chunk);
static InvalidatedSlotsFilter OldToShared(MemoryChunk* chunk);
inline bool IsValid(Address slot);
@@ -60,6 +61,7 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
class V8_EXPORT_PRIVATE InvalidatedSlotsCleanup {
public:
static InvalidatedSlotsCleanup OldToNew(MemoryChunk* chunk);
static InvalidatedSlotsCleanup OldToShared(MemoryChunk* chunk);
static InvalidatedSlotsCleanup NoCleanup(MemoryChunk* chunk);
explicit InvalidatedSlotsCleanup(MemoryChunk* chunk,
src/heap/mark-compact.cc
@@ -3514,7 +3514,7 @@ void MarkCompactCollector::EvacuateEpilogue() {
heap()->new_lo_space()->FreeUnmarkedObjects();
}
// Old space. Deallocate evacuated candidate pages.
// Old generation. Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
// Give pages that are queued to be freed back to the OS.
@@ -3528,10 +3528,16 @@ void MarkCompactCollector::EvacuateEpilogue() {
// Old-to-old slot sets must be empty after evacuation.
DCHECK_NULL((chunk->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
DCHECK_NULL((chunk->slot_set<OLD_TO_SHARED, AccessMode::NON_ATOMIC>()));
DCHECK_NULL((chunk->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
// Old-to-new slot sets must be empty after evacuation.
DCHECK_NULL((chunk->slot_set<OLD_TO_NEW, AccessMode::ATOMIC>()));
DCHECK_NULL((chunk->typed_slot_set<OLD_TO_NEW, AccessMode::ATOMIC>()));
// GCs need to filter invalidated slots.
DCHECK_NULL(chunk->invalidated_slots<OLD_TO_OLD>());
DCHECK_NULL(chunk->invalidated_slots<OLD_TO_NEW>());
DCHECK_NULL(chunk->invalidated_slots<OLD_TO_SHARED>());
}
#endif
}
@@ -4452,6 +4458,21 @@ class RememberedSetUpdatingItem : public UpdatingItem {
// processed, but since there are no invalidated OLD_TO_CODE slots,
// there's nothing to clear.
}
if (updating_mode_ == RememberedSetUpdatingMode::ALL) {
if (chunk_->slot_set<OLD_TO_SHARED, AccessMode::NON_ATOMIC>()) {
// Client GCs need to remove invalidated OLD_TO_SHARED slots.
DCHECK(!heap_->IsShared());
InvalidatedSlotsFilter filter =
InvalidatedSlotsFilter::OldToShared(chunk_);
RememberedSet<OLD_TO_SHARED>::Iterate(
chunk_,
[&filter](MaybeObjectSlot slot) {
return filter.IsValid(slot.address()) ? KEEP_SLOT : REMOVE_SLOT;
},
SlotSet::FREE_EMPTY_BUCKETS);
}
chunk_->ReleaseInvalidatedSlots<OLD_TO_SHARED>();
}
}
void UpdateTypedPointers() {
@@ -4517,13 +4538,20 @@ int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
const bool contains_old_to_new_slots =
chunk->slot_set<OLD_TO_NEW>() != nullptr ||
chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
const bool contains_old_to_shared_slots =
chunk->slot_set<OLD_TO_SHARED>() != nullptr ||
chunk->typed_slot_set<OLD_TO_SHARED>() != nullptr;
const bool contains_old_to_old_invalidated_slots =
chunk->invalidated_slots<OLD_TO_OLD>() != nullptr;
const bool contains_old_to_new_invalidated_slots =
chunk->invalidated_slots<OLD_TO_NEW>() != nullptr;
const bool contains_old_to_shared_invalidated_slots =
chunk->invalidated_slots<OLD_TO_SHARED>() != nullptr;
if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
!contains_old_to_old_invalidated_slots &&
!contains_old_to_new_invalidated_slots && !contains_old_to_code_slots)
!contains_old_to_new_invalidated_slots && !contains_old_to_code_slots &&
!contains_old_to_shared_slots &&
!contains_old_to_shared_invalidated_slots)
continue;
if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
contains_old_to_old_invalidated_slots ||
@@ -4668,14 +4696,16 @@ void MarkCompactCollector::UpdatePointersInClientHeap(Isolate* client) {
MemoryChunk* chunk = chunk_iterator.Next();
CodePageMemoryModificationScope unprotect_code_page(chunk);
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToShared(chunk);
RememberedSet<OLD_TO_SHARED>::Iterate(
chunk,
[cage_base](MaybeObjectSlot slot) {
[cage_base, &filter](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return UpdateSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
},
SlotSet::KEEP_EMPTY_BUCKETS);
SlotSet::FREE_EMPTY_BUCKETS);
chunk->ReleaseSlotSet<OLD_TO_SHARED>();
chunk->ReleaseInvalidatedSlots<OLD_TO_SHARED>();
RememberedSet<OLD_TO_SHARED>::IterateTyped(
chunk, [this](SlotType slot_type, Address slot) {
@@ -4688,8 +4718,6 @@ void MarkCompactCollector::UpdatePointersInClientHeap(Isolate* client) {
slot);
});
});
chunk->ReleaseTypedSlotSet<OLD_TO_SHARED>();
}
#ifdef VERIFY_HEAP
@@ -4736,11 +4764,20 @@ void ReRecordPage(
RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
failed_start);
RememberedSet<OLD_TO_SHARED>::RemoveRange(page, page->address(), failed_start,
SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_SHARED>::RemoveRangeTyped(page, page->address(),
failed_start);
// Remove invalidated slots.
if (failed_start > page->area_start()) {
InvalidatedSlotsCleanup old_to_new_cleanup =
InvalidatedSlotsCleanup::OldToNew(page);
old_to_new_cleanup.Free(page->area_start(), failed_start);
InvalidatedSlotsCleanup old_to_shared_cleanup =
InvalidatedSlotsCleanup::OldToShared(page);
old_to_shared_cleanup.Free(page->area_start(), failed_start);
}
// Recompute live bytes.
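The mark-compact changes above share one pattern: iterate a remembered set,
ask the filter about each slot, and return KEEP_SLOT or REMOVE_SLOT so that
invalid entries vanish as a side effect of iteration. Below is a minimal
stand-alone model of that pattern; ToySlotSet is hypothetical, while
KEEP_SLOT/REMOVE_SLOT mirror the enum values visible in the diff.

#include <cstdint>
#include <functional>
#include <utility>
#include <vector>

using Address = uintptr_t;
enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

// Hypothetical miniature slot set: just a vector of slot addresses.
struct ToySlotSet {
  std::vector<Address> slots;

  // Iterate all slots; the callback decides which survive. This models
  // RememberedSet<OLD_TO_SHARED>::Iterate with FREE_EMPTY_BUCKETS, where
  // REMOVE_SLOT entries are dropped and emptied storage is released.
  void Iterate(const std::function<SlotCallbackResult(Address)>& callback) {
    std::vector<Address> kept;
    for (Address slot : slots) {
      if (callback(slot) == KEEP_SLOT) kept.push_back(slot);
    }
    slots = std::move(kept);
  }
};

// Usage, with a filter like the toy one sketched near the top of the page:
//   slot_set.Iterate([&filter](Address slot) {
//     return filter.IsValid(slot) ? KEEP_SLOT : REMOVE_SLOT;
//   });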
src/heap/memory-chunk.cc
@@ -4,6 +4,7 @@
#include "src/heap/memory-chunk.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
#include "src/common/globals.h"
@@ -141,6 +142,7 @@ MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
// Not actually used but initialize anyway for predictability.
invalidated_slots_[OLD_TO_CODE] = nullptr;
}
invalidated_slots_[OLD_TO_SHARED] = nullptr;
progress_bar_.Initialize();
set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
page_protection_change_mutex_ = new base::Mutex();
@@ -245,10 +247,13 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
ReleaseSlotSet<OLD_TO_NEW>();
ReleaseSlotSet<OLD_TO_OLD>();
if (V8_EXTERNAL_CODE_SPACE_BOOL) ReleaseSlotSet<OLD_TO_CODE>();
ReleaseSlotSet<OLD_TO_SHARED>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
ReleaseTypedSlotSet<OLD_TO_OLD>();
ReleaseTypedSlotSet<OLD_TO_SHARED>();
ReleaseInvalidatedSlots<OLD_TO_NEW>();
ReleaseInvalidatedSlots<OLD_TO_OLD>();
ReleaseInvalidatedSlots<OLD_TO_SHARED>();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
@@ -348,6 +353,7 @@ InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_NEW>();
template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_OLD>();
template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_SHARED>();
template <RememberedSetType type>
void MemoryChunk::ReleaseInvalidatedSlots() {
@@ -361,15 +367,28 @@ template V8_EXPORT_PRIVATE void
MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
template V8_EXPORT_PRIVATE void
MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);
template V8_EXPORT_PRIVATE void MemoryChunk::RegisterObjectWithInvalidatedSlots<
OLD_TO_SHARED>(HeapObject object);
template <RememberedSetType type>
void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
bool skip_slot_recording;
if (type == OLD_TO_NEW) {
skip_slot_recording = InYoungGeneration();
} else {
skip_slot_recording = ShouldSkipEvacuationSlotRecording();
switch (type) {
case OLD_TO_NEW:
skip_slot_recording = InYoungGeneration();
break;
case OLD_TO_OLD:
skip_slot_recording = ShouldSkipEvacuationSlotRecording();
break;
case OLD_TO_SHARED:
skip_slot_recording = InYoungGeneration();
break;
default:
UNREACHABLE();
}
if (skip_slot_recording) {
@@ -391,8 +410,13 @@ void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
}
if (slot_set_[OLD_TO_NEW] != nullptr)
if (slot_set_[OLD_TO_NEW] != nullptr) {
RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
}
if (slot_set_[OLD_TO_SHARED] != nullptr) {
RegisterObjectWithInvalidatedSlots<OLD_TO_SHARED>(object);
}
}
template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
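The memory-chunk changes extend object invalidation to OLD_TO_SHARED: when
an object's layout changes, it is registered in the invalidated set of
every remembered-set type that currently has slots on the page. A condensed
sketch of that shape follows; ToyChunk and its fields are hypothetical
stand-ins for the real MemoryChunk.

#include <cstddef>
#include <cstdint>
#include <map>

using Address = uintptr_t;
enum RememberedSetType { OLD_TO_NEW, OLD_TO_OLD, OLD_TO_SHARED, NUMBER_OF_TYPES };

struct ToyChunk {
  // One invalidated-object map per remembered-set type.
  std::map<Address, std::size_t> invalidated[NUMBER_OF_TYPES];
  bool has_slots[NUMBER_OF_TYPES] = {false, false, false};

  // Mirrors MemoryChunk::InvalidateRecordedSlots after this CL: the object
  // is recorded for every type that has a slot set, now including
  // OLD_TO_SHARED.
  void InvalidateRecordedSlots(Address object_start, std::size_t object_size) {
    for (int type = 0; type < NUMBER_OF_TYPES; type++) {
      if (has_slots[type]) {
        invalidated[type][object_start] = object_size;
      }
    }
  }
};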
src/heap/sweeper.cc
@@ -254,7 +254,8 @@ V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
V8_INLINE void Sweeper::CleanupRememberedSetEntriesForFreedMemory(
Address free_start, Address free_end, Page* page, bool record_free_ranges,
TypedSlotSet::FreeRangesMap* free_ranges_map, SweepingMode sweeping_mode,
InvalidatedSlotsCleanup* old_to_new_cleanup) {
InvalidatedSlotsCleanup* invalidated_old_to_new_cleanup,
InvalidatedSlotsCleanup* invalidated_old_to_shared_cleanup) {
DCHECK_LE(free_start, free_end);
if (sweeping_mode == SweepingMode::kEagerDuringGC) {
// New space and in consequence the old-to-new remembered set is always
@@ -284,7 +285,8 @@ V8_INLINE void Sweeper::CleanupRememberedSetEntriesForFreedMemory(
static_cast<uint32_t>(free_end - page->address())));
}
old_to_new_cleanup->Free(free_start, free_end);
invalidated_old_to_new_cleanup->Free(free_start, free_end);
invalidated_old_to_shared_cleanup->Free(free_start, free_end);
}
void Sweeper::CleanupInvalidTypedSlotsOfFreeRanges(
@@ -368,13 +370,18 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
p->typed_slot_set<OLD_TO_SHARED>() != nullptr ||
DEBUG_BOOL;
// Clean invalidated slots during the final atomic pause. After resuming
// execution this isn't necessary, invalid old-to-new refs were already
// removed by mark compact's update pointers phase.
InvalidatedSlotsCleanup old_to_new_cleanup =
// Clean invalidated slots in free memory during the final atomic pause. After
// resuming execution this isn't necessary, invalid slots were already removed
// by mark compact's update pointers phase. So there are no invalid slots left
// in free memory.
InvalidatedSlotsCleanup invalidated_old_to_new_cleanup =
InvalidatedSlotsCleanup::NoCleanup(p);
if (sweeping_mode == SweepingMode::kEagerDuringGC)
old_to_new_cleanup = InvalidatedSlotsCleanup::OldToNew(p);
InvalidatedSlotsCleanup invalidated_old_to_shared_cleanup =
InvalidatedSlotsCleanup::NoCleanup(p);
if (sweeping_mode == SweepingMode::kEagerDuringGC) {
invalidated_old_to_new_cleanup = InvalidatedSlotsCleanup::OldToNew(p);
invalidated_old_to_shared_cleanup = InvalidatedSlotsCleanup::OldToShared(p);
}
// The free ranges map is used for filtering typed slots.
TypedSlotSet::FreeRangesMap free_ranges_map;
@@ -401,7 +408,8 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
free_list_mode, free_space_mode));
CleanupRememberedSetEntriesForFreedMemory(
free_start, free_end, p, record_free_ranges, &free_ranges_map,
sweeping_mode, &old_to_new_cleanup);
sweeping_mode, &invalidated_old_to_new_cleanup,
&invalidated_old_to_shared_cleanup);
}
Map map = object.map(cage_base, kAcquireLoad);
DCHECK(MarkCompactCollector::IsMapOrForwarded(map));
@@ -429,7 +437,8 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
free_list_mode, free_space_mode));
CleanupRememberedSetEntriesForFreedMemory(
free_start, free_end, p, record_free_ranges, &free_ranges_map,
sweeping_mode, &old_to_new_cleanup);
sweeping_mode, &invalidated_old_to_new_cleanup,
&invalidated_old_to_shared_cleanup);
}
// Phase 3: Post process the page.
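To close the loop, the sweeper now threads two cleanup helpers through its
free-range path, so every discovered free range is reported for both
OLD_TO_NEW and OLD_TO_SHARED and invalidated objects inside freed memory
drop out of both sets. A toy version of that call shape; ToyCleanup is a
hypothetical stand-in for InvalidatedSlotsCleanup.

#include <cstddef>
#include <cstdint>
#include <map>

using Address = uintptr_t;

// Hypothetical cleanup helper: forgets every invalidated object that
// starts inside a freed range; a null map models NoCleanup.
struct ToyCleanup {
  std::map<Address, std::size_t>* invalidated;

  void Free(Address free_start, Address free_end) {
    if (invalidated == nullptr) return;
    invalidated->erase(invalidated->lower_bound(free_start),
                       invalidated->lower_bound(free_end));
  }
};

// Mirrors the widened CleanupRememberedSetEntriesForFreedMemory: both
// cleanups see every free range produced while sweeping.
void CleanupFreedRange(Address free_start, Address free_end,
                       ToyCleanup* old_to_new, ToyCleanup* old_to_shared) {
  old_to_new->Free(free_start, free_end);
  old_to_shared->Free(free_start, free_end);
}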
src/heap/sweeper.h
@@ -144,7 +144,8 @@ class Sweeper {
void CleanupRememberedSetEntriesForFreedMemory(
Address free_start, Address free_end, Page* page, bool record_free_ranges,
TypedSlotSet::FreeRangesMap* free_ranges_map, SweepingMode sweeping_mode,
InvalidatedSlotsCleanup* old_to_new_cleanup);
InvalidatedSlotsCleanup* invalidated_old_to_new_cleanup,
InvalidatedSlotsCleanup* invalidated_old_to_shared_cleanup);
// Helper function for RawSweep. Clears invalid typed slots in the given free
// ranges.