Commit a0ed6096 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Introduce CLIENT_TO_SHARED remembered set

During a shared GC we need to iterate the client heaps twice: once for
marking and again when updating pointers after evacuation. This CL introduces
a new remembered set to avoid the second heap iteration: the remembered set is
created while iterating the client heaps for marking. When updating pointers,
the GC then only needs to visit the slots recorded in the remembered set.
CLIENT_TO_SHARED is currently only used during GC.

Bug: v8:11708
Change-Id: Ie7482babb53b5f6ca2115daafe6f208acae98d6e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3315443
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78332}
parent 6b503b95
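For illustration, a minimal sketch of the recording step, assuming the
collapsed mark-compact.cc changes use the existing RememberedSet helper for
the new set; the function name and call site below are hypothetical, not code
quoted from this CL:

// Sketch: while marking a client heap, a slot in `host` that points into the
// shared heap is recorded in the host page's CLIENT_TO_SHARED set, mirroring
// how OLD_TO_NEW slots are recorded elsewhere in V8. This is an assumption
// about the collapsed mark-compact.cc part of the CL.
void RecordClientToSharedSlot(HeapObject host, ObjectSlot slot) {
  MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
  RememberedSet<CLIENT_TO_SHARED>::Insert<AccessMode::NON_ATOMIC>(
      chunk, chunk->Offset(slot.address()));
}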
@@ -2297,6 +2297,10 @@ void Heap::PerformSharedGarbageCollection(Isolate* initiator,
isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
client->heap()->FreeSharedLinearAllocationAreas();
// As long as we need to iterate the client heap to find references into the
// shared heap, all client heaps need to be iterable.
client->heap()->MakeHeapIterable();
});
PerformGarbageCollection(GarbageCollector::MARK_COMPACTOR);
@@ -94,8 +94,8 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
virtual void AddPage(LargePage* page, size_t object_size);
virtual void RemovePage(LargePage* page, size_t object_size);
LargePage* first_page() {
return reinterpret_cast<LargePage*>(Space::first_page());
LargePage* first_page() override {
return reinterpret_cast<LargePage*>(memory_chunk_list_.front());
}
iterator begin() { return iterator(first_page()); }
This diff is collapsed.
@@ -642,6 +642,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Updates pointers to shared objects from client heaps.
void UpdatePointersInClientHeaps();
void UpdatePointersInClientHeap(Isolate* client);
// Marks objects reachable from harmony weak maps and wrapper tracing.
void ProcessEphemeronMarking();
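Below is a hedged sketch of how the pointer-updating pass declared above can
consume the new remembered set; the body is an assumption about the collapsed
mark-compact.cc implementation, written as a free function with a hypothetical
name:

// Sketch: with CLIENT_TO_SHARED populated during marking, updating pointers
// in a client heap only visits the recorded slots instead of re-walking the
// whole heap. RememberedSet<>::Iterate, KEEP_SLOT and
// SlotSet::KEEP_EMPTY_BUCKETS are existing V8 APIs; the callback body is
// deliberately elided.
void UpdatePointersInClientHeapSketch(Heap* client_heap) {
  MemoryChunkIterator chunk_iterator(client_heap);
  while (chunk_iterator.HasNext()) {
    MemoryChunk* chunk = chunk_iterator.Next();
    RememberedSet<CLIENT_TO_SHARED>::Iterate(
        chunk,
        [](MaybeObjectSlot slot) {
          // Follow the forwarding address if the referenced shared object was
          // evacuated (details omitted in this sketch).
          return KEEP_SLOT;
        },
        SlotSet::KEEP_EMPTY_BUCKETS);
    // The set is only needed for this GC cycle, so it can be dropped again.
    chunk->ReleaseSlotSet<CLIENT_TO_SHARED>();
  }
}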
@@ -27,7 +27,9 @@ class SlotSet;
enum RememberedSetType {
OLD_TO_NEW,
OLD_TO_OLD,
OLD_TO_CODE = V8_EXTERNAL_CODE_SPACE_BOOL ? OLD_TO_OLD + 1 : OLD_TO_OLD,
CLIENT_TO_SHARED,
OLD_TO_CODE =
V8_EXTERNAL_CODE_SPACE_BOOL ? CLIENT_TO_SHARED + 1 : CLIENT_TO_SHARED,
NUMBER_OF_REMEMBERED_SET_TYPES
};
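As an aside, the conditional initializer keeps the enum dense: with the
external code space disabled, OLD_TO_CODE aliases CLIENT_TO_SHARED, so
NUMBER_OF_REMEMBERED_SET_TYPES (which sizes the per-chunk slot_set_ array)
does not grow for an unused set. A standalone sketch of the same pattern,
with hypothetical names:

// Stand-ins for V8_EXTERNAL_CODE_SPACE_BOOL and the enum above.
constexpr bool kHasExternalCodeSpace = false;
enum SketchRememberedSetType {
  kOldToNew,
  kOldToOld,
  kClientToShared,
  // With the code space disabled, kOldToCode reuses kClientToShared's value.
  kOldToCode = kHasExternalCodeSpace ? kClientToShared + 1 : kClientToShared,
  kNumberOfTypes
};
static_assert(kNumberOfTypes == 3,
              "only three slot-set entries when the external code space is off");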
@@ -122,6 +122,8 @@ MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[CLIENT_TO_SHARED],
nullptr);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_CODE],
nullptr);
@@ -259,6 +261,8 @@ void MemoryChunk::ReleaseAllAllocatedMemory() {
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
template V8_EXPORT_PRIVATE SlotSet*
MemoryChunk::AllocateSlotSet<CLIENT_TO_SHARED>();
#ifdef V8_EXTERNAL_CODE_SPACE
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_CODE>();
#endif // V8_EXTERNAL_CODE_SPACE
@@ -286,6 +290,7 @@ SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
template void MemoryChunk::ReleaseSlotSet<CLIENT_TO_SHARED>();
#ifdef V8_EXTERNAL_CODE_SPACE
template void MemoryChunk::ReleaseSlotSet<OLD_TO_CODE>();
#endif // V8_EXTERNAL_CODE_SPACE
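A short hedged sketch of the lazy slot-set lifecycle these instantiations
enable; the wrapper function is illustrative only:

// Sketch: a page gets a CLIENT_TO_SHARED slot set only once the first
// client->shared slot is recorded on it, and the set is released again after
// the shared GC has updated pointers. slot_set<>() and AllocateSlotSet<>() are
// existing MemoryChunk accessors.
void EnsureClientToSharedSlotSet(MemoryChunk* chunk) {
  if (chunk->slot_set<CLIENT_TO_SHARED>() == nullptr) {
    chunk->AllocateSlotSet<CLIENT_TO_SHARED>();
  }
}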
@@ -139,11 +139,18 @@ class SemiSpace : public Space {
size_t Available() override { UNREACHABLE(); }
Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
Page* first_page() override {
return reinterpret_cast<Page*>(memory_chunk_list_.front());
}
Page* last_page() override {
return reinterpret_cast<Page*>(memory_chunk_list_.back());
}
const Page* first_page() const {
return reinterpret_cast<const Page*>(Space::first_page());
const Page* first_page() const override {
return reinterpret_cast<const Page*>(memory_chunk_list_.front());
}
const Page* last_page() const override {
return reinterpret_cast<const Page*>(memory_chunk_list_.back());
}
iterator begin() { return iterator(first_page()); }
@@ -447,8 +454,11 @@ class V8_EXPORT_PRIVATE NewSpace
SemiSpace* active_space() { return &to_space_; }
Page* first_page() { return to_space_.first_page(); }
Page* last_page() { return to_space_.last_page(); }
Page* first_page() override { return to_space_.first_page(); }
Page* last_page() override { return to_space_.last_page(); }
const Page* first_page() const override { return to_space_.first_page(); }
const Page* last_page() const override { return to_space_.last_page(); }
iterator begin() { return to_space_.begin(); }
iterator end() { return to_space_.end(); }
@@ -289,9 +289,11 @@ class V8_EXPORT_PRIVATE PagedSpace
inline void UnlinkFreeListCategories(Page* page);
inline size_t RelinkFreeListCategories(Page* page);
Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
const Page* first_page() const {
return reinterpret_cast<const Page*>(Space::first_page());
Page* first_page() override {
return reinterpret_cast<Page*>(memory_chunk_list_.front());
}
const Page* first_page() const override {
return reinterpret_cast<const Page*>(memory_chunk_list_.front());
}
iterator begin() { return iterator(first_page()); }
@@ -175,6 +175,24 @@ bool LocalAllocationBuffer::TryFreeLast(HeapObject object, int object_size) {
return false;
}
bool MemoryChunkIterator::HasNext() {
if (current_chunk_) return true;
while (space_iterator_.HasNext()) {
Space* space = space_iterator_.Next();
current_chunk_ = space->first_page();
if (current_chunk_) return true;
}
return false;
}
MemoryChunk* MemoryChunkIterator::Next() {
MemoryChunk* chunk = current_chunk_;
current_chunk_ = chunk->list_node().next();
return chunk;
}
} // namespace internal
} // namespace v8
@@ -170,11 +170,15 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
return external_backing_store_bytes_[type];
}
MemoryChunk* first_page() { return memory_chunk_list_.front(); }
MemoryChunk* last_page() { return memory_chunk_list_.back(); }
virtual MemoryChunk* first_page() { return memory_chunk_list_.front(); }
virtual MemoryChunk* last_page() { return memory_chunk_list_.back(); }
const MemoryChunk* first_page() const { return memory_chunk_list_.front(); }
const MemoryChunk* last_page() const { return memory_chunk_list_.back(); }
virtual const MemoryChunk* first_page() const {
return memory_chunk_list_.front();
}
virtual const MemoryChunk* last_page() const {
return memory_chunk_list_.back();
}
heap::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
@@ -490,6 +494,19 @@ class SpaceWithLinearArea : public Space {
AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
};
// Iterates over all memory chunks in the heap (across all spaces).
class MemoryChunkIterator {
public:
explicit MemoryChunkIterator(Heap* heap) : space_iterator_(heap) {}
V8_INLINE bool HasNext();
V8_INLINE MemoryChunk* Next();
private:
SpaceIterator space_iterator_;
MemoryChunk* current_chunk_ = nullptr;
};
} // namespace internal
} // namespace v8
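Finally, a hedged usage sketch of the new MemoryChunkIterator declared above
(and defined in the heap-inl.h hunk earlier); iterating chunks polymorphically
like this is also why first_page()/last_page() become virtual on Space and are
overridden in the derived spaces in this CL. The counting function itself is
illustrative only:

// Sketch: visit every memory chunk in every space exactly once. HasNext()
// lazily advances to the next space via the (now virtual) first_page(), and
// Next() follows the chunk list through list_node().next().
size_t CountMemoryChunks(Heap* heap) {
  size_t count = 0;
  MemoryChunkIterator chunk_iterator(heap);
  while (chunk_iterator.HasNext()) {
    chunk_iterator.Next();
    ++count;
  }
  return count;
}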
This diff is collapsed.