Commit ca68fc73 authored by Omer Katz, committed by V8 LUCI CQ

heap: Remove SWEEP_TO_ITERATE

Bug: v8:12612
Change-Id: Ife6fd532a49b7f164e890fc9ddea5001d4547b38
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3461928
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79094}
parent 60ac939f
...@@ -79,31 +79,27 @@ class BasicMemoryChunk { ...@@ -79,31 +79,27 @@ class BasicMemoryChunk {
// triggering on the same page. // triggering on the same page.
COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16, COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
// |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
// to iterate the page.
SWEEP_TO_ITERATE = 1u << 17,
// |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
// enabled. // enabled.
INCREMENTAL_MARKING = 1u << 18, INCREMENTAL_MARKING = 1u << 17,
NEW_SPACE_BELOW_AGE_MARK = 1u << 19, NEW_SPACE_BELOW_AGE_MARK = 1u << 18,
// The memory chunk freeing bookkeeping has been performed but the chunk has // The memory chunk freeing bookkeeping has been performed but the chunk has
// not yet been freed. // not yet been freed.
UNREGISTERED = 1u << 20, UNREGISTERED = 1u << 19,
// The memory chunk belongs to the read-only heap and does not participate // The memory chunk belongs to the read-only heap and does not participate
// in garbage collection. This is used instead of owner for identity // in garbage collection. This is used instead of owner for identity
// checking since read-only chunks have no owner once they are detached. // checking since read-only chunks have no owner once they are detached.
READ_ONLY_HEAP = 1u << 21, READ_ONLY_HEAP = 1u << 20,
// The memory chunk is pinned in memory and can't be moved. This is likely // The memory chunk is pinned in memory and can't be moved. This is likely
// because there exists a potential pointer to somewhere in the chunk which // because there exists a potential pointer to somewhere in the chunk which
// can't be updated. // can't be updated.
PINNED = 1u << 22, PINNED = 1u << 21,
// This page belongs to a shared heap. // This page belongs to a shared heap.
IN_SHARED_HEAP = 1u << 23, IN_SHARED_HEAP = 1u << 22,
}; };
using MainThreadFlags = base::Flags<Flag, uintptr_t>; using MainThreadFlags = base::Flags<Flag, uintptr_t>;
......
...@@ -45,10 +45,10 @@ struct MemoryChunk { ...@@ -45,10 +45,10 @@ struct MemoryChunk {
static constexpr uintptr_t kFlagsOffset = kSizetSize; static constexpr uintptr_t kFlagsOffset = kSizetSize;
static constexpr uintptr_t kHeapOffset = kSizetSize + kUIntptrSize; static constexpr uintptr_t kHeapOffset = kSizetSize + kUIntptrSize;
static constexpr uintptr_t kIsExecutableBit = uintptr_t{1} << 0; static constexpr uintptr_t kIsExecutableBit = uintptr_t{1} << 0;
static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18; static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 17;
static constexpr uintptr_t kFromPageBit = uintptr_t{1} << 3; static constexpr uintptr_t kFromPageBit = uintptr_t{1} << 3;
static constexpr uintptr_t kToPageBit = uintptr_t{1} << 4; static constexpr uintptr_t kToPageBit = uintptr_t{1} << 4;
static constexpr uintptr_t kReadOnlySpaceBit = uintptr_t{1} << 21; static constexpr uintptr_t kReadOnlySpaceBit = uintptr_t{1} << 20;
V8_INLINE static heap_internals::MemoryChunk* FromHeapObject( V8_INLINE static heap_internals::MemoryChunk* FromHeapObject(
HeapObject object) { HeapObject object) {
......
...@@ -447,69 +447,70 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() { ...@@ -447,69 +447,70 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
collector_->local_marking_worklists()->Publish(); collector_->local_marking_worklists()->Publish();
MarkingBarrier::PublishAll(heap()); MarkingBarrier::PublishAll(heap());
PtrComprCageBase cage_base(heap_->isolate()); PtrComprCageBase cage_base(heap_->isolate());
collector_->marking_worklists()->Update( collector_->marking_worklists()->Update([
[
#ifdef DEBUG #ifdef DEBUG
// this is referred inside DCHECK. // this is referred inside DCHECK.
this, this,
#endif #endif
minor_marking_state, cage_base, minor_marking_state, cage_base,
filler_map](HeapObject obj, HeapObject* out) -> bool { filler_map](
DCHECK(obj.IsHeapObject()); HeapObject obj,
// Only pointers to from space have to be updated. HeapObject* out) -> bool {
if (Heap::InFromPage(obj)) { DCHECK(obj.IsHeapObject());
MapWord map_word = obj.map_word(cage_base, kRelaxedLoad); // Only pointers to from space have to be updated.
if (!map_word.IsForwardingAddress()) { if (Heap::InFromPage(obj)) {
// There may be objects on the marking deque that do not exist MapWord map_word = obj.map_word(cage_base, kRelaxedLoad);
// anymore, e.g. left trimmed objects or objects from the root set if (!map_word.IsForwardingAddress()) {
// (frames). If these object are dead at scavenging time, their // There may be objects on the marking deque that do not exist
// marking deque entries will not point to forwarding addresses. // anymore, e.g. left trimmed objects or objects from the root set
// Hence, we can discard them. // (frames). If these object are dead at scavenging time, their
return false; // marking deque entries will not point to forwarding addresses.
} // Hence, we can discard them.
HeapObject dest = map_word.ToForwardingAddress(); return false;
DCHECK_IMPLIES(marking_state()->IsWhite(obj), }
obj.IsFreeSpaceOrFiller()); HeapObject dest = map_word.ToForwardingAddress();
if (dest.InSharedHeap()) { DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj.IsFreeSpaceOrFiller());
// Object got promoted into the shared heap. Drop it from the client if (dest.InSharedHeap()) {
// heap marking worklist. // Object got promoted into the shared heap. Drop it from the client
return false; // heap marking worklist.
} return false;
*out = dest; }
return true; *out = dest;
} else if (Heap::InToPage(obj)) { return true;
// The object may be on a large page or on a page that was moved in } else if (Heap::InToPage(obj)) {
// new space. // The object may be on a large page or on a page that was moved in
DCHECK(Heap::IsLargeObject(obj) || // new space.
Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE)); DCHECK(Heap::IsLargeObject(obj) || Page::FromHeapObject(obj)->IsFlagSet(
if (minor_marking_state->IsWhite(obj)) { Page::PAGE_NEW_NEW_PROMOTION));
return false; if (minor_marking_state->IsWhite(obj)) {
} return false;
// Either a large object or an object marked by the minor }
// mark-compactor. // Either a large object or an object marked by the minor
*out = obj; // mark-compactor.
return true; *out = obj;
} else { return true;
// The object may be on a page that was moved from new to old space. } else {
// Only applicable during minor MC garbage collections. // The object may be on a page that was moved from new to old space.
if (Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE)) { // Only applicable during minor MC garbage collections.
if (minor_marking_state->IsWhite(obj)) { if (!Heap::IsLargeObject(obj) &&
return false; Page::FromHeapObject(obj)->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
} if (minor_marking_state->IsWhite(obj)) {
*out = obj;
return true;
}
DCHECK_IMPLIES(marking_state()->IsWhite(obj),
obj.IsFreeSpaceOrFiller(cage_base));
// Skip one word filler objects that appear on the
// stack when we perform in place array shift.
if (obj.map(cage_base) != filler_map) {
*out = obj;
return true;
}
return false; return false;
} }
}); *out = obj;
return true;
}
DCHECK_IMPLIES(marking_state()->IsWhite(obj),
obj.IsFreeSpaceOrFiller(cage_base));
// Skip one word filler objects that appear on the
// stack when we perform in place array shift.
if (obj.map(cage_base) != filler_map) {
*out = obj;
return true;
}
return false;
}
});
collector_->local_weak_objects()->Publish(); collector_->local_weak_objects()->Publish();
weak_objects_->UpdateAfterScavenge(); weak_objects_->UpdateAfterScavenge();
......
...@@ -615,8 +615,6 @@ void MarkCompactCollector::CollectGarbage() { ...@@ -615,8 +615,6 @@ void MarkCompactCollector::CollectGarbage() {
// update the state as they proceed. // update the state as they proceed.
DCHECK(state_ == PREPARE_GC); DCHECK(state_ == PREPARE_GC);
heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
MarkLiveObjects(); MarkLiveObjects();
ClearNonLiveReferences(); ClearNonLiveReferences();
VerifyMarking(); VerifyMarking();
...@@ -4101,10 +4099,9 @@ void MarkCompactCollector::Evacuate() { ...@@ -4101,10 +4099,9 @@ void MarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
for (Page* p : new_space_evacuation_pages_) { for (Page* p : new_space_evacuation_pages_) {
if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { // Full GCs don't promote pages within new space.
p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION); DCHECK(!p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
sweeper()->AddPageForIterability(p); if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
} else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
DCHECK_EQ(OLD_SPACE, p->owner_identity()); DCHECK_EQ(OLD_SPACE, p->owner_identity());
sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR); sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
...@@ -5091,14 +5088,13 @@ MinorMarkCompactCollector::~MinorMarkCompactCollector() { ...@@ -5091,14 +5088,13 @@ MinorMarkCompactCollector::~MinorMarkCompactCollector() {
delete main_marking_visitor_; delete main_marking_visitor_;
} }
void MinorMarkCompactCollector::CleanupSweepToIteratePages() { void MinorMarkCompactCollector::CleanupPromotedPages() {
for (Page* p : sweep_to_iterate_pages_) { for (Page* p : promoted_pages_) {
if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) { p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
p->ClearFlag(Page::SWEEP_TO_ITERATE); p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
non_atomic_marking_state()->ClearLiveness(p); non_atomic_marking_state()->ClearLiveness(p);
}
} }
sweep_to_iterate_pages_.clear(); promoted_pages_.clear();
} }
void MinorMarkCompactCollector::SweepArrayBufferExtensions() { void MinorMarkCompactCollector::SweepArrayBufferExtensions() {
...@@ -5264,7 +5260,6 @@ void MinorMarkCompactCollector::CollectGarbage() { ...@@ -5264,7 +5260,6 @@ void MinorMarkCompactCollector::CollectGarbage() {
{ {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING); TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted(); heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
CleanupSweepToIteratePages();
} }
heap()->array_buffer_sweeper()->EnsureFinished(); heap()->array_buffer_sweeper()->EnsureFinished();
...@@ -5295,7 +5290,8 @@ void MinorMarkCompactCollector::CollectGarbage() { ...@@ -5295,7 +5290,8 @@ void MinorMarkCompactCollector::CollectGarbage() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS); TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
for (Page* p : for (Page* p :
PageRange(heap()->new_space()->from_space().first_page(), nullptr)) { PageRange(heap()->new_space()->from_space().first_page(), nullptr)) {
DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE)); DCHECK_EQ(promoted_pages_.end(),
std::find(promoted_pages_.begin(), promoted_pages_.end(), p));
non_atomic_marking_state()->ClearLiveness(p); non_atomic_marking_state()->ClearLiveness(p);
if (FLAG_concurrent_marking) { if (FLAG_concurrent_marking) {
// Ensure that concurrent marker does not track pages that are // Ensure that concurrent marker does not track pages that are
...@@ -5310,6 +5306,8 @@ void MinorMarkCompactCollector::CollectGarbage() { ...@@ -5310,6 +5306,8 @@ void MinorMarkCompactCollector::CollectGarbage() {
heap()->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; }); heap()->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
} }
CleanupPromotedPages();
SweepArrayBufferExtensions(); SweepArrayBufferExtensions();
} }
...@@ -5865,10 +5863,7 @@ void MinorMarkCompactCollector::Evacuate() { ...@@ -5865,10 +5863,7 @@ void MinorMarkCompactCollector::Evacuate() {
for (Page* p : new_space_evacuation_pages_) { for (Page* p : new_space_evacuation_pages_) {
if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION); promoted_pages_.push_back(p);
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
p->SetFlag(Page::SWEEP_TO_ITERATE);
sweep_to_iterate_pages_.push_back(p);
} }
} }
new_space_evacuation_pages_.clear(); new_space_evacuation_pages_.clear();
......
...@@ -865,7 +865,7 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase { ...@@ -865,7 +865,7 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
void CollectGarbage() override; void CollectGarbage() override;
void MakeIterable(Page* page, FreeSpaceTreatmentMode free_space_mode); void MakeIterable(Page* page, FreeSpaceTreatmentMode free_space_mode);
void CleanupSweepToIteratePages(); void CleanupPromotedPages();
private: private:
using MarkingWorklist = using MarkingWorklist =
...@@ -914,7 +914,7 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase { ...@@ -914,7 +914,7 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
YoungGenerationMarkingVisitor* main_marking_visitor_; YoungGenerationMarkingVisitor* main_marking_visitor_;
base::Semaphore page_parallel_job_semaphore_; base::Semaphore page_parallel_job_semaphore_;
std::vector<Page*> new_space_evacuation_pages_; std::vector<Page*> new_space_evacuation_pages_;
std::vector<Page*> sweep_to_iterate_pages_; std::vector<Page*> promoted_pages_;
friend class YoungGenerationMarkingTask; friend class YoungGenerationMarkingTask;
friend class YoungGenerationMarkingJob; friend class YoungGenerationMarkingJob;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment