Commit ca68fc73 authored by Omer Katz, committed by V8 LUCI CQ

heap: Remove SWEEP_TO_ITERATE

Bug: v8:12612
Change-Id: Ife6fd532a49b7f164e890fc9ddea5001d4547b38
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3461928
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79094}
parent 60ac939f
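
This commit removes the SWEEP_TO_ITERATE page flag, so every flag that used to sit above it shifts down by one bit, along with the constants that mirror those bit positions elsewhere. As a minimal sketch of that effect (simplified names, not V8's actual declarations), removing one enumerator from a bit-flag enum renumbers everything below it:

    #include <cstdint>

    // Simplified sketch, not the actual BasicMemoryChunk declaration: each
    // enumerator is a single bit in a uintptr_t-sized flag word.
    enum Flag : uintptr_t {
      COMPACTION_WAS_ABORTED_FOR_TESTING = uintptr_t{1} << 16,
      // SWEEP_TO_ITERATE used to occupy 1 << 17. Removing it shifts every
      // later flag down by one bit position, which is why the constants in
      // the hunks below all change value.
      INCREMENTAL_MARKING = uintptr_t{1} << 17,
      NEW_SPACE_BELOW_AGE_MARK = uintptr_t{1} << 18,
      UNREGISTERED = uintptr_t{1} << 19,
      READ_ONLY_HEAP = uintptr_t{1} << 20,
    };

    int main() {
      // Flags are combined and tested with plain bit arithmetic.
      uintptr_t flags = INCREMENTAL_MARKING | READ_ONLY_HEAP;
      bool marking = (flags & INCREMENTAL_MARKING) != 0;  // true
      return marking ? 0 : 1;
    }
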
@@ -79,31 +79,27 @@ class BasicMemoryChunk {
     // triggering on the same page.
     COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
-    // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
-    // to iterate the page.
-    SWEEP_TO_ITERATE = 1u << 17,
     // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
     // enabled.
-    INCREMENTAL_MARKING = 1u << 18,
-    NEW_SPACE_BELOW_AGE_MARK = 1u << 19,
+    INCREMENTAL_MARKING = 1u << 17,
+    NEW_SPACE_BELOW_AGE_MARK = 1u << 18,
     // The memory chunk freeing bookkeeping has been performed but the chunk has
     // not yet been freed.
-    UNREGISTERED = 1u << 20,
+    UNREGISTERED = 1u << 19,
     // The memory chunk belongs to the read-only heap and does not participate
     // in garbage collection. This is used instead of owner for identity
     // checking since read-only chunks have no owner once they are detached.
-    READ_ONLY_HEAP = 1u << 21,
+    READ_ONLY_HEAP = 1u << 20,
     // The memory chunk is pinned in memory and can't be moved. This is likely
     // because there exists a potential pointer to somewhere in the chunk which
     // can't be updated.
-    PINNED = 1u << 22,
+    PINNED = 1u << 21,
     // This page belongs to a shared heap.
-    IN_SHARED_HEAP = 1u << 23,
+    IN_SHARED_HEAP = 1u << 22,
   };

   using MainThreadFlags = base::Flags<Flag, uintptr_t>;
......
@@ -45,10 +45,10 @@ struct MemoryChunk {
   static constexpr uintptr_t kFlagsOffset = kSizetSize;
   static constexpr uintptr_t kHeapOffset = kSizetSize + kUIntptrSize;
   static constexpr uintptr_t kIsExecutableBit = uintptr_t{1} << 0;
-  static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;
+  static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 17;
   static constexpr uintptr_t kFromPageBit = uintptr_t{1} << 3;
   static constexpr uintptr_t kToPageBit = uintptr_t{1} << 4;
-  static constexpr uintptr_t kReadOnlySpaceBit = uintptr_t{1} << 21;
+  static constexpr uintptr_t kReadOnlySpaceBit = uintptr_t{1} << 20;

   V8_INLINE static heap_internals::MemoryChunk* FromHeapObject(
       HeapObject object) {
......
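
The hunk above touches heap_internals::MemoryChunk, whose kMarkingBit and kReadOnlySpaceBit duplicate bit positions from the page-flag enum so the inlined write barrier can test them directly; they must be renumbered in lockstep with the flags. A sketch of the kind of compile-time check that keeps such mirrored constants honest (illustrative names, not the actual V8 assertions):

    #include <cstdint>

    // Illustrative only: two declarations that must agree on a bit position.
    namespace heap_flags {
    constexpr uintptr_t kIncrementalMarking = uintptr_t{1} << 17;  // page flag
    }
    namespace heap_internals_sketch {
    constexpr uintptr_t kMarkingBit = uintptr_t{1} << 17;  // write-barrier mirror
    }

    // If one side is renumbered without the other, compilation fails here
    // instead of the write barrier silently testing the wrong flag.
    static_assert(heap_flags::kIncrementalMarking ==
                      heap_internals_sketch::kMarkingBit,
                  "write-barrier bit must match the page flag");

    int main() { return 0; }
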
@@ -447,69 +447,70 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
   collector_->local_marking_worklists()->Publish();
   MarkingBarrier::PublishAll(heap());
   PtrComprCageBase cage_base(heap_->isolate());
-  collector_->marking_worklists()->Update(
-      [
+  collector_->marking_worklists()->Update([
 #ifdef DEBUG
-          // this is referred inside DCHECK.
-          this,
+      // this is referred inside DCHECK.
+      this,
 #endif
-          minor_marking_state, cage_base,
-          filler_map](HeapObject obj, HeapObject* out) -> bool {
-        DCHECK(obj.IsHeapObject());
-        // Only pointers to from space have to be updated.
-        if (Heap::InFromPage(obj)) {
-          MapWord map_word = obj.map_word(cage_base, kRelaxedLoad);
-          if (!map_word.IsForwardingAddress()) {
-            // There may be objects on the marking deque that do not exist
-            // anymore, e.g. left trimmed objects or objects from the root set
-            // (frames). If these object are dead at scavenging time, their
-            // marking deque entries will not point to forwarding addresses.
-            // Hence, we can discard them.
-            return false;
-          }
-          HeapObject dest = map_word.ToForwardingAddress();
-          DCHECK_IMPLIES(marking_state()->IsWhite(obj),
-                         obj.IsFreeSpaceOrFiller());
-          if (dest.InSharedHeap()) {
-            // Object got promoted into the shared heap. Drop it from the client
-            // heap marking worklist.
-            return false;
-          }
-          *out = dest;
-          return true;
-        } else if (Heap::InToPage(obj)) {
-          // The object may be on a large page or on a page that was moved in
-          // new space.
-          DCHECK(Heap::IsLargeObject(obj) ||
-                 Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE));
-          if (minor_marking_state->IsWhite(obj)) {
-            return false;
-          }
-          // Either a large object or an object marked by the minor
-          // mark-compactor.
-          *out = obj;
-          return true;
-        } else {
-          // The object may be on a page that was moved from new to old space.
-          // Only applicable during minor MC garbage collections.
-          if (Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
-            if (minor_marking_state->IsWhite(obj)) {
-              return false;
-            }
-            *out = obj;
-            return true;
-          }
-          DCHECK_IMPLIES(marking_state()->IsWhite(obj),
-                         obj.IsFreeSpaceOrFiller(cage_base));
-          // Skip one word filler objects that appear on the
-          // stack when we perform in place array shift.
-          if (obj.map(cage_base) != filler_map) {
-            *out = obj;
-            return true;
-          }
-          return false;
-        }
-      });
+      minor_marking_state, cage_base,
+      filler_map](
+      HeapObject obj,
+      HeapObject* out) -> bool {
+    DCHECK(obj.IsHeapObject());
+    // Only pointers to from space have to be updated.
+    if (Heap::InFromPage(obj)) {
+      MapWord map_word = obj.map_word(cage_base, kRelaxedLoad);
+      if (!map_word.IsForwardingAddress()) {
+        // There may be objects on the marking deque that do not exist
+        // anymore, e.g. left trimmed objects or objects from the root set
+        // (frames). If these object are dead at scavenging time, their
+        // marking deque entries will not point to forwarding addresses.
+        // Hence, we can discard them.
+        return false;
+      }
+      HeapObject dest = map_word.ToForwardingAddress();
+      DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj.IsFreeSpaceOrFiller());
+      if (dest.InSharedHeap()) {
+        // Object got promoted into the shared heap. Drop it from the client
+        // heap marking worklist.
+        return false;
+      }
+      *out = dest;
+      return true;
+    } else if (Heap::InToPage(obj)) {
+      // The object may be on a large page or on a page that was moved in
+      // new space.
+      DCHECK(Heap::IsLargeObject(obj) || Page::FromHeapObject(obj)->IsFlagSet(
+                                             Page::PAGE_NEW_NEW_PROMOTION));
+      if (minor_marking_state->IsWhite(obj)) {
+        return false;
+      }
+      // Either a large object or an object marked by the minor
+      // mark-compactor.
+      *out = obj;
+      return true;
+    } else {
+      // The object may be on a page that was moved from new to old space.
+      // Only applicable during minor MC garbage collections.
+      if (!Heap::IsLargeObject(obj) &&
+          Page::FromHeapObject(obj)->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
+        if (minor_marking_state->IsWhite(obj)) {
+          return false;
+        }
+        *out = obj;
+        return true;
+      }
+      DCHECK_IMPLIES(marking_state()->IsWhite(obj),
+                     obj.IsFreeSpaceOrFiller(cage_base));
+      // Skip one word filler objects that appear on the
+      // stack when we perform in place array shift.
+      if (obj.map(cage_base) != filler_map) {
+        *out = obj;
+        return true;
+      }
+      return false;
+    }
+  });
   collector_->local_weak_objects()->Publish();
   weak_objects_->UpdateAfterScavenge();
......
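
In the hunk above, marking_worklists()->Update() is given a callback that works as a combined filter and map over queued objects: returning false drops an entry (dead, already promoted to the shared heap, or a filler), returning true keeps whatever the callback stored in *out (the object itself or its forwarding address). A standalone sketch of that contract, using a plain std::vector and a hypothetical Entry struct in place of V8's worklist and HeapObject types:

    #include <vector>

    // Hypothetical stand-ins for HeapObject and the worklist; V8's real types
    // are more involved.
    struct Entry {
      void* address;
      bool alive;
      void* forwarded_to;  // non-null if the object was moved
    };

    // Update() visits every queued entry; the callback decides whether the
    // entry survives and what value it carries afterwards.
    template <typename Callback>
    void Update(std::vector<Entry>& worklist, Callback callback) {
      std::vector<Entry> kept;
      for (Entry& entry : worklist) {
        Entry out = entry;
        // Returning false drops the entry; returning true keeps the
        // (possibly rewritten) value the callback stored in `out`.
        if (callback(entry, &out)) kept.push_back(out);
      }
      worklist.swap(kept);
    }

    int main() {
      std::vector<Entry> worklist = {{nullptr, true, nullptr}};
      Update(worklist, [](const Entry& in, Entry* out) -> bool {
        if (!in.alive) return false;       // dead object: discard
        if (in.forwarded_to != nullptr) {  // moved object: rewrite the entry
          out->address = in.forwarded_to;
        }
        return true;
      });
      return static_cast<int>(worklist.size()) - 1;  // expect one entry left
    }
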
@@ -615,8 +615,6 @@ void MarkCompactCollector::CollectGarbage() {
   // update the state as they proceed.
   DCHECK(state_ == PREPARE_GC);
-  heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
   MarkLiveObjects();
   ClearNonLiveReferences();
   VerifyMarking();
@@ -4101,10 +4099,9 @@ void MarkCompactCollector::Evacuate() {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
     for (Page* p : new_space_evacuation_pages_) {
-      if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
-        p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
-        sweeper()->AddPageForIterability(p);
-      } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
+      // Full GCs don't promote pages within new space.
+      DCHECK(!p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+      if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
         p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
         DCHECK_EQ(OLD_SPACE, p->owner_identity());
         sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
@@ -5091,14 +5088,13 @@ MinorMarkCompactCollector::~MinorMarkCompactCollector() {
   delete main_marking_visitor_;
 }

-void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
-  for (Page* p : sweep_to_iterate_pages_) {
-    if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
-      p->ClearFlag(Page::SWEEP_TO_ITERATE);
-      non_atomic_marking_state()->ClearLiveness(p);
-    }
+void MinorMarkCompactCollector::CleanupPromotedPages() {
+  for (Page* p : promoted_pages_) {
+    p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
+    p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+    non_atomic_marking_state()->ClearLiveness(p);
   }
-  sweep_to_iterate_pages_.clear();
+  promoted_pages_.clear();
 }

 void MinorMarkCompactCollector::SweepArrayBufferExtensions() {
@@ -5264,7 +5260,6 @@ void MinorMarkCompactCollector::CollectGarbage() {
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
     heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
-    CleanupSweepToIteratePages();
   }
   heap()->array_buffer_sweeper()->EnsureFinished();
@@ -5295,7 +5290,8 @@ void MinorMarkCompactCollector::CollectGarbage() {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
     for (Page* p :
          PageRange(heap()->new_space()->from_space().first_page(), nullptr)) {
-      DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
+      DCHECK_EQ(promoted_pages_.end(),
+                std::find(promoted_pages_.begin(), promoted_pages_.end(), p));
       non_atomic_marking_state()->ClearLiveness(p);
       if (FLAG_concurrent_marking) {
         // Ensure that concurrent marker does not track pages that are
@@ -5310,6 +5306,8 @@ void MinorMarkCompactCollector::CollectGarbage() {
     heap()->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
   }

+  CleanupPromotedPages();
+
   SweepArrayBufferExtensions();
 }
@@ -5865,10 +5863,7 @@ void MinorMarkCompactCollector::Evacuate() {
     for (Page* p : new_space_evacuation_pages_) {
       if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
           p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
-        p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
-        p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
-        p->SetFlag(Page::SWEEP_TO_ITERATE);
-        sweep_to_iterate_pages_.push_back(p);
+        promoted_pages_.push_back(p);
       }
     }
     new_space_evacuation_pages_.clear();
......
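
Taken together, the mark-compact hunks above replace the per-page SWEEP_TO_ITERATE flag and the sweep_to_iterate_pages_ list with a single promoted_pages_ vector: Evacuate() records pages that were promoted during the young-generation cycle, and CleanupPromotedPages() later clears their promotion flags and liveness data in one pass. A stripped-down sketch of that lifecycle, with a hypothetical Page type standing in for V8's:

    #include <cstdint>
    #include <vector>

    // Hypothetical page with a small flag word; not V8's Page class.
    struct Page {
      enum Flag : uint32_t {
        PAGE_NEW_NEW_PROMOTION = 1u << 0,
        PAGE_NEW_OLD_PROMOTION = 1u << 1,
      };
      uint32_t flags = 0;
      void SetFlag(Flag f) { flags |= f; }
      void ClearFlag(Flag f) { flags &= ~static_cast<uint32_t>(f); }
      bool IsFlagSet(Flag f) const { return (flags & f) != 0; }
      void ClearLiveness() { /* reset mark bitmap and live byte count */ }
    };

    class MinorCollectorSketch {
     public:
      // During evacuation, any page promoted in place (new->new or new->old)
      // is simply remembered; no extra page flag is needed anymore.
      void OnPageEvacuated(Page* p) {
        if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
            p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
          promoted_pages_.push_back(p);
        }
      }

      // At the end of the GC cycle the promotion flags and liveness data are
      // dropped in one pass over the remembered pages.
      void CleanupPromotedPages() {
        for (Page* p : promoted_pages_) {
          p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
          p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
          p->ClearLiveness();
        }
        promoted_pages_.clear();
      }

     private:
      std::vector<Page*> promoted_pages_;
    };

    int main() {
      Page page;
      page.SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
      MinorCollectorSketch collector;
      collector.OnPageEvacuated(&page);
      collector.CleanupPromotedPages();
      return page.IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ? 1 : 0;  // expect 0
    }

Keeping this bookkeeping in a side vector avoids reserving a dedicated page flag purely for iteration, which is what frees up the bit position renumbered in the first hunk.
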
@@ -865,7 +865,7 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
   void CollectGarbage() override;
   void MakeIterable(Page* page, FreeSpaceTreatmentMode free_space_mode);

-  void CleanupSweepToIteratePages();
+  void CleanupPromotedPages();

  private:
   using MarkingWorklist =
@@ -914,7 +914,7 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
   YoungGenerationMarkingVisitor* main_marking_visitor_;
   base::Semaphore page_parallel_job_semaphore_;
   std::vector<Page*> new_space_evacuation_pages_;
-  std::vector<Page*> sweep_to_iterate_pages_;
+  std::vector<Page*> promoted_pages_;

   friend class YoungGenerationMarkingTask;
   friend class YoungGenerationMarkingJob;
......