Commit 924be695 authored by Omer Katz, committed by V8 LUCI CQ

[heap] Use PagedNewSpace when MinorMC is enabled

This CL also introduces/updates DCHECKs that some methods are never
reached with MinorMC (they may still be reached by full GC when MinorMC
is disabled).

Bug: v8:12612
Change-Id: I8afb8c964bc5c44225a92d0f8d9ac5a4c0ecef75
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3823130
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82439}
parent 54ef0d87
......@@ -63,6 +63,7 @@ T ForwardingAddress(T heap_obj) {
if (map_word.IsForwardingAddress()) {
return T::cast(map_word.ToForwardingAddress());
} else if (Heap::InFromPage(heap_obj)) {
DCHECK(!FLAG_minor_mc);
return T();
} else {
return heap_obj;
......@@ -410,8 +411,9 @@ void Heap::UpdateAllocationSite(Map map, HeapObject object,
DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
#ifdef DEBUG
BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
DCHECK_IMPLIES(chunk->IsToPage(),
chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION));
DCHECK_IMPLIES(
chunk->IsToPage(),
FLAG_minor_mc || chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION));
DCHECK_IMPLIES(!chunk->InYoungGeneration(),
chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION));
#endif
......
......@@ -930,6 +930,9 @@ void Heap::PrintRetainingPath(HeapObject target, RetainingPathOption option) {
void UpdateRetainersMapAfterScavenge(
std::unordered_map<HeapObject, HeapObject, Object::Hasher>* map) {
// This is only used for Scavenger.
DCHECK(!FLAG_minor_mc);
std::unordered_map<HeapObject, HeapObject, Object::Hasher> updated_map;
for (auto pair : *map) {
......@@ -957,7 +960,7 @@ void UpdateRetainersMapAfterScavenge(
void Heap::UpdateRetainersAfterScavenge() {
if (!incremental_marking()->IsMarking()) return;
// This isn't supported for Minor MC.
// This is only used for Scavenger.
DCHECK(!FLAG_minor_mc);
UpdateRetainersMapAfterScavenge(&retainer_);
......@@ -2598,11 +2601,6 @@ void Heap::MinorMarkCompact() {
CHECK_EQ(NOT_IN_GC, gc_state());
DCHECK(new_space());
if (FLAG_trace_incremental_marking && !incremental_marking()->IsStopped()) {
isolate()->PrintWithTimestamp(
"[IncrementalMarking] MinorMarkCompact during marking.\n");
}
PauseAllocationObserversScope pause_observers(this);
SetGCState(MINOR_MARK_COMPACT);
......@@ -2775,6 +2773,9 @@ void Heap::UpdateExternalString(String string, size_t old_payload,
String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
FullObjectSlot p) {
// This is only used for Scavenger.
DCHECK(!FLAG_minor_mc);
PtrComprCageBase cage_base(heap->isolate());
HeapObject obj = HeapObject::cast(*p);
MapWord first_word = obj.map_word(cage_base, kRelaxedLoad);
......@@ -4781,6 +4782,11 @@ void Heap::VerifyCommittedPhysicalMemory() {
space = spaces.Next()) {
space->VerifyCommittedPhysicalMemory();
}
if (FLAG_minor_mc && new_space()) {
PagedNewSpace::From(new_space())
->paged_space()
->VerifyCommittedPhysicalMemory();
}
}
#endif // DEBUG
......@@ -5757,9 +5763,15 @@ void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info,
DCHECK_NOT_NULL(read_only_space_);
const bool has_young_gen = !FLAG_single_generation && !IsShared();
if (has_young_gen) {
space_[NEW_SPACE] = new_space_ =
new SemiSpaceNewSpace(this, initial_semispace_size_,
max_semi_space_size_, new_allocation_info);
if (FLAG_minor_mc) {
space_[NEW_SPACE] = new_space_ =
new PagedNewSpace(this, initial_semispace_size_, max_semi_space_size_,
new_allocation_info);
} else {
space_[NEW_SPACE] = new_space_ =
new SemiSpaceNewSpace(this, initial_semispace_size_,
max_semi_space_size_, new_allocation_info);
}
space_[NEW_LO_SPACE] = new_lo_space_ =
new NewLargeObjectSpace(this, NewSpaceCapacity());
}
......@@ -5918,6 +5930,7 @@ void Heap::NotifyOldGenerationExpansion(AllocationSpace space,
MemoryChunk* chunk) {
// Pages created during bootstrapping may contain immortal immovable objects.
if (!deserialization_complete()) {
DCHECK_NE(NEW_SPACE, chunk->owner()->identity());
chunk->MarkNeverEvacuate();
}
if (space == CODE_SPACE || space == CODE_LO_SPACE) {
......
......@@ -378,6 +378,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
DCHECK(obj.IsHeapObject());
// Only pointers to from space have to be updated.
if (Heap::InFromPage(obj)) {
DCHECK(!FLAG_minor_mc);
MapWord map_word = obj.map_word(cage_base, kRelaxedLoad);
if (!map_word.IsForwardingAddress()) {
// There may be objects on the marking deque that do not exist
......@@ -401,6 +402,11 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
// new space.
DCHECK(Heap::IsLargeObject(obj) || Page::FromHeapObject(obj)->IsFlagSet(
Page::PAGE_NEW_NEW_PROMOTION));
DCHECK_IMPLIES(FLAG_minor_mc, !Page::FromHeapObject(obj)->IsFlagSet(
Page::PAGE_NEW_NEW_PROMOTION));
DCHECK_IMPLIES(
FLAG_minor_mc,
!obj.map_word(cage_base, kRelaxedLoad).IsForwardingAddress());
if (minor_marking_state->IsWhite(obj)) {
return false;
}
......
......@@ -137,7 +137,7 @@ class MarkingVerifier : public ObjectVisitorWithCageBases, public RootVisitor {
void VerifyRoots();
void VerifyMarkingOnPage(const Page* page, Address start, Address end);
void VerifyMarking(NewSpace* new_space);
void VerifyMarking(PagedSpace* paged_space);
void VerifyMarking(PagedSpaceBase* paged_space);
void VerifyMarking(LargeObjectSpace* lo_space);
Heap* heap_;
......@@ -177,6 +177,10 @@ void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
void MarkingVerifier::VerifyMarking(NewSpace* space) {
if (!space) return;
if (FLAG_minor_mc) {
VerifyMarking(PagedNewSpace::From(space)->paged_space());
return;
}
Address end = space->top();
// The bottom position is at the start of its page. Allows us to use
// page->area_start() as start of range on all pages.
......@@ -192,7 +196,7 @@ void MarkingVerifier::VerifyMarking(NewSpace* space) {
}
}
void MarkingVerifier::VerifyMarking(PagedSpace* space) {
void MarkingVerifier::VerifyMarking(PagedSpaceBase* space) {
for (Page* p : *space) {
VerifyMarkingOnPage(p, p->area_start(), p->area_end());
}
......@@ -344,7 +348,7 @@ class EvacuationVerifier : public ObjectVisitorWithCageBases,
void VerifyRoots();
void VerifyEvacuationOnPage(Address start, Address end);
void VerifyEvacuation(NewSpace* new_space);
void VerifyEvacuation(PagedSpace* paged_space);
void VerifyEvacuation(PagedSpaceBase* paged_space);
Heap* heap_;
};
......@@ -367,6 +371,10 @@ void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
if (!space) return;
if (FLAG_minor_mc) {
VerifyEvacuation(PagedNewSpace::From(space)->paged_space());
return;
}
PageRange range(space->first_allocatable_address(), space->top());
for (auto it = range.begin(); it != range.end();) {
Page* page = *(it++);
......@@ -377,7 +385,7 @@ void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
}
}
void EvacuationVerifier::VerifyEvacuation(PagedSpace* space) {
void EvacuationVerifier::VerifyEvacuation(PagedSpaceBase* space) {
for (Page* p : *space) {
if (p->IsEvacuationCandidate()) continue;
if (p->Contains(space->top())) {
......@@ -645,7 +653,7 @@ void MarkCompactCollector::VerifyMarkbitsAreDirty(ReadOnlySpace* space) {
}
}
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpaceBase* space) {
for (Page* p : *space) {
CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
......@@ -654,6 +662,10 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
if (!space) return;
if (FLAG_minor_mc) {
VerifyMarkbitsAreClean(PagedNewSpace::From(space)->paged_space());
return;
}
for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
......@@ -1024,9 +1036,12 @@ void MarkCompactCollector::Prepare() {
heap()->new_lo_space()->ResetPendingObject();
}
if (heap()->new_space()) {
DCHECK_EQ(heap()->new_space()->top(),
heap()->new_space()->original_top_acquire());
NewSpace* new_space = heap()->new_space();
if (new_space) {
if (FLAG_minor_mc) {
PagedNewSpace::From(new_space)->paged_space()->PrepareForMarkCompact();
}
DCHECK_EQ(new_space->top(), new_space->original_top_acquire());
}
}
......@@ -1058,6 +1073,10 @@ void MarkCompactCollector::VerifyMarking() {
heap()->old_space()->VerifyLiveBytes();
if (heap()->map_space()) heap()->map_space()->VerifyLiveBytes();
heap()->code_space()->VerifyLiveBytes();
if (FLAG_minor_mc && heap()->new_space())
PagedNewSpace::From(heap()->new_space())
->paged_space()
->VerifyLiveBytes();
}
#endif
}
......@@ -1626,9 +1645,10 @@ class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
if (value->IsStrongOrWeak()) {
BasicMemoryChunk* p = BasicMemoryChunk::FromAddress(value.ptr());
if (p->InYoungGeneration()) {
DCHECK_IMPLIES(
p->IsToPage(),
p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
DCHECK_IMPLIES(p->IsToPage(),
FLAG_minor_mc ||
p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
p->IsLargePage());
MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
DCHECK(chunk->SweepingDone());
......@@ -1777,7 +1797,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
AllocationResult allocation;
if (ShouldPromoteIntoSharedHeap(map)) {
DCHECK_EQ(target_space, OLD_SPACE);
DCHECK(Heap::InYoungGeneration(object));
// TODO(v8:12612): Implement promotion from new space to shared heap.
DCHECK_IMPLIES(!FLAG_minor_mc, Heap::InYoungGeneration(object));
DCHECK_NOT_NULL(shared_old_allocator_);
allocation = shared_old_allocator_->AllocateRaw(size, alignment,
AllocationOrigin::kGC);
......@@ -1880,6 +1901,9 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
if (heap_->new_space()->ShouldBePromoted(object.address()) &&
TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
// Full GCs use AlwaysPromoteYoung::kYes above and MinorMC should never
// move objects.
DCHECK(!FLAG_minor_mc);
promoted_size_ += size;
return true;
}
......@@ -1899,7 +1923,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
private:
inline bool TryEvacuateWithoutCopy(HeapObject object) {
if (is_incremental_marking_) return false;
DCHECK(!is_incremental_marking_);
Map map = object.map();
......@@ -1966,6 +1990,7 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
static void Move(Page* page) {
switch (mode) {
case NEW_TO_NEW:
DCHECK(!FLAG_minor_mc);
page->heap()->new_space()->PromotePageInNewSpace(page);
break;
case NEW_TO_OLD: {
......@@ -3559,7 +3584,7 @@ static inline void UpdateSlot(PtrComprCageBase cage_base, TSlot slot,
"expected here");
MapWord map_word = heap_obj.map_word(cage_base, kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
DCHECK_IMPLIES(!Heap::InFromPage(heap_obj),
DCHECK_IMPLIES((!FLAG_minor_mc && !Heap::InFromPage(heap_obj)),
MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
Page::FromHeapObject(heap_obj)->IsFlagSet(
Page::COMPACTION_WAS_ABORTED));
......@@ -5551,9 +5576,10 @@ class YoungGenerationRecordMigratedSlotVisitor final
if (value->IsStrongOrWeak()) {
BasicMemoryChunk* p = BasicMemoryChunk::FromAddress(value.ptr());
if (p->InYoungGeneration()) {
DCHECK_IMPLIES(
p->IsToPage(),
p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
DCHECK_IMPLIES(p->IsToPage(),
FLAG_minor_mc ||
p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
p->IsLargePage());
MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
DCHECK(chunk->SweepingDone());
RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
......
......@@ -504,7 +504,7 @@ class MarkCompactCollector final : public CollectorBase {
#ifdef VERIFY_HEAP
void VerifyMarkbitsAreClean();
void VerifyMarkbitsAreDirty(ReadOnlySpace* space);
void VerifyMarkbitsAreClean(PagedSpace* space);
void VerifyMarkbitsAreClean(PagedSpaceBase* space);
void VerifyMarkbitsAreClean(NewSpace* space);
void VerifyMarkbitsAreClean(LargeObjectSpace* space);
#endif
......
......@@ -909,10 +909,9 @@ PagedSpaceForNewSpace::PagedSpaceForNewSpace(
target_capacity_(initial_capacity_) {
DCHECK_LE(initial_capacity_, max_capacity_);
// Adding entries to the free list requires having a map for free space. Not
// preallocating pages yet because the map may not be available yet when the
// space is initialized. `EnsureCurrentCapacity()` should be called after maps
// are allocated to preallocate pages.
if (!PreallocatePages()) {
V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
}
}
Page* PagedSpaceForNewSpace::InitializePage(MemoryChunk* chunk) {
......@@ -997,10 +996,9 @@ bool PagedSpaceForNewSpace::AddFreshPage() {
return EnsureCurrentCapacity();
}
bool PagedSpaceForNewSpace::EnsureCurrentCapacity() {
bool PagedSpaceForNewSpace::PreallocatePages() {
// Verify that the free space map is already initialized. Otherwise, new free
// list entries will be invalid.
DCHECK_NE(0, heap()->isolate()->root(RootIndex::kFreeSpaceMap).ptr());
while (current_capacity_ < target_capacity_) {
if (!TryExpandImpl()) return false;
}
......@@ -1008,6 +1006,13 @@ bool PagedSpaceForNewSpace::EnsureCurrentCapacity() {
return true;
}
// Ensures the space is grown to its target capacity by preallocating pages
// (delegates to PreallocatePages, which expands until current_capacity_
// reaches target_capacity_). Returns false if a page allocation fails.
// Must only be called once the free-space map exists, because expanding
// the space adds entries to the free list.
bool PagedSpaceForNewSpace::EnsureCurrentCapacity() {
  // Verify that the free space map is already initialized. Otherwise, new free
  // list entries will be invalid.
  DCHECK_NE(0, heap()->isolate()->root(RootIndex::kFreeSpaceMap).ptr());
  return PreallocatePages();
}
void PagedSpaceForNewSpace::FreeLinearAllocationArea() {
size_t remaining_allocation_area_size = limit() - top();
DCHECK_GE(allocated_linear_areas_, remaining_allocation_area_size);
......
......@@ -350,6 +350,7 @@ class NewSpace : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
class V8_EXPORT_PRIVATE SemiSpaceNewSpace final : public NewSpace {
public:
// Unchecked downcast from the NewSpace base class. Valid only when MinorMC
// is disabled: with --minor-mc the heap creates a PagedNewSpace instead
// (see Heap::SetUpSpaces), so casting to SemiSpaceNewSpace would be wrong.
static SemiSpaceNewSpace* From(NewSpace* space) {
  DCHECK(!FLAG_minor_mc);
  return static_cast<SemiSpaceNewSpace*>(space);
}
......@@ -628,6 +629,8 @@ class V8_EXPORT_PRIVATE PagedSpaceForNewSpace final : public PagedSpaceBase {
#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
private:
bool PreallocatePages();
const size_t initial_capacity_;
const size_t max_capacity_;
size_t target_capacity_ = 0;
......@@ -642,6 +645,7 @@ class V8_EXPORT_PRIVATE PagedSpaceForNewSpace final : public PagedSpaceBase {
class V8_EXPORT_PRIVATE PagedNewSpace final : public NewSpace {
public:
// Unchecked downcast from the NewSpace base class. Valid only when MinorMC
// is enabled, since that is the configuration in which the heap creates a
// PagedNewSpace as the young generation (see Heap::SetUpSpaces).
static PagedNewSpace* From(NewSpace* space) {
  DCHECK(FLAG_minor_mc);
  return static_cast<PagedNewSpace*>(space);
}
......
......@@ -184,6 +184,7 @@ void PagedSpaceBase::MergeCompactionSpace(CompactionSpace* other) {
base::MutexGuard guard(mutex());
DCHECK_NE(NEW_SPACE, identity());
DCHECK_NE(NEW_SPACE, other->identity());
DCHECK(identity() == other->identity());
// Unmerged fields:
......@@ -325,6 +326,9 @@ void PagedSpaceBase::RemovePage(Page* page) {
DCHECK_IMPLIES(identity() == NEW_SPACE, page->IsFlagSet(Page::TO_PAGE));
memory_chunk_list_.Remove(page);
UnlinkFreeListCategories(page);
if (identity() == NEW_SPACE) {
page->ReleaseFreeListCategories();
}
DecreaseAllocatedBytes(page->allocated_bytes(), page);
DecreaseCapacity(page->area_size());
AccountUncommitted(page->size());
......@@ -359,6 +363,7 @@ void PagedSpaceBase::ResetFreeList() {
free_list_->EvictFreeListItems(page);
}
DCHECK(free_list_->IsEmpty());
DCHECK_EQ(0, free_list_->Available());
}
void PagedSpaceBase::ShrinkImmortalImmovablePages() {
......@@ -504,7 +509,7 @@ void PagedSpaceBase::FreeLinearAllocationArea() {
AdvanceAllocationObservers();
if (current_top != current_limit &&
if (identity() != NEW_SPACE && current_top != current_limit &&
heap()->incremental_marking()->black_allocation()) {
Page::FromAddress(current_top)
->DestroyBlackArea(current_top, current_limit);
......
......@@ -8,6 +8,7 @@
#include "src/execution/protectors.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/heap/new-spaces.h"
#include "src/ic/handler-configuration.h"
#include "src/init/heap-symbols.h"
#include "src/init/setup-isolate.h"
......@@ -74,6 +75,12 @@ bool SetupIsolateDelegate::SetupHeapInternal(Heap* heap) {
bool Heap::CreateHeapObjects() {
// Create initial maps.
if (!CreateInitialMaps()) return false;
if (FLAG_minor_mc && new_space()) {
PagedNewSpace::From(new_space())
->paged_space()
->free_list()
->RepairLists(this);
}
CreateApiObjects();
// Create initial objects
......
......@@ -928,6 +928,8 @@ void StringForwardingTable::Block::UpdateAfterEvacuation(Isolate* isolate) {
void StringForwardingTable::Block::UpdateAfterEvacuation(Isolate* isolate,
int up_to_index) {
// This is only used for Scavenger.
DCHECK(!FLAG_minor_mc);
DCHECK(FLAG_always_use_string_forwarding_table);
for (int index = 0; index < up_to_index; ++index) {
Object original = Get(isolate, IndexOfOriginalString(index));
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment