Commit 445190bf authored by Michael Lippautz, committed by V8 LUCI CQ

[heap] Fix bogus object size computation

The map of an object may be gone by the time we try to compute its
size for accounting purposes.

Bug: chromium:1319217
Change-Id: I93cca766a8cedebf4ed30a3a65fd6eff5bc72bcf
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3605817
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80271}
parent 27ce2c06
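
Context for the diff below: in V8, an object's size is derived from its map. The removed FreeUnmarkedObjects() ran in EvacuateEpilogue(), i.e. after evacuation, at which point a dead object's map may already have been reclaimed, so object.Size() could read freed memory. The new Sweep()/SweepLargeSpace() path releases dead large pages right after marking, while every map is still intact, and ShrinkPagesToObjectSizes() recomputes the accounting from live objects only. A minimal standalone sketch of the hazard, using hypothetical stand-in types rather than V8's actual classes:

#include <cstddef>
#include <vector>

// Hypothetical illustration only, not V8 code: an object's size lives in its
// map, so reading the size after the map's memory was reclaimed is a
// use-after-free that yields a bogus value.
struct Map {
  size_t instance_size;
};

struct HeapObject {
  Map* map;
  size_t Size() const { return map->instance_size; }  // valid only while the map lives
};

// Compute sizes while objects (and their maps) are still known intact, and
// only then reclaim dead ones -- the ordering this commit enforces.
size_t SweepAndAccount(std::vector<HeapObject>& objects,
                       const std::vector<bool>& is_live) {
  size_t surviving_bytes = 0;
  for (size_t i = 0; i < objects.size(); ++i) {
    const size_t size = objects[i].Size();  // read before any reclamation
    if (is_live[i]) surviving_bytes += size;
    // ... release dead objects here, after all Size() reads ...
  }
  return surviving_bytes;
}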
@@ -66,19 +66,6 @@ size_t LargeObjectSpace::Available() const {
   return 0;
 }
 
-Address LargePage::GetAddressToShrink(Address object_address,
-                                      size_t object_size) {
-  if (executable() == EXECUTABLE) {
-    return 0;
-  }
-  size_t used_size = ::RoundUp((object_address - address()) + object_size,
-                               MemoryAllocator::GetCommitPageSize());
-  if (used_size < CommittedPhysicalMemory()) {
-    return address() + used_size;
-  }
-  return 0;
-}
-
 void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
   RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
                                          SlotSet::FREE_EMPTY_BUCKETS);

@@ -244,22 +231,6 @@ LargePage* CodeLargeObjectSpace::FindPage(Address a) {
   return nullptr;
 }
 
-void OldLargeObjectSpace::ClearMarkingStateOfLiveObjects() {
-  IncrementalMarking::NonAtomicMarkingState* marking_state =
-      heap()->incremental_marking()->non_atomic_marking_state();
-  LargeObjectSpaceObjectIterator it(this);
-  for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
-    if (marking_state->IsBlackOrGrey(obj)) {
-      Marking::MarkWhite(marking_state->MarkBitFrom(obj));
-      MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
-      RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
-      chunk->ProgressBar().ResetIfEnabled();
-      marking_state->SetLiveBytes(chunk, 0);
-    }
-    DCHECK(marking_state->IsWhite(obj));
-  }
-}
-
 void CodeLargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
   for (Address current = reinterpret_cast<Address>(page);
        current < reinterpret_cast<Address>(page) + page->size();

@@ -308,42 +279,44 @@ void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
   page->set_owner(nullptr);
 }
 
-void LargeObjectSpace::FreeUnmarkedObjects() {
-  LargePage* current = first_page();
-  IncrementalMarking::NonAtomicMarkingState* marking_state =
-      heap()->incremental_marking()->non_atomic_marking_state();
-  // Right-trimming does not update the objects_size_ counter. We are lazily
-  // updating it after every GC.
-  size_t surviving_object_size = 0;
-  PtrComprCageBase cage_base(heap()->isolate());
-  while (current) {
-    LargePage* next_current = current->next_page();
-    HeapObject object = current->GetObject();
-    DCHECK(!marking_state->IsGrey(object));
-    size_t size = static_cast<size_t>(object.Size(cage_base));
-    if (marking_state->IsBlack(object)) {
-      Address free_start;
-      surviving_object_size += size;
-      if ((free_start = current->GetAddressToShrink(object.address(), size)) !=
-          0) {
-        DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
-        current->ClearOutOfLiveRangeSlots(free_start);
-        const size_t bytes_to_free =
-            current->size() - (free_start - current->address());
-        heap()->memory_allocator()->PartialFreeMemory(
-            current, free_start, bytes_to_free,
-            current->area_start() + object.Size(cage_base));
-        size_ -= bytes_to_free;
-        AccountUncommitted(bytes_to_free);
-      }
-    } else {
-      RemovePage(current, size);
-      heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
-                                       current);
-    }
-    current = next_current;
-  }
-  objects_size_ = surviving_object_size;
+namespace {
+
+// Returns the `GetCommitPageSize()`-aligned end of the payload that can be
+// used to shrink down an object. Returns kNullAddress if shrinking is not
+// supported.
+Address GetEndOfPayload(LargePage* page, Address object_address,
+                        size_t object_size) {
+  if (page->executable() == EXECUTABLE) {
+    return kNullAddress;
+  }
+  const size_t used_committed_size =
+      ::RoundUp((object_address - page->address()) + object_size,
+                MemoryAllocator::GetCommitPageSize());
+  return (used_committed_size < page->size())
+             ? page->address() + used_committed_size
+             : kNullAddress;
+}
+
+}  // namespace
+
+void LargeObjectSpace::ShrinkPageToObjectSize(LargePage* page,
+                                              HeapObject object,
+                                              size_t object_size) {
+#ifdef DEBUG
+  PtrComprCageBase cage_base(heap()->isolate());
+  DCHECK_EQ(object, page->GetObject());
+  DCHECK_EQ(object_size, page->GetObject().Size(cage_base));
+#endif  // DEBUG
+  Address free_start = GetEndOfPayload(page, object.address(), object_size);
+  if (free_start != kNullAddress) {
+    DCHECK(!page->IsFlagSet(Page::IS_EXECUTABLE));
+    page->ClearOutOfLiveRangeSlots(free_start);
+    const size_t bytes_to_free = page->size() - (free_start - page->address());
+    heap()->memory_allocator()->PartialFreeMemory(
+        page, free_start, bytes_to_free, page->area_start() + object_size);
+    size_ -= bytes_to_free;
+    AccountUncommitted(bytes_to_free);
+  }
 }
 
 bool LargeObjectSpace::Contains(HeapObject object) const {
...
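
A note on the ::RoundUp arithmetic in GetEndOfPayload() above: the bytes from the page start through the end of the object are rounded up to the commit page granularity, and anything past that boundary can be uncommitted. A self-contained sketch with assumed numbers (the 4 KiB commit page size is illustrative only; the real value comes from MemoryAllocator::GetCommitPageSize()):

#include <cstdint>

// Stand-in for the ::RoundUp used above (alignment must be a power of two).
constexpr uintptr_t RoundUp(uintptr_t x, uintptr_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  const uintptr_t kCommitPageSize = 4096;    // assumed for the example
  const uintptr_t page_start = 0x10000;      // page->address()
  const uintptr_t object_address = 0x10040;  // object starts 64 bytes in
  const uintptr_t object_size = 9000;

  // 64 + 9000 = 9064 rounds up to 12288, i.e. three 4 KiB commit pages, so
  // free_start = page_start + 12288 and the tail of the page can be freed.
  const uintptr_t used_committed_size =
      RoundUp((object_address - page_start) + object_size, kCommitPageSize);
  const uintptr_t free_start = page_start + used_committed_size;
  return free_start == 0x13000 ? 0 : 1;
}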
@@ -47,10 +47,6 @@ class LargePage : public MemoryChunk {
     return static_cast<const LargePage*>(list_node_.next());
   }
 
-  // Uncommit memory that is not in use anymore by the object. If the object
-  // cannot be shrunk 0 is returned.
-  Address GetAddressToShrink(Address object_address, size_t object_size);
-
   void ClearOutOfLiveRangeSlots(Address free_start);
 
  private:

@@ -87,8 +83,8 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
   int PageCount() const { return page_count_; }
 
-  // Frees unmarked objects.
-  virtual void FreeUnmarkedObjects();
+  void ShrinkPageToObjectSize(LargePage* page, HeapObject object,
+                              size_t object_size);
 
   // Checks whether a heap object is in this space; O(1).
   bool Contains(HeapObject obj) const;

@@ -141,6 +137,8 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
     return &pending_allocation_mutex_;
   }
 
+  void set_objects_size(size_t objects_size) { objects_size_ = objects_size; }
+
  protected:
   LargeObjectSpace(Heap* heap, AllocationSpace id);

@@ -176,9 +174,6 @@ class OldLargeObjectSpace : public LargeObjectSpace {
   V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
   AllocateRawBackground(LocalHeap* local_heap, int object_size);
 
-  // Clears the marking state of live objects.
-  void ClearMarkingStateOfLiveObjects();
-
   void PromoteNewLargeObject(LargePage* page);
 
  protected:
...
@@ -625,7 +625,7 @@ void MarkCompactCollector::CollectGarbage() {
   heap()->memory_measurement()->FinishProcessing(native_context_stats_);
   RecordObjectStats();
 
-  StartSweepSpaces();
+  Sweep();
   Evacuate();
   Finish();
 }

@@ -1054,6 +1054,23 @@ void MarkCompactCollector::VerifyMarking() {
 #endif
 }
 
+namespace {
+
+void ShrinkPagesToObjectSizes(Heap* heap, OldLargeObjectSpace* space) {
+  size_t surviving_object_size = 0;
+  PtrComprCageBase cage_base(heap->isolate());
+  for (auto it = space->begin(); it != space->end();) {
+    LargePage* current = *(it++);
+    HeapObject object = current->GetObject();
+    const size_t object_size = static_cast<size_t>(object.Size(cage_base));
+    space->ShrinkPageToObjectSize(current, object, object_size);
+    surviving_object_size += object_size;
+  }
+  space->set_objects_size(surviving_object_size);
+}
+
+}  // namespace
+
 void MarkCompactCollector::Finish() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);

@@ -1079,9 +1096,18 @@ void MarkCompactCollector::Finish() {
   sweeper()->StartSweeperTasks();
   sweeper()->StartIterabilityTasks();
 
-  // Clear the marking state of live large objects.
-  heap_->lo_space()->ClearMarkingStateOfLiveObjects();
-  heap_->code_lo_space()->ClearMarkingStateOfLiveObjects();
+  if (heap()->new_lo_space()) {
+    // New large objects can only be swept after evacuation as pointers updating
+    // checks for dead objects in from space, similar to regular new space.
+    GCTracer::Scope sweep_scope(heap()->tracer(),
+                                GCTracer::Scope::MC_FINISH_SWEEP_NEW_LO,
+                                ThreadKind::kMain);
+    SweepLargeSpace(heap()->new_lo_space());
+  }
+
+  // Shrink pages if possible after processing and filtering slots.
+  ShrinkPagesToObjectSizes(heap(), heap()->lo_space());
+  ShrinkPagesToObjectSizes(heap(), heap()->code_lo_space());
 
 #ifdef DEBUG
   DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
@@ -3564,13 +3590,6 @@ void MarkCompactCollector::EvacuateEpilogue() {
     DCHECK_EQ(0, heap()->new_space()->Size());
   }
 
-  // Deallocate unmarked large objects.
-  heap()->lo_space()->FreeUnmarkedObjects();
-  heap()->code_lo_space()->FreeUnmarkedObjects();
-  if (heap()->new_lo_space()) {
-    heap()->new_lo_space()->FreeUnmarkedObjects();
-  }
-
   // Old generation. Deallocate evacuated candidate pages.
   ReleaseEvacuationCandidates();

@@ -4028,18 +4047,15 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
   }
 
   // Promote young generation large objects.
-  if (heap()->new_lo_space()) {
-    IncrementalMarking::NonAtomicMarkingState* marking_state =
+  if (auto* new_lo_space = heap()->new_lo_space()) {
+    auto* marking_state =
         heap()->incremental_marking()->non_atomic_marking_state();
-
-    for (auto it = heap()->new_lo_space()->begin();
-         it != heap()->new_lo_space()->end();) {
-      LargePage* current = *it;
-      it++;
+    for (auto it = new_lo_space->begin(); it != new_lo_space->end();) {
+      LargePage* current = *(it++);
       HeapObject object = current->GetObject();
       DCHECK(!marking_state->IsGrey(object));
       if (marking_state->IsBlack(object)) {
-        heap_->lo_space()->PromoteNewLargeObject(current);
+        heap()->lo_space()->PromoteNewLargeObject(current);
         current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
         promoted_large_pages_.push_back(current);
         evacuation_items.emplace_back(ParallelWorkItem{}, current);

@@ -4230,6 +4246,10 @@ void MarkCompactCollector::Evacuate() {
   for (LargePage* p : promoted_large_pages_) {
     DCHECK(p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
     p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+    HeapObject object = p->GetObject();
+    Marking::MarkWhite(non_atomic_marking_state()->MarkBitFrom(object));
+    p->ProgressBar().ResetIfEnabled();
+    non_atomic_marking_state()->SetLiveBytes(p, 0);
   }
   promoted_large_pages_.clear();
@@ -4904,6 +4924,28 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
   compacting_ = false;
 }
 
+void MarkCompactCollector::SweepLargeSpace(LargeObjectSpace* space) {
+  auto* marking_state =
+      heap()->incremental_marking()->non_atomic_marking_state();
+  PtrComprCageBase cage_base(heap()->isolate());
+  for (auto it = space->begin(); it != space->end();) {
+    LargePage* current = *(it++);
+    HeapObject object = current->GetObject();
+    DCHECK(!marking_state->IsGrey(object));
+    if (!marking_state->IsBlack(object)) {
+      // Object is dead and page can be released.
+      const size_t object_size = static_cast<size_t>(object.Size(cage_base));
+      space->RemovePage(current, object_size);
+      heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
+                                       current);
+      continue;
+    }
+    Marking::MarkWhite(non_atomic_marking_state()->MarkBitFrom(object));
+    current->ProgressBar().ResetIfEnabled();
+    non_atomic_marking_state()->SetLiveBytes(current, 0);
+  }
+}
+
 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
   space->ClearAllocatorState();

@@ -4945,13 +4987,24 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
   }
 }
 
-void MarkCompactCollector::StartSweepSpaces() {
+void MarkCompactCollector::Sweep() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
 #ifdef DEBUG
   state_ = SWEEP_SPACES;
 #endif
   {
+    {
+      GCTracer::Scope sweep_scope(
+          heap()->tracer(), GCTracer::Scope::MC_SWEEP_LO, ThreadKind::kMain);
+      SweepLargeSpace(heap()->lo_space());
+    }
+    {
+      GCTracer::Scope sweep_scope(heap()->tracer(),
+                                  GCTracer::Scope::MC_SWEEP_CODE_LO,
+                                  ThreadKind::kMain);
+      SweepLargeSpace(heap()->code_lo_space());
+    }
     {
       GCTracer::Scope sweep_scope(
           heap()->tracer(), GCTracer::Scope::MC_SWEEP_OLD, ThreadKind::kMain);

@@ -5235,7 +5288,12 @@ void MinorMarkCompactCollector::CleanupPromotedPages() {
   promoted_pages_.clear();
 
   for (LargePage* p : promoted_large_pages_) {
+    DCHECK(p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
     p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+    HeapObject object = p->GetObject();
+    Marking::MarkWhite(non_atomic_marking_state()->MarkBitFrom(object));
+    p->ProgressBar().ResetIfEnabled();
+    non_atomic_marking_state()->SetLiveBytes(p, 0);
   }
   promoted_large_pages_.clear();
 }
...
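
One idiom worth noting in SweepLargeSpace() and EvacuatePagesInParallel() above: `LargePage* current = *(it++);` advances the iterator before the loop body may unlink the current page via RemovePage()/Free(), so the iterator stays valid. A small sketch of the same pattern over a std::list (a stand-in here; V8's page list is intrusive):

#include <list>

// Erase-while-iterating: step the iterator past the current element first,
// so erasing that element cannot invalidate the iterator we keep using.
template <typename Predicate>
void SweepList(std::list<int>& pages, Predicate is_dead) {
  for (auto it = pages.begin(); it != pages.end();) {
    auto current = it++;  // advance before a potential erase, as in the diff
    if (is_dead(*current)) {
      pages.erase(current);  // only invalidates `current`, not `it`
    }
  }
}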
@@ -26,6 +26,7 @@ namespace internal {
 class EvacuationJobTraits;
 class HeapObjectVisitor;
 class ItemParallelJob;
+class LargeObjectSpace;
 class LargePage;
 class MigrationObserver;
 class ReadOnlySpace;

@@ -696,8 +697,9 @@ class MarkCompactCollector final {
   // Starts sweeping of spaces by contributing on the main thread and setting
   // up other pages for sweeping. Does not start sweeper tasks.
-  void StartSweepSpaces();
+  void Sweep();
   void StartSweepSpace(PagedSpace* space);
+  void SweepLargeSpace(LargeObjectSpace* space);
 
   void EvacuatePrologue();
   void EvacuateEpilogue();
...
@@ -562,6 +562,7 @@
   F(MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN)        \
   F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS)      \
   F(MC_EVACUATE_UPDATE_POINTERS_WEAK)              \
+  F(MC_FINISH_SWEEP_NEW_LO)                        \
   F(MC_FINISH_SWEEP_ARRAY_BUFFERS)                 \
   F(MC_MARK_CLIENT_HEAPS)                          \
   F(MC_MARK_EMBEDDER_PROLOGUE)                     \

@@ -575,6 +576,8 @@
   F(MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING)        \
   F(MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR)         \
   F(MC_SWEEP_CODE)                                 \
+  F(MC_SWEEP_CODE_LO)                              \
+  F(MC_SWEEP_LO)                                   \
   F(MC_SWEEP_MAP)                                  \
   F(MC_SWEEP_OLD)                                  \
   F(MINOR_MARK_COMPACTOR)                          \
...