Commit 445190bf authored by Michael Lippautz, committed by V8 LUCI CQ

[heap] Fix bogus object size computation

The map of an object may be gone by the time we try to compute its
size for accounting purposes.

Bug: chromium:1319217
Change-Id: I93cca766a8cedebf4ed30a3a65fd6eff5bc72bcf
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3605817
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80271}
parent 27ce2c06
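
For context on the bug class: a heap object's size is derived from its map, so a size used for accounting must be read while the map is still known to be alive, and cached for later phases. Below is a minimal, hypothetical C++ sketch (not from this patch or V8) of the hazard and of the fixed pattern that ShrinkPageToObjectSize(page, object, object_size) follows by taking the precomputed size as a parameter:

// Hypothetical sketch, not V8 code: why the size must be cached up front.
#include <cstddef>

struct Map {
  size_t instance_size;
};

struct HeapObject {
  Map* map;  // Freed together with the object once it is swept.
  // Computing the size dereferences the map: undefined behavior if the map
  // is already gone.
  size_t Size() const { return map->instance_size; }
};

// Buggy pattern: recomputing the size in a late GC phase, when a dead
// object's map may no longer exist.
size_t AccountLate(const HeapObject& object) { return object.Size(); }

// Fixed pattern: the caller computes the size while sweeping, with maps
// still intact, and threads the cached value through to later accounting.
size_t AccountEarly(size_t cached_object_size) { return cached_object_size; }
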
@@ -66,19 +66,6 @@ size_t LargeObjectSpace::Available() const {
   return 0;
 }
 
-Address LargePage::GetAddressToShrink(Address object_address,
-                                      size_t object_size) {
-  if (executable() == EXECUTABLE) {
-    return 0;
-  }
-  size_t used_size = ::RoundUp((object_address - address()) + object_size,
-                               MemoryAllocator::GetCommitPageSize());
-  if (used_size < CommittedPhysicalMemory()) {
-    return address() + used_size;
-  }
-  return 0;
-}
-
 void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
   RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
                                          SlotSet::FREE_EMPTY_BUCKETS);
@@ -244,22 +231,6 @@ LargePage* CodeLargeObjectSpace::FindPage(Address a) {
   return nullptr;
 }
 
-void OldLargeObjectSpace::ClearMarkingStateOfLiveObjects() {
-  IncrementalMarking::NonAtomicMarkingState* marking_state =
-      heap()->incremental_marking()->non_atomic_marking_state();
-  LargeObjectSpaceObjectIterator it(this);
-  for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
-    if (marking_state->IsBlackOrGrey(obj)) {
-      Marking::MarkWhite(marking_state->MarkBitFrom(obj));
-      MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
-      RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
-      chunk->ProgressBar().ResetIfEnabled();
-      marking_state->SetLiveBytes(chunk, 0);
-    }
-    DCHECK(marking_state->IsWhite(obj));
-  }
-}
-
 void CodeLargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
   for (Address current = reinterpret_cast<Address>(page);
        current < reinterpret_cast<Address>(page) + page->size();
@@ -308,42 +279,44 @@ void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
   page->set_owner(nullptr);
 }
 
-void LargeObjectSpace::FreeUnmarkedObjects() {
-  LargePage* current = first_page();
-  IncrementalMarking::NonAtomicMarkingState* marking_state =
-      heap()->incremental_marking()->non_atomic_marking_state();
-  // Right-trimming does not update the objects_size_ counter. We are lazily
-  // updating it after every GC.
-  size_t surviving_object_size = 0;
+namespace {
+
+// Returns the `GetCommitPageSize()`-aligned end of the payload that can be
+// used to shrink down an object. Returns kNullAddress if shrinking is not
+// supported.
+Address GetEndOfPayload(LargePage* page, Address object_address,
+                        size_t object_size) {
+  if (page->executable() == EXECUTABLE) {
+    return kNullAddress;
+  }
+  const size_t used_committed_size =
+      ::RoundUp((object_address - page->address()) + object_size,
+                MemoryAllocator::GetCommitPageSize());
+  return (used_committed_size < page->size())
+             ? page->address() + used_committed_size
+             : kNullAddress;
+}
+
+}  // namespace
+
+void LargeObjectSpace::ShrinkPageToObjectSize(LargePage* page,
+                                              HeapObject object,
+                                              size_t object_size) {
+#ifdef DEBUG
   PtrComprCageBase cage_base(heap()->isolate());
-  while (current) {
-    LargePage* next_current = current->next_page();
-    HeapObject object = current->GetObject();
-    DCHECK(!marking_state->IsGrey(object));
-    size_t size = static_cast<size_t>(object.Size(cage_base));
-    if (marking_state->IsBlack(object)) {
-      Address free_start;
-      surviving_object_size += size;
-      if ((free_start = current->GetAddressToShrink(object.address(), size)) !=
-          0) {
-        DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
-        current->ClearOutOfLiveRangeSlots(free_start);
-        const size_t bytes_to_free =
-            current->size() - (free_start - current->address());
-        heap()->memory_allocator()->PartialFreeMemory(
-            current, free_start, bytes_to_free,
-            current->area_start() + object.Size(cage_base));
-        size_ -= bytes_to_free;
-        AccountUncommitted(bytes_to_free);
-      }
-    } else {
-      RemovePage(current, size);
-      heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
-                                       current);
-    }
-    current = next_current;
+  DCHECK_EQ(object, page->GetObject());
+  DCHECK_EQ(object_size, page->GetObject().Size(cage_base));
+#endif  // DEBUG
+  Address free_start = GetEndOfPayload(page, object.address(), object_size);
+  if (free_start != kNullAddress) {
+    DCHECK(!page->IsFlagSet(Page::IS_EXECUTABLE));
+    page->ClearOutOfLiveRangeSlots(free_start);
+    const size_t bytes_to_free = page->size() - (free_start - page->address());
+    heap()->memory_allocator()->PartialFreeMemory(
+        page, free_start, bytes_to_free, page->area_start() + object_size);
+    size_ -= bytes_to_free;
+    AccountUncommitted(bytes_to_free);
   }
-  objects_size_ = surviving_object_size;
 }
 
 bool LargeObjectSpace::Contains(HeapObject object) const {
......
@@ -47,10 +47,6 @@ class LargePage : public MemoryChunk {
     return static_cast<const LargePage*>(list_node_.next());
   }
 
-  // Uncommit memory that is not in use anymore by the object. If the object
-  // cannot be shrunk 0 is returned.
-  Address GetAddressToShrink(Address object_address, size_t object_size);
-
   void ClearOutOfLiveRangeSlots(Address free_start);
 
  private:
@@ -87,8 +83,8 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
   int PageCount() const { return page_count_; }
 
-  // Frees unmarked objects.
-  virtual void FreeUnmarkedObjects();
+  void ShrinkPageToObjectSize(LargePage* page, HeapObject object,
+                              size_t object_size);
 
   // Checks whether a heap object is in this space; O(1).
   bool Contains(HeapObject obj) const;
@@ -141,6 +137,8 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
     return &pending_allocation_mutex_;
   }
 
+  void set_objects_size(size_t objects_size) { objects_size_ = objects_size; }
+
  protected:
   LargeObjectSpace(Heap* heap, AllocationSpace id);
@@ -176,9 +174,6 @@ class OldLargeObjectSpace : public LargeObjectSpace {
   V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
   AllocateRawBackground(LocalHeap* local_heap, int object_size);
 
-  // Clears the marking state of live objects.
-  void ClearMarkingStateOfLiveObjects();
-
   void PromoteNewLargeObject(LargePage* page);
 
  protected:
......
@@ -625,7 +625,7 @@ void MarkCompactCollector::CollectGarbage() {
   heap()->memory_measurement()->FinishProcessing(native_context_stats_);
   RecordObjectStats();
 
-  StartSweepSpaces();
+  Sweep();
   Evacuate();
   Finish();
 }
@@ -1054,6 +1054,23 @@ void MarkCompactCollector::VerifyMarking() {
 #endif
 }
 
+namespace {
+
+void ShrinkPagesToObjectSizes(Heap* heap, OldLargeObjectSpace* space) {
+  size_t surviving_object_size = 0;
+  PtrComprCageBase cage_base(heap->isolate());
+  for (auto it = space->begin(); it != space->end();) {
+    LargePage* current = *(it++);
+    HeapObject object = current->GetObject();
+    const size_t object_size = static_cast<size_t>(object.Size(cage_base));
+    space->ShrinkPageToObjectSize(current, object, object_size);
+    surviving_object_size += object_size;
+  }
+  space->set_objects_size(surviving_object_size);
+}
+
+}  // namespace
+
 void MarkCompactCollector::Finish() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
@@ -1079,9 +1096,18 @@ void MarkCompactCollector::Finish() {
   sweeper()->StartSweeperTasks();
   sweeper()->StartIterabilityTasks();
 
-  // Clear the marking state of live large objects.
-  heap_->lo_space()->ClearMarkingStateOfLiveObjects();
-  heap_->code_lo_space()->ClearMarkingStateOfLiveObjects();
+  if (heap()->new_lo_space()) {
+    // New large objects can only be swept after evacuation as pointers updating
+    // checks for dead objects in from space, similar to regular new space.
+    GCTracer::Scope sweep_scope(heap()->tracer(),
+                                GCTracer::Scope::MC_FINISH_SWEEP_NEW_LO,
+                                ThreadKind::kMain);
+    SweepLargeSpace(heap()->new_lo_space());
+  }
+
+  // Shrink pages if possible after processing and filtering slots.
+  ShrinkPagesToObjectSizes(heap(), heap()->lo_space());
+  ShrinkPagesToObjectSizes(heap(), heap()->code_lo_space());
 
 #ifdef DEBUG
   DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
@@ -3564,13 +3590,6 @@ void MarkCompactCollector::EvacuateEpilogue() {
     DCHECK_EQ(0, heap()->new_space()->Size());
   }
 
-  // Deallocate unmarked large objects.
-  heap()->lo_space()->FreeUnmarkedObjects();
-  heap()->code_lo_space()->FreeUnmarkedObjects();
-  if (heap()->new_lo_space()) {
-    heap()->new_lo_space()->FreeUnmarkedObjects();
-  }
-
   // Old generation. Deallocate evacuated candidate pages.
   ReleaseEvacuationCandidates();
@@ -4028,18 +4047,15 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
   }
 
   // Promote young generation large objects.
-  if (heap()->new_lo_space()) {
-    IncrementalMarking::NonAtomicMarkingState* marking_state =
+  if (auto* new_lo_space = heap()->new_lo_space()) {
+    auto* marking_state =
         heap()->incremental_marking()->non_atomic_marking_state();
-    for (auto it = heap()->new_lo_space()->begin();
-         it != heap()->new_lo_space()->end();) {
-      LargePage* current = *it;
-      it++;
+    for (auto it = new_lo_space->begin(); it != new_lo_space->end();) {
+      LargePage* current = *(it++);
       HeapObject object = current->GetObject();
       DCHECK(!marking_state->IsGrey(object));
       if (marking_state->IsBlack(object)) {
-        heap_->lo_space()->PromoteNewLargeObject(current);
+        heap()->lo_space()->PromoteNewLargeObject(current);
         current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
         promoted_large_pages_.push_back(current);
         evacuation_items.emplace_back(ParallelWorkItem{}, current);
@@ -4230,6 +4246,10 @@ void MarkCompactCollector::Evacuate() {
     for (LargePage* p : promoted_large_pages_) {
       DCHECK(p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
       p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+      HeapObject object = p->GetObject();
+      Marking::MarkWhite(non_atomic_marking_state()->MarkBitFrom(object));
+      p->ProgressBar().ResetIfEnabled();
+      non_atomic_marking_state()->SetLiveBytes(p, 0);
     }
     promoted_large_pages_.clear();
@@ -4904,6 +4924,28 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
   compacting_ = false;
 }
 
+void MarkCompactCollector::SweepLargeSpace(LargeObjectSpace* space) {
+  auto* marking_state =
+      heap()->incremental_marking()->non_atomic_marking_state();
+  PtrComprCageBase cage_base(heap()->isolate());
+  for (auto it = space->begin(); it != space->end();) {
+    LargePage* current = *(it++);
+    HeapObject object = current->GetObject();
+    DCHECK(!marking_state->IsGrey(object));
+    if (!marking_state->IsBlack(object)) {
+      // Object is dead and page can be released.
+      const size_t object_size = static_cast<size_t>(object.Size(cage_base));
+      space->RemovePage(current, object_size);
+      heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
+                                       current);
+      continue;
+    }
+    Marking::MarkWhite(non_atomic_marking_state()->MarkBitFrom(object));
+    current->ProgressBar().ResetIfEnabled();
+    non_atomic_marking_state()->SetLiveBytes(current, 0);
+  }
+}
+
 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
   space->ClearAllocatorState();
@@ -4945,13 +4987,24 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
   }
 }
 
-void MarkCompactCollector::StartSweepSpaces() {
+void MarkCompactCollector::Sweep() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
 #ifdef DEBUG
   state_ = SWEEP_SPACES;
 #endif
 
   {
+    {
+      GCTracer::Scope sweep_scope(
+          heap()->tracer(), GCTracer::Scope::MC_SWEEP_LO, ThreadKind::kMain);
+      SweepLargeSpace(heap()->lo_space());
+    }
+    {
+      GCTracer::Scope sweep_scope(heap()->tracer(),
+                                  GCTracer::Scope::MC_SWEEP_CODE_LO,
+                                  ThreadKind::kMain);
+      SweepLargeSpace(heap()->code_lo_space());
+    }
     {
       GCTracer::Scope sweep_scope(
           heap()->tracer(), GCTracer::Scope::MC_SWEEP_OLD, ThreadKind::kMain);
@@ -5235,7 +5288,12 @@ void MinorMarkCompactCollector::CleanupPromotedPages() {
   promoted_pages_.clear();
 
   for (LargePage* p : promoted_large_pages_) {
     DCHECK(p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
     p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+    HeapObject object = p->GetObject();
+    Marking::MarkWhite(non_atomic_marking_state()->MarkBitFrom(object));
+    p->ProgressBar().ResetIfEnabled();
+    non_atomic_marking_state()->SetLiveBytes(p, 0);
   }
   promoted_large_pages_.clear();
 }
......
@@ -26,6 +26,7 @@
 class EvacuationJobTraits;
 class HeapObjectVisitor;
 class ItemParallelJob;
+class LargeObjectSpace;
 class LargePage;
 class MigrationObserver;
 class ReadOnlySpace;
@@ -696,8 +697,9 @@ class MarkCompactCollector final {
   // Starts sweeping of spaces by contributing on the main thread and setting
   // up other pages for sweeping. Does not start sweeper tasks.
-  void StartSweepSpaces();
+  void Sweep();
   void StartSweepSpace(PagedSpace* space);
+  void SweepLargeSpace(LargeObjectSpace* space);
 
   void EvacuatePrologue();
   void EvacuateEpilogue();
......
@@ -562,6 +562,7 @@
   F(MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN) \
   F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
   F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
+  F(MC_FINISH_SWEEP_NEW_LO) \
   F(MC_FINISH_SWEEP_ARRAY_BUFFERS) \
   F(MC_MARK_CLIENT_HEAPS) \
   F(MC_MARK_EMBEDDER_PROLOGUE) \
@@ -575,6 +576,8 @@
   F(MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING) \
   F(MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR) \
   F(MC_SWEEP_CODE) \
+  F(MC_SWEEP_CODE_LO) \
+  F(MC_SWEEP_LO) \
   F(MC_SWEEP_MAP) \
   F(MC_SWEEP_OLD) \
   F(MINOR_MARK_COMPACTOR) \
......
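
Taken together, the patch reorders the large-object work: dead large objects are now swept and sized in Sweep(), before evacuation can destroy their maps; new-space large objects are swept in Finish(), after pointer updating; and page shrinking plus the objects_size_ update also move to Finish(). A simplified sketch of the resulting control flow, where only the method names come from the diff and the bodies are stubs rather than real V8 code:

// Simplified sketch of the post-patch phase ordering; not real V8 code.
class MarkCompactCollectorSketch {
 public:
  void CollectGarbage() {
    Sweep();     // Now also sweeps lo_space and code_lo_space, so dead large
                 // objects are sized and freed while their maps are valid.
    Evacuate();  // Pointer updating still checks from-space liveness, so
                 // new_lo_space is deliberately not swept yet.
    Finish();    // Sweeps new_lo_space (MC_FINISH_SWEEP_NEW_LO), then trims
                 // surviving large pages via ShrinkPagesToObjectSizes().
  }

 private:
  void Sweep() {}
  void Evacuate() {}
  void Finish() {}
};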