Commit 10eac4eb authored by Michael Lippautz, committed by V8 LUCI CQ

Reland "[heap] Fix bogus object size computation"

This is a reland of commit 445190bf

The fix addresses the issue where object size accounting went out of
sync because right-trimmed large objects (LO) in new space were
migrated with a different size than the one they had been accounted
with.

The fix now iterates only live objects when computing sizes, which
avoids accessing reclaimed maps, and recomputes the objects accounting
after each GC.

Original change's description:
> [heap] Fix bogus object size computation
>
> The map of an object may be gone by the time we try to compute its
> size for accounting purposes.
>
> Bug: chromium:1319217
> Change-Id: I93cca766a8cedebf4ed30a3a65fd6eff5bc72bcf
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3605817
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#80271}

Bug: chromium:1319217
Change-Id: I8d032edf96a4bf4b0faa4bbd9b0be247051c49fb
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3616507
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80316}
parent 9a06f717
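
To make the accounting change concrete, here is a minimal, self-contained sketch (not part of this commit) of the idea: dead pages are dropped and objects_size_ is recomputed from the current sizes of the surviving objects, so stale per-object sizes left behind by right-trimming can no longer skew the counter. The types below are simplified stand-ins, not the actual V8 classes.

#include <cstddef>
#include <vector>

// Simplified stand-in for a large-object page holding a single object.
struct PageSketch {
  size_t object_size;  // current (possibly right-trimmed) size of the object
  bool is_marked;      // survived marking
};

// Drops dead pages and recomputes the space-wide object size from live pages.
size_t SweepLargeSpaceSketch(std::vector<PageSketch>& pages) {
  size_t surviving_object_size = 0;
  for (auto it = pages.begin(); it != pages.end();) {
    if (!it->is_marked) {
      it = pages.erase(it);  // dead: release the page, no per-object bookkeeping
      continue;
    }
    surviving_object_size += it->object_size;  // live: account the *current* size
    ++it;
  }
  return surviving_object_size;  // value the space's objects_size_ is set to
}
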
......@@ -66,19 +66,6 @@ size_t LargeObjectSpace::Available() const {
return 0;
}
Address LargePage::GetAddressToShrink(Address object_address,
size_t object_size) {
if (executable() == EXECUTABLE) {
return 0;
}
size_t used_size = ::RoundUp((object_address - address()) + object_size,
MemoryAllocator::GetCommitPageSize());
if (used_size < CommittedPhysicalMemory()) {
return address() + used_size;
}
return 0;
}
void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
SlotSet::FREE_EMPTY_BUCKETS);
......@@ -244,22 +231,6 @@ LargePage* CodeLargeObjectSpace::FindPage(Address a) {
return nullptr;
}
void OldLargeObjectSpace::ClearMarkingStateOfLiveObjects() {
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
LargeObjectSpaceObjectIterator it(this);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (marking_state->IsBlackOrGrey(obj)) {
Marking::MarkWhite(marking_state->MarkBitFrom(obj));
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
chunk->ProgressBar().ResetIfEnabled();
marking_state->SetLiveBytes(chunk, 0);
}
DCHECK(marking_state->IsWhite(obj));
}
}
void CodeLargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
for (Address current = reinterpret_cast<Address>(page);
current < reinterpret_cast<Address>(page) + page->size();
......@@ -282,10 +253,9 @@ void OldLargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
PtrComprCageBase cage_base(heap()->isolate());
size_t object_size = static_cast<size_t>(page->GetObject().Size(cage_base));
static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page);
page->ClearFlag(MemoryChunk::FROM_PAGE);
AddPage(page, object_size);
AddPage(page, static_cast<size_t>(page->GetObject().Size(cage_base)));
}
void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
......@@ -298,52 +268,52 @@ void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
page->SetOldGenerationPageFlags(!is_off_thread() &&
heap()->incremental_marking()->IsMarking());
}
void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
void LargeObjectSpace::RemovePage(LargePage* page) {
size_ -= static_cast<int>(page->size());
AccountUncommitted(page->size());
objects_size_ -= object_size;
page_count_--;
memory_chunk_list_.Remove(page);
page->set_owner(nullptr);
}
void LargeObjectSpace::FreeUnmarkedObjects() {
LargePage* current = first_page();
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
// Right-trimming does not update the objects_size_ counter. We are lazily
// updating it after every GC.
size_t surviving_object_size = 0;
namespace {
// Returns the `GetCommitPageSize()`-aligned end of the payload that can be
// used to shrink down an object. Returns kNullAddress if shrinking is not
// supported.
Address GetEndOfPayload(LargePage* page, Address object_address,
size_t object_size) {
if (page->executable() == EXECUTABLE) {
return kNullAddress;
}
const size_t used_committed_size =
::RoundUp((object_address - page->address()) + object_size,
MemoryAllocator::GetCommitPageSize());
return (used_committed_size < page->size())
? page->address() + used_committed_size
: kNullAddress;
}
} // namespace
void LargeObjectSpace::ShrinkPageToObjectSize(LargePage* page,
HeapObject object,
size_t object_size) {
#ifdef DEBUG
PtrComprCageBase cage_base(heap()->isolate());
while (current) {
LargePage* next_current = current->next_page();
HeapObject object = current->GetObject();
DCHECK(!marking_state->IsGrey(object));
size_t size = static_cast<size_t>(object.Size(cage_base));
if (marking_state->IsBlack(object)) {
Address free_start;
surviving_object_size += size;
if ((free_start = current->GetAddressToShrink(object.address(), size)) !=
0) {
DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
current->ClearOutOfLiveRangeSlots(free_start);
const size_t bytes_to_free =
current->size() - (free_start - current->address());
heap()->memory_allocator()->PartialFreeMemory(
current, free_start, bytes_to_free,
current->area_start() + object.Size(cage_base));
size_ -= bytes_to_free;
AccountUncommitted(bytes_to_free);
}
} else {
RemovePage(current, size);
heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
current);
}
current = next_current;
DCHECK_EQ(object, page->GetObject());
DCHECK_EQ(object_size, page->GetObject().Size(cage_base));
#endif // DEBUG
Address free_start = GetEndOfPayload(page, object.address(), object_size);
if (free_start != kNullAddress) {
DCHECK(!page->IsFlagSet(Page::IS_EXECUTABLE));
page->ClearOutOfLiveRangeSlots(free_start);
const size_t bytes_to_free = page->size() - (free_start - page->address());
heap()->memory_allocator()->PartialFreeMemory(
page, free_start, bytes_to_free, page->area_start() + object_size);
size_ -= bytes_to_free;
AccountUncommitted(bytes_to_free);
}
objects_size_ = surviving_object_size;
}
bool LargeObjectSpace::Contains(HeapObject object) const {
......@@ -558,17 +528,16 @@ void NewLargeObjectSpace::FreeDeadObjects(
LargePage* page = *it;
it++;
HeapObject object = page->GetObject();
size_t size = static_cast<size_t>(object.Size(cage_base));
if (is_dead(object)) {
freed_pages = true;
RemovePage(page, size);
RemovePage(page);
heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
page);
if (FLAG_concurrent_marking && is_marking) {
heap()->concurrent_marking()->ClearMemoryChunkData(page);
}
} else {
surviving_object_size += size;
surviving_object_size += static_cast<size_t>(object.Size(cage_base));
}
}
// Right-trimming does not update the objects_size_ counter. We are lazily
......@@ -604,10 +573,10 @@ void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
InsertChunkMapEntries(page);
}
void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
void CodeLargeObjectSpace::RemovePage(LargePage* page) {
RemoveChunkMapEntries(page);
heap()->isolate()->RemoveCodeMemoryChunk(page);
OldLargeObjectSpace::RemovePage(page, object_size);
OldLargeObjectSpace::RemovePage(page);
}
} // namespace internal
......
......@@ -47,10 +47,6 @@ class LargePage : public MemoryChunk {
return static_cast<const LargePage*>(list_node_.next());
}
// Uncommit memory that is not in use anymore by the object. If the object
// cannot be shrunk 0 is returned.
Address GetAddressToShrink(Address object_address, size_t object_size);
void ClearOutOfLiveRangeSlots(Address free_start);
private:
......@@ -87,8 +83,8 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
int PageCount() const { return page_count_; }
// Frees unmarked objects.
virtual void FreeUnmarkedObjects();
void ShrinkPageToObjectSize(LargePage* page, HeapObject object,
size_t object_size);
// Checks whether a heap object is in this space; O(1).
bool Contains(HeapObject obj) const;
......@@ -100,7 +96,7 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
bool IsEmpty() const { return first_page() == nullptr; }
virtual void AddPage(LargePage* page, size_t object_size);
virtual void RemovePage(LargePage* page, size_t object_size);
virtual void RemovePage(LargePage* page);
LargePage* first_page() override {
return reinterpret_cast<LargePage*>(memory_chunk_list_.front());
......@@ -141,6 +137,8 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
return &pending_allocation_mutex_;
}
void set_objects_size(size_t objects_size) { objects_size_ = objects_size; }
protected:
LargeObjectSpace(Heap* heap, AllocationSpace id);
......@@ -176,9 +174,6 @@ class OldLargeObjectSpace : public LargeObjectSpace {
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
AllocateRawBackground(LocalHeap* local_heap, int object_size);
// Clears the marking state of live objects.
void ClearMarkingStateOfLiveObjects();
void PromoteNewLargeObject(LargePage* page);
protected:
......@@ -225,7 +220,7 @@ class CodeLargeObjectSpace : public OldLargeObjectSpace {
protected:
void AddPage(LargePage* page, size_t object_size) override;
void RemovePage(LargePage* page, size_t object_size) override;
void RemovePage(LargePage* page) override;
private:
static const size_t kInitialChunkMapCapacity = 1024;
......
......@@ -37,6 +37,7 @@
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/memory-measurement-inl.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/object-stats.h"
......@@ -625,7 +626,7 @@ void MarkCompactCollector::CollectGarbage() {
heap()->memory_measurement()->FinishProcessing(native_context_stats_);
RecordObjectStats();
StartSweepSpaces();
Sweep();
Evacuate();
Finish();
}
......@@ -1054,6 +1055,23 @@ void MarkCompactCollector::VerifyMarking() {
#endif
}
namespace {
void ShrinkPagesToObjectSizes(Heap* heap, OldLargeObjectSpace* space) {
size_t surviving_object_size = 0;
PtrComprCageBase cage_base(heap->isolate());
for (auto it = space->begin(); it != space->end();) {
LargePage* current = *(it++);
HeapObject object = current->GetObject();
const size_t object_size = static_cast<size_t>(object.Size(cage_base));
space->ShrinkPageToObjectSize(current, object, object_size);
surviving_object_size += object_size;
}
space->set_objects_size(surviving_object_size);
}
} // namespace
void MarkCompactCollector::Finish() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
......@@ -1076,12 +1094,21 @@ void MarkCompactCollector::Finish() {
local_weak_objects_.reset();
weak_objects_.next_ephemerons.Clear();
if (heap()->new_lo_space()) {
GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_FINISH_SWEEP_NEW_LO,
ThreadKind::kMain);
SweepLargeSpace(heap()->new_lo_space());
}
sweeper()->StartSweeperTasks();
sweeper()->StartIterabilityTasks();
// Give pages that are queued to be freed back to the OS.
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
// Clear the marking state of live large objects.
heap_->lo_space()->ClearMarkingStateOfLiveObjects();
heap_->code_lo_space()->ClearMarkingStateOfLiveObjects();
// Shrink pages if possible after processing and filtering slots.
ShrinkPagesToObjectSizes(heap(), heap()->lo_space());
ShrinkPagesToObjectSizes(heap(), heap()->code_lo_space());
#ifdef DEBUG
DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
......@@ -3564,19 +3591,9 @@ void MarkCompactCollector::EvacuateEpilogue() {
DCHECK_EQ(0, heap()->new_space()->Size());
}
// Deallocate unmarked large objects.
heap()->lo_space()->FreeUnmarkedObjects();
heap()->code_lo_space()->FreeUnmarkedObjects();
if (heap()->new_lo_space()) {
heap()->new_lo_space()->FreeUnmarkedObjects();
}
// Old generation. Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
// Give pages that are queued to be freed back to the OS.
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
#ifdef DEBUG
MemoryChunkIterator chunk_iterator(heap());
......@@ -4028,23 +4045,21 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
}
// Promote young generation large objects.
if (heap()->new_lo_space()) {
IncrementalMarking::NonAtomicMarkingState* marking_state =
if (auto* new_lo_space = heap()->new_lo_space()) {
auto* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
for (auto it = heap()->new_lo_space()->begin();
it != heap()->new_lo_space()->end();) {
LargePage* current = *it;
it++;
for (auto it = new_lo_space->begin(); it != new_lo_space->end();) {
LargePage* current = *(it++);
HeapObject object = current->GetObject();
DCHECK(!marking_state->IsGrey(object));
if (marking_state->IsBlack(object)) {
heap_->lo_space()->PromoteNewLargeObject(current);
heap()->lo_space()->PromoteNewLargeObject(current);
current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
promoted_large_pages_.push_back(current);
evacuation_items.emplace_back(ParallelWorkItem{}, current);
}
}
new_lo_space->set_objects_size(0);
}
const size_t pages_count = evacuation_items.size();
......@@ -4230,6 +4245,10 @@ void MarkCompactCollector::Evacuate() {
for (LargePage* p : promoted_large_pages_) {
DCHECK(p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
HeapObject object = p->GetObject();
Marking::MarkWhite(non_atomic_marking_state()->MarkBitFrom(object));
p->ProgressBar().ResetIfEnabled();
non_atomic_marking_state()->SetLiveBytes(p, 0);
}
promoted_large_pages_.clear();
......@@ -4904,6 +4923,31 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
compacting_ = false;
}
void MarkCompactCollector::SweepLargeSpace(LargeObjectSpace* space) {
auto* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
PtrComprCageBase cage_base(heap()->isolate());
size_t surviving_object_size = 0;
for (auto it = space->begin(); it != space->end();) {
LargePage* current = *(it++);
HeapObject object = current->GetObject();
DCHECK(!marking_state->IsGrey(object));
if (!marking_state->IsBlack(object)) {
// Object is dead and page can be released.
space->RemovePage(current);
heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
current);
continue;
}
Marking::MarkWhite(non_atomic_marking_state()->MarkBitFrom(object));
current->ProgressBar().ResetIfEnabled();
non_atomic_marking_state()->SetLiveBytes(current, 0);
surviving_object_size += static_cast<size_t>(object.Size(cage_base));
}
space->set_objects_size(surviving_object_size);
}
void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
space->ClearAllocatorState();
......@@ -4945,13 +4989,24 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
}
}
void MarkCompactCollector::StartSweepSpaces() {
void MarkCompactCollector::Sweep() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
#ifdef DEBUG
state_ = SWEEP_SPACES;
#endif
{
{
GCTracer::Scope sweep_scope(
heap()->tracer(), GCTracer::Scope::MC_SWEEP_LO, ThreadKind::kMain);
SweepLargeSpace(heap()->lo_space());
}
{
GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_CODE_LO,
ThreadKind::kMain);
SweepLargeSpace(heap()->code_lo_space());
}
{
GCTracer::Scope sweep_scope(
heap()->tracer(), GCTracer::Scope::MC_SWEEP_OLD, ThreadKind::kMain);
......@@ -5235,7 +5290,12 @@ void MinorMarkCompactCollector::CleanupPromotedPages() {
promoted_pages_.clear();
for (LargePage* p : promoted_large_pages_) {
DCHECK(p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
HeapObject object = p->GetObject();
Marking::MarkWhite(non_atomic_marking_state()->MarkBitFrom(object));
p->ProgressBar().ResetIfEnabled();
non_atomic_marking_state()->SetLiveBytes(p, 0);
}
promoted_large_pages_.clear();
}
......@@ -6125,6 +6185,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
promoted_large_pages_.push_back(current);
evacuation_items.emplace_back(ParallelWorkItem{}, current);
}
heap()->new_lo_space()->set_objects_size(0);
}
if (evacuation_items.empty()) return;
......
......@@ -26,6 +26,7 @@ namespace internal {
class EvacuationJobTraits;
class HeapObjectVisitor;
class ItemParallelJob;
class LargeObjectSpace;
class LargePage;
class MigrationObserver;
class ReadOnlySpace;
......@@ -696,8 +697,9 @@ class MarkCompactCollector final {
// Starts sweeping of spaces by contributing on the main thread and setting
// up other pages for sweeping. Does not start sweeper tasks.
void StartSweepSpaces();
void Sweep();
void StartSweepSpace(PagedSpace* space);
void SweepLargeSpace(LargeObjectSpace* space);
void EvacuatePrologue();
void EvacuateEpilogue();
......
......@@ -517,6 +517,7 @@ void ScavengerCollector::HandleSurvivingNewLargeObjects() {
heap_->lo_space()->PromoteNewLargeObject(page);
}
surviving_new_large_objects_.clear();
heap_->new_lo_space()->set_objects_size(0);
}
void ScavengerCollector::MergeSurvivingNewLargeObjects(
......
......@@ -562,6 +562,7 @@
F(MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
F(MC_FINISH_SWEEP_NEW_LO) \
F(MC_FINISH_SWEEP_ARRAY_BUFFERS) \
F(MC_MARK_CLIENT_HEAPS) \
F(MC_MARK_EMBEDDER_PROLOGUE) \
......@@ -575,6 +576,8 @@
F(MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING) \
F(MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR) \
F(MC_SWEEP_CODE) \
F(MC_SWEEP_CODE_LO) \
F(MC_SWEEP_LO) \
F(MC_SWEEP_MAP) \
F(MC_SWEEP_OLD) \
F(MINOR_MARK_COMPACTOR) \
......
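
As a side note, the payload round-up performed by the GetEndOfPayload helper introduced above can be reproduced with the following standalone sketch; the 4 KiB commit page size and the addresses are assumptions for illustration, not values taken from V8.

#include <cstdint>
#include <cstdio>

using Address = uintptr_t;
constexpr size_t kCommitPageSizeSketch = 4096;  // assumed commit page size

// Returns the commit-page-aligned end of the live payload, or 0 if the page
// cannot be shrunk (the rounded-up payload already covers the whole page).
Address EndOfPayloadSketch(Address page_start, size_t page_size,
                           Address object_address, size_t object_size) {
  const size_t used = (object_address - page_start) + object_size;
  const size_t used_committed =
      (used + kCommitPageSizeSketch - 1) / kCommitPageSizeSketch *
      kCommitPageSizeSketch;
  return (used_committed < page_size) ? page_start + used_committed : 0;
}

int main() {
  // A 64 KiB page whose object starts 256 bytes in and was trimmed to 5000 bytes:
  const Address page = 0x10000;
  const Address free_start = EndOfPayloadSketch(page, 64 * 1024, page + 256, 5000);
  std::printf("freeable tail starts at offset %zu\n",
              static_cast<size_t>(free_start - page));  // prints 8192
  return 0;
}
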