Commit 69ca2bde authored by Nico Hartmann, committed by V8 LUCI CQ

Revert "[heap] Fix bogus object size computation"

This reverts commit 445190bf.

Reason for revert: Speculative revert for https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Linux%20-%20gc%20stress/38224/overview

Original change's description:
> [heap] Fix bogus object size computation
>
> The map of an object may be gone by the time we try to compute its
> size for accounting purposes.
>
> Bug: chromium:1319217
> Change-Id: I93cca766a8cedebf4ed30a3a65fd6eff5bc72bcf
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3605817
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#80271}

Bug: chromium:1319217
Change-Id: I04139a4b469649a7f689643f949b106cd97dc94b
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3616503
Auto-Submit: Nico Hartmann <nicohartmann@chromium.org>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Commit-Queue: Nico Hartmann <nicohartmann@chromium.org>
Owners-Override: Nico Hartmann <nicohartmann@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80273}
parent f96994ba
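For context on the reverted change's rationale (an object's map may already be gone by the time its size is computed for accounting), the issue boils down to an ordering constraint: the size must be read while the map is still valid. Below is a minimal, standalone C++ sketch of that constraint; the types and names are hypothetical and it is not V8 code.

// Minimal standalone sketch (hypothetical types, not V8 code): an object's
// size is derived from its map, so the size must be captured before the map
// can be clobbered by sweeping/freeing.
#include <cstddef>

struct Map {
  std::size_t instance_size;
};

struct HeapObject {
  Map* map;  // may become stale once the object is swept
  std::size_t Size() const { return map->instance_size; }
};

// Safe ordering: read the size first, then release the object.
std::size_t AccountAndSweep(HeapObject& obj) {
  const std::size_t size = obj.Size();  // map still valid here
  obj.map = nullptr;                    // models "the map is gone"
  // Calling obj.Size() after this point would dereference a dead map,
  // which is the bogus size computation the reverted fix targeted.
  return size;
}

int main() {
  Map map{64};
  HeapObject obj{&map};
  return AccountAndSweep(obj) == 64 ? 0 : 1;
}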
@@ -66,6 +66,19 @@ size_t LargeObjectSpace::Available() const {
   return 0;
 }
 
+Address LargePage::GetAddressToShrink(Address object_address,
+                                      size_t object_size) {
+  if (executable() == EXECUTABLE) {
+    return 0;
+  }
+  size_t used_size = ::RoundUp((object_address - address()) + object_size,
+                               MemoryAllocator::GetCommitPageSize());
+  if (used_size < CommittedPhysicalMemory()) {
+    return address() + used_size;
+  }
+  return 0;
+}
+
 void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
   RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
                                          SlotSet::FREE_EMPTY_BUCKETS);
@@ -231,6 +244,22 @@ LargePage* CodeLargeObjectSpace::FindPage(Address a) {
   return nullptr;
 }
 
+void OldLargeObjectSpace::ClearMarkingStateOfLiveObjects() {
+  IncrementalMarking::NonAtomicMarkingState* marking_state =
+      heap()->incremental_marking()->non_atomic_marking_state();
+  LargeObjectSpaceObjectIterator it(this);
+  for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
+    if (marking_state->IsBlackOrGrey(obj)) {
+      Marking::MarkWhite(marking_state->MarkBitFrom(obj));
+      MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
+      RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
+      chunk->ProgressBar().ResetIfEnabled();
+      marking_state->SetLiveBytes(chunk, 0);
+    }
+    DCHECK(marking_state->IsWhite(obj));
+  }
+}
+
 void CodeLargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
   for (Address current = reinterpret_cast<Address>(page);
        current < reinterpret_cast<Address>(page) + page->size();
@@ -279,44 +308,42 @@ void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
   page->set_owner(nullptr);
 }
 
-namespace {
-
-// Returns the `GetCommitPageSize()`-aligned end of the payload that can be
-// used to shrink down an object. Returns kNullAddress if shrinking is not
-// supported.
-Address GetEndOfPayload(LargePage* page, Address object_address,
-                        size_t object_size) {
-  if (page->executable() == EXECUTABLE) {
-    return kNullAddress;
-  }
-  const size_t used_committed_size =
-      ::RoundUp((object_address - page->address()) + object_size,
-                MemoryAllocator::GetCommitPageSize());
-  return (used_committed_size < page->size())
-             ? page->address() + used_committed_size
-             : kNullAddress;
-}
-
-}  // namespace
-
-void LargeObjectSpace::ShrinkPageToObjectSize(LargePage* page,
-                                              HeapObject object,
-                                              size_t object_size) {
-#ifdef DEBUG
-  PtrComprCageBase cage_base(heap()->isolate());
-  DCHECK_EQ(object, page->GetObject());
-  DCHECK_EQ(object_size, page->GetObject().Size(cage_base));
-#endif  // DEBUG
-  Address free_start = GetEndOfPayload(page, object.address(), object_size);
-  if (free_start != kNullAddress) {
-    DCHECK(!page->IsFlagSet(Page::IS_EXECUTABLE));
-    page->ClearOutOfLiveRangeSlots(free_start);
-    const size_t bytes_to_free = page->size() - (free_start - page->address());
-    heap()->memory_allocator()->PartialFreeMemory(
-        page, free_start, bytes_to_free, page->area_start() + object_size);
-    size_ -= bytes_to_free;
-    AccountUncommitted(bytes_to_free);
-  }
-}
+void LargeObjectSpace::FreeUnmarkedObjects() {
+  LargePage* current = first_page();
+  IncrementalMarking::NonAtomicMarkingState* marking_state =
+      heap()->incremental_marking()->non_atomic_marking_state();
+  // Right-trimming does not update the objects_size_ counter. We are lazily
+  // updating it after every GC.
+  size_t surviving_object_size = 0;
+  PtrComprCageBase cage_base(heap()->isolate());
+  while (current) {
+    LargePage* next_current = current->next_page();
+    HeapObject object = current->GetObject();
+    DCHECK(!marking_state->IsGrey(object));
+    size_t size = static_cast<size_t>(object.Size(cage_base));
+    if (marking_state->IsBlack(object)) {
+      Address free_start;
+      surviving_object_size += size;
+      if ((free_start = current->GetAddressToShrink(object.address(), size)) !=
+          0) {
+        DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
+        current->ClearOutOfLiveRangeSlots(free_start);
+        const size_t bytes_to_free =
+            current->size() - (free_start - current->address());
+        heap()->memory_allocator()->PartialFreeMemory(
+            current, free_start, bytes_to_free,
+            current->area_start() + object.Size(cage_base));
+        size_ -= bytes_to_free;
+        AccountUncommitted(bytes_to_free);
+      }
+    } else {
+      RemovePage(current, size);
+      heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
+                                       current);
+    }
+    current = next_current;
+  }
+  objects_size_ = surviving_object_size;
+}
 
 bool LargeObjectSpace::Contains(HeapObject object) const {
......
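The restored LargePage::GetAddressToShrink above rounds the end of the live payload up to the OS commit page size and reports everything beyond that boundary as freeable. A small standalone sketch of that arithmetic follows; the 4 KiB page size and all names are hypothetical stand-ins, not the V8 implementation.

// Standalone sketch of the shrink arithmetic (hypothetical 4 KiB commit page
// size; the real value comes from MemoryAllocator::GetCommitPageSize()).
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::uintptr_t kCommitPageSize = 4096;

constexpr std::uintptr_t RoundUp(std::uintptr_t value, std::uintptr_t align) {
  return (value + align - 1) & ~(align - 1);
}

// Returns the first address that may be uncommitted, or 0 if nothing can be
// shrunk (mirrors the shape of GetAddressToShrink, simplified).
std::uintptr_t AddressToShrink(std::uintptr_t page_start,
                               std::uintptr_t object_address,
                               std::size_t object_size,
                               std::size_t committed_size) {
  const std::uintptr_t used =
      RoundUp((object_address - page_start) + object_size, kCommitPageSize);
  return used < committed_size ? page_start + used : 0;
}

int main() {
  // A 100 KiB object starting 4 KiB into a page with 128 KiB committed:
  // the payload ends at the 104 KiB boundary, so 24 KiB could be returned.
  const std::uintptr_t page = 0x100000;
  const std::uintptr_t free_start =
      AddressToShrink(page, page + 4096, 100 * 1024, 128 * 1024);
  std::printf("free from %#lx\n", static_cast<unsigned long>(free_start));
  return free_start == page + 104 * 1024 ? 0 : 1;
}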
@@ -47,6 +47,10 @@ class LargePage : public MemoryChunk {
     return static_cast<const LargePage*>(list_node_.next());
   }
 
+  // Uncommit memory that is not in use anymore by the object. If the object
+  // cannot be shrunk 0 is returned.
+  Address GetAddressToShrink(Address object_address, size_t object_size);
+
   void ClearOutOfLiveRangeSlots(Address free_start);
 
  private:
@@ -83,8 +87,8 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
   int PageCount() const { return page_count_; }
 
-  void ShrinkPageToObjectSize(LargePage* page, HeapObject object,
-                              size_t object_size);
+  // Frees unmarked objects.
+  virtual void FreeUnmarkedObjects();
 
   // Checks whether a heap object is in this space; O(1).
   bool Contains(HeapObject obj) const;
@@ -137,8 +141,6 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
     return &pending_allocation_mutex_;
   }
 
-  void set_objects_size(size_t objects_size) { objects_size_ = objects_size; }
-
  protected:
   LargeObjectSpace(Heap* heap, AllocationSpace id);
 
@@ -174,6 +176,9 @@ class OldLargeObjectSpace : public LargeObjectSpace {
   V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
   AllocateRawBackground(LocalHeap* local_heap, int object_size);
 
+  // Clears the marking state of live objects.
+  void ClearMarkingStateOfLiveObjects();
+
   void PromoteNewLargeObject(LargePage* page);
 
  protected:
......
@@ -625,7 +625,7 @@ void MarkCompactCollector::CollectGarbage() {
   heap()->memory_measurement()->FinishProcessing(native_context_stats_);
   RecordObjectStats();
 
-  Sweep();
+  StartSweepSpaces();
   Evacuate();
   Finish();
 }
@@ -1054,23 +1054,6 @@ void MarkCompactCollector::VerifyMarking() {
 #endif
 }
 
-namespace {
-
-void ShrinkPagesToObjectSizes(Heap* heap, OldLargeObjectSpace* space) {
-  size_t surviving_object_size = 0;
-  PtrComprCageBase cage_base(heap->isolate());
-  for (auto it = space->begin(); it != space->end();) {
-    LargePage* current = *(it++);
-    HeapObject object = current->GetObject();
-    const size_t object_size = static_cast<size_t>(object.Size(cage_base));
-    space->ShrinkPageToObjectSize(current, object, object_size);
-    surviving_object_size += object_size;
-  }
-  space->set_objects_size(surviving_object_size);
-}
-
-}  // namespace
-
 void MarkCompactCollector::Finish() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
 
@@ -1096,18 +1079,9 @@ void MarkCompactCollector::Finish() {
   sweeper()->StartSweeperTasks();
   sweeper()->StartIterabilityTasks();
 
-  if (heap()->new_lo_space()) {
-    // New large objects can only be swept after evacuation as pointers updating
-    // checks for dead objects in from space, similar to regular new space.
-    GCTracer::Scope sweep_scope(heap()->tracer(),
-                                GCTracer::Scope::MC_FINISH_SWEEP_NEW_LO,
-                                ThreadKind::kMain);
-    SweepLargeSpace(heap()->new_lo_space());
-  }
-
-  // Shrink pages if possible after processing and filtering slots.
-  ShrinkPagesToObjectSizes(heap(), heap()->lo_space());
-  ShrinkPagesToObjectSizes(heap(), heap()->code_lo_space());
+  // Clear the marking state of live large objects.
+  heap_->lo_space()->ClearMarkingStateOfLiveObjects();
+  heap_->code_lo_space()->ClearMarkingStateOfLiveObjects();
 
 #ifdef DEBUG
   DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
@@ -3590,6 +3564,13 @@ void MarkCompactCollector::EvacuateEpilogue() {
     DCHECK_EQ(0, heap()->new_space()->Size());
   }
 
+  // Deallocate unmarked large objects.
+  heap()->lo_space()->FreeUnmarkedObjects();
+  heap()->code_lo_space()->FreeUnmarkedObjects();
+  if (heap()->new_lo_space()) {
+    heap()->new_lo_space()->FreeUnmarkedObjects();
+  }
+
   // Old generation. Deallocate evacuated candidate pages.
   ReleaseEvacuationCandidates();
 
@@ -4047,15 +4028,18 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
   }
 
   // Promote young generation large objects.
-  if (auto* new_lo_space = heap()->new_lo_space()) {
-    auto* marking_state =
+  if (heap()->new_lo_space()) {
+    IncrementalMarking::NonAtomicMarkingState* marking_state =
         heap()->incremental_marking()->non_atomic_marking_state();
-    for (auto it = new_lo_space->begin(); it != new_lo_space->end();) {
-      LargePage* current = *(it++);
+
+    for (auto it = heap()->new_lo_space()->begin();
+         it != heap()->new_lo_space()->end();) {
+      LargePage* current = *it;
+      it++;
       HeapObject object = current->GetObject();
       DCHECK(!marking_state->IsGrey(object));
       if (marking_state->IsBlack(object)) {
-        heap()->lo_space()->PromoteNewLargeObject(current);
+        heap_->lo_space()->PromoteNewLargeObject(current);
         current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
         promoted_large_pages_.push_back(current);
         evacuation_items.emplace_back(ParallelWorkItem{}, current);
@@ -4246,10 +4230,6 @@ void MarkCompactCollector::Evacuate() {
     for (LargePage* p : promoted_large_pages_) {
       DCHECK(p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
       p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
-      HeapObject object = p->GetObject();
-      Marking::MarkWhite(non_atomic_marking_state()->MarkBitFrom(object));
-      p->ProgressBar().ResetIfEnabled();
-      non_atomic_marking_state()->SetLiveBytes(p, 0);
     }
     promoted_large_pages_.clear();
 
@@ -4924,28 +4904,6 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
   compacting_ = false;
 }
 
-void MarkCompactCollector::SweepLargeSpace(LargeObjectSpace* space) {
-  auto* marking_state =
-      heap()->incremental_marking()->non_atomic_marking_state();
-  PtrComprCageBase cage_base(heap()->isolate());
-  for (auto it = space->begin(); it != space->end();) {
-    LargePage* current = *(it++);
-    HeapObject object = current->GetObject();
-    DCHECK(!marking_state->IsGrey(object));
-    if (!marking_state->IsBlack(object)) {
-      // Object is dead and page can be released.
-      const size_t object_size = static_cast<size_t>(object.Size(cage_base));
-      space->RemovePage(current, object_size);
-      heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
-                                       current);
-      continue;
-    }
-    Marking::MarkWhite(non_atomic_marking_state()->MarkBitFrom(object));
-    current->ProgressBar().ResetIfEnabled();
-    non_atomic_marking_state()->SetLiveBytes(current, 0);
-  }
-}
-
 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
   space->ClearAllocatorState();
 
@@ -4987,24 +4945,13 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
   }
 }
 
-void MarkCompactCollector::Sweep() {
+void MarkCompactCollector::StartSweepSpaces() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
 #ifdef DEBUG
   state_ = SWEEP_SPACES;
 #endif
   {
-    {
-      GCTracer::Scope sweep_scope(
-          heap()->tracer(), GCTracer::Scope::MC_SWEEP_LO, ThreadKind::kMain);
-      SweepLargeSpace(heap()->lo_space());
-    }
-    {
-      GCTracer::Scope sweep_scope(heap()->tracer(),
-                                  GCTracer::Scope::MC_SWEEP_CODE_LO,
-                                  ThreadKind::kMain);
-      SweepLargeSpace(heap()->code_lo_space());
-    }
     {
       GCTracer::Scope sweep_scope(
           heap()->tracer(), GCTracer::Scope::MC_SWEEP_OLD, ThreadKind::kMain);
@@ -5288,12 +5235,7 @@ void MinorMarkCompactCollector::CleanupPromotedPages() {
   promoted_pages_.clear();
 
   for (LargePage* p : promoted_large_pages_) {
-    DCHECK(p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
     p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
-    HeapObject object = p->GetObject();
-    Marking::MarkWhite(non_atomic_marking_state()->MarkBitFrom(object));
-    p->ProgressBar().ResetIfEnabled();
-    non_atomic_marking_state()->SetLiveBytes(p, 0);
   }
   promoted_large_pages_.clear();
 }
......
@@ -26,7 +26,6 @@ namespace internal {
 class EvacuationJobTraits;
 class HeapObjectVisitor;
 class ItemParallelJob;
-class LargeObjectSpace;
 class LargePage;
 class MigrationObserver;
 class ReadOnlySpace;
@@ -697,9 +696,8 @@ class MarkCompactCollector final {
   // Starts sweeping of spaces by contributing on the main thread and setting
   // up other pages for sweeping. Does not start sweeper tasks.
-  void Sweep();
+  void StartSweepSpaces();
   void StartSweepSpace(PagedSpace* space);
-  void SweepLargeSpace(LargeObjectSpace* space);
 
   void EvacuatePrologue();
   void EvacuateEpilogue();
......
@@ -562,7 +562,6 @@
   F(MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN)        \
   F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS)      \
   F(MC_EVACUATE_UPDATE_POINTERS_WEAK)              \
-  F(MC_FINISH_SWEEP_NEW_LO)                        \
   F(MC_FINISH_SWEEP_ARRAY_BUFFERS)                 \
   F(MC_MARK_CLIENT_HEAPS)                          \
   F(MC_MARK_EMBEDDER_PROLOGUE)                     \
@@ -576,8 +575,6 @@
   F(MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING)        \
   F(MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR)         \
   F(MC_SWEEP_CODE)                                 \
-  F(MC_SWEEP_CODE_LO)                              \
-  F(MC_SWEEP_LO)                                   \
   F(MC_SWEEP_MAP)                                  \
   F(MC_SWEEP_OLD)                                  \
   F(MINOR_MARK_COMPACTOR)                          \
......