Commit 62b85421 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Minor MC fixes for young large objects

Bug: chromium:852420
Change-Id: I08207462e1177fb7e56d37ce221d8bef21cfb25e
Reviewed-on: https://chromium-review.googlesource.com/c/1462964
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59500}
parent 47270ebf
@@ -2937,11 +2937,13 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
   }
   // Promote young generation large objects.
-  LargePage* current = heap()->new_lo_space()->first_page();
   IncrementalMarking::NonAtomicMarkingState* marking_state =
       heap()->incremental_marking()->non_atomic_marking_state();
-  while (current) {
-    LargePage* next_current = current->next_page();
+  for (auto it = heap()->new_lo_space()->begin();
+       it != heap()->new_lo_space()->end();) {
+    LargePage* current = *it;
+    it++;
     HeapObject object = current->GetObject();
     DCHECK(!marking_state->IsGrey(object));
     if (marking_state->IsBlack(object)) {
@@ -2949,7 +2951,6 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
       current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
       evacuation_job.AddItem(new EvacuationItem(current));
     }
-    current = next_current;
   }
   if (evacuation_job.NumberOfItems() == 0) return;
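Both promotion loops in this change (here and in the minor MC variant further down) replace manual first_page()/next_page() chaining with an iterator that is advanced before the loop body runs, because PromoteNewLargeObject unlinks the current page from new_lo_space. A minimal standalone sketch of that advance-before-erase pattern, with std::list standing in for the page list (none of this is V8 code):

#include <list>

void DropEvenValues(std::list<int>& pages) {
  for (auto it = pages.begin(); it != pages.end();) {
    auto current = it;
    ++it;  // advance first: erasing `current` below cannot invalidate `it`
    if (*current % 2 == 0) {
      pages.erase(current);  // models unlinking a promoted page
    }
  }
}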
@@ -3015,13 +3016,13 @@ void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
                                                 IterationMode iteration_mode) {
   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                "LiveObjectVisitor::VisitBlackObjectsNoFail");
-  DCHECK_NE(chunk->owner()->identity(), NEW_LO_SPACE);
-  if (chunk->owner()->identity() == LO_SPACE) {
+  if (chunk->IsLargePage()) {
     HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
-    DCHECK(marking_state->IsBlack(object));
-    const bool success = visitor->Visit(object, object->Size());
-    USE(success);
-    DCHECK(success);
+    if (marking_state->IsBlack(object)) {
+      const bool success = visitor->Visit(object, object->Size());
+      USE(success);
+      DCHECK(success);
+    }
   } else {
     for (auto object_and_size :
          LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
@@ -3044,6 +3045,14 @@ void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
                                                IterationMode iteration_mode) {
   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                "LiveObjectVisitor::VisitGreyObjectsNoFail");
+  if (chunk->IsLargePage()) {
+    HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
+    if (marking_state->IsGrey(object)) {
+      const bool success = visitor->Visit(object, object->Size());
+      USE(success);
+      DCHECK(success);
+    }
+  } else {
   for (auto object_and_size :
        LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
     HeapObject const object = object_and_size.first;
@@ -3052,6 +3061,7 @@ void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
     USE(success);
     DCHECK(success);
   }
+  }
   if (iteration_mode == kClearMarkbits) {
     marking_state->ClearLiveness(chunk);
   }
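VisitBlackObjectsNoFail and VisitGreyObjectsNoFail now branch on IsLargePage() because a large page carries exactly one object, so "visit every marked object" reduces to a single mark-bit check, while LiveObjectRange (which walks a marking bitmap) now DCHECKs that it never sees a large page. A simplified, self-contained sketch of that dispatch, using invented stand-in types rather than V8's classes:

#include <cassert>
#include <functional>
#include <vector>

enum class Color { kWhite, kGrey, kBlack };
struct Object { Color color; };

struct Chunk {
  bool is_large_page;
  std::vector<Object*> objects;  // a large page holds exactly one object
};

void VisitGreyObjects(Chunk* chunk,
                      const std::function<bool(Object*)>& visit) {
  if (chunk->is_large_page) {
    // Large page: no bitmap walk, just test the single object's mark bit.
    Object* object = chunk->objects.front();
    if (object->color == Color::kGrey) {
      const bool success = visit(object);
      assert(success);
      (void)success;
    }
  } else {
    // Regular page: iterate all marked objects (V8 uses
    // LiveObjectRange<kGreyObjects> over the page's marking bitmap here).
    for (Object* object : chunk->objects) {
      if (object->color != Color::kGrey) continue;
      const bool success = visit(object);
      assert(success);
      (void)success;
    }
  }
}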
@@ -4038,10 +4048,10 @@ class YoungGenerationRecordMigratedSlotVisitor final
           p->IsToPage(),
           p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
       RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
-          Page::FromAddress(slot), slot);
+          Page::FromHeapObject(host), slot);
     } else if (p->IsEvacuationCandidate() && IsLive(host)) {
       RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
-          Page::FromAddress(slot), slot);
+          Page::FromHeapObject(host), slot);
     }
   }
 }
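The switch from Page::FromAddress(slot) to Page::FromHeapObject(host) matters precisely for large pages: FromAddress-style lookup masks the low bits of an address with the regular page alignment, which lands on an interior aligned boundary when a slot sits more than one page size past the start of a multi-page large object. A small illustration with assumed constants (V8's regular page size at the time was 512 KB; the helper below is not V8's implementation):

#include <cassert>
#include <cstdint>

constexpr uintptr_t kPageSize = uintptr_t{1} << 19;  // 512 KB
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;

// Only valid when the containing page is exactly one aligned page,
// which is false for large pages.
uintptr_t PageFromAddress(uintptr_t addr) { return addr & ~kPageAlignmentMask; }

int main() {
  const uintptr_t chunk_base = 16 * kPageSize;     // start of a large page
  const uintptr_t slot = chunk_base + 600 * 1024;  // slot 600 KB into the object
  // Masking finds an interior 512 KB boundary, not the chunk header:
  assert(PageFromAddress(slot) == chunk_base + kPageSize);
  assert(PageFromAddress(slot) != chunk_base);
  return 0;
}

Deriving the page from the host object instead attaches the remembered-set entry to the chunk that actually owns the slot.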
@@ -4176,6 +4186,10 @@ void MinorMarkCompactCollector::CollectGarbage() {
         heap()->concurrent_marking()->ClearMemoryChunkData(p);
       }
     }
+    // Since we promote all surviving large objects immediately, all remaining
+    // large objects must be dead.
+    // TODO(ulan): Don't free all as soon as we have an intermediate generation.
+    heap()->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
   }
 
   RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
@@ -4193,6 +4207,7 @@ void MinorMarkCompactCollector::MakeIterable(
     Page* p, MarkingTreatmentMode marking_mode,
     FreeSpaceTreatmentMode free_space_mode) {
+  CHECK(!p->IsLargePage());
   // We have to clear the full collector's markbits for the areas that we
   // remove here.
   MarkCompactCollector* full_collector = heap()->mark_compact_collector();
@@ -4332,6 +4347,9 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
   }
   new_space->Flip();
   new_space->ResetLinearAllocationArea();
+
+  heap()->new_lo_space()->Flip();
+  heap()->new_lo_space()->ResetPendingObject();
 }
 
 void MinorMarkCompactCollector::EvacuateEpilogue() {
@@ -4666,7 +4684,7 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
                                         LiveObjectVisitor::kKeepMarking);
       new_to_old_page_visitor_.account_moved_bytes(
           marking_state->live_bytes(chunk));
-      if (chunk->owner()->identity() != NEW_LO_SPACE) {
+      if (!chunk->IsLargePage()) {
         // TODO(mlippautz): If cleaning array buffers is too slow here we can
         // delay it until the next GC.
         ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
@@ -4689,7 +4707,7 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
                                         LiveObjectVisitor::kKeepMarking);
       new_to_new_page_visitor_.account_moved_bytes(
           marking_state->live_bytes(chunk));
-      DCHECK_NE(chunk->owner()->identity(), NEW_LO_SPACE);
+      DCHECK(!chunk->IsLargePage());
       // TODO(mlippautz): If cleaning array buffers is too slow here we can
       // delay it until the next GC.
       ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
@@ -4730,6 +4748,20 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
     }
     evacuation_job.AddItem(new EvacuationItem(page));
   }
+
+  // Promote young generation large objects.
+  for (auto it = heap()->new_lo_space()->begin();
+       it != heap()->new_lo_space()->end();) {
+    LargePage* current = *it;
+    it++;
+    HeapObject object = current->GetObject();
+    DCHECK(!non_atomic_marking_state_.IsBlack(object));
+    if (non_atomic_marking_state_.IsGrey(object)) {
+      heap_->lo_space()->PromoteNewLargeObject(current);
+      current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+      evacuation_job.AddItem(new EvacuationItem(current));
+    }
+  }
   if (evacuation_job.NumberOfItems() == 0) return;
 
   YoungGenerationMigrationObserver observer(heap(),
......
@@ -184,7 +184,9 @@ class LiveObjectRange {
       : chunk_(chunk),
         bitmap_(bitmap),
         start_(chunk_->area_start()),
-        end_(chunk->area_end()) {}
+        end_(chunk->area_end()) {
+    DCHECK(!chunk->IsLargePage());
+  }
 
   inline iterator begin();
   inline iterator end();
......
@@ -292,7 +292,7 @@ void ScavengerCollector::CollectGarbage() {
   // Since we promote all surviving large objects immediately, all remaining
   // large objects must be dead.
   // TODO(hpayer): Don't free all as soon as we have an intermediate generation.
-  heap_->new_lo_space()->FreeAllObjects();
+  heap_->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
   RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(heap_, [](MemoryChunk* chunk) {
     if (chunk->SweepingDone()) {
......
@@ -3759,6 +3759,15 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
   page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
   page->SetFlag(MemoryChunk::TO_PAGE);
   pending_object_.store(result->address(), std::memory_order_relaxed);
+#ifdef ENABLE_MINOR_MC
+  if (FLAG_minor_mc) {
+    page->AllocateYoungGenerationBitmap();
+    heap()
+        ->minor_mark_compact_collector()
+        ->non_atomic_marking_state()
+        ->ClearLiveness(page);
+  }
+#endif  // ENABLE_MINOR_MC
   page->InitializationMemoryFence();
   DCHECK(page->IsLargePage());
   DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
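With minor MC compiled in and enabled, a freshly allocated young large page now gets its own marking bitmap, zeroed up front so the page's single object starts out unmarked. A simplified stand-in for that allocate-and-clear step (invented types and sizes; V8 sizes the real bitmap to the chunk's area):

#include <bitset>
#include <memory>

constexpr size_t kBitsPerChunk = 4096;  // assumed size, for illustration only
using MarkingBitmap = std::bitset<kBitsPerChunk>;

struct FakeLargePage {
  std::unique_ptr<MarkingBitmap> young_generation_bitmap;

  // Models AllocateYoungGenerationBitmap() followed by ClearLiveness(page):
  // allocate the bitmap lazily and zero it, so no object appears marked.
  void AllocateAndClearYoungGenerationBitmap() {
    young_generation_bitmap = std::make_unique<MarkingBitmap>();
    young_generation_bitmap->reset();
  }
};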
@@ -3775,18 +3784,28 @@ void NewLargeObjectSpace::Flip() {
   }
 }
 
-void NewLargeObjectSpace::FreeAllObjects() {
-  LargePage* current = first_page();
-  while (current) {
-    LargePage* next_current = current->next_page();
-    RemovePage(current, static_cast<size_t>(current->GetObject()->Size()));
-    heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
-        current);
-    current = next_current;
+void NewLargeObjectSpace::FreeDeadObjects(
+    const std::function<bool(HeapObject)>& is_dead) {
+  bool is_marking = heap()->incremental_marking()->IsMarking();
+  size_t surviving_object_size = 0;
+  for (auto it = begin(); it != end();) {
+    LargePage* page = *it;
+    it++;
+    HeapObject object = page->GetObject();
+    size_t size = static_cast<size_t>(object->Size());
+    if (is_dead(object)) {
+      RemovePage(page, size);
+      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
+      if (FLAG_concurrent_marking && is_marking) {
+        heap()->concurrent_marking()->ClearMemoryChunkData(page);
+      }
+    } else {
+      surviving_object_size += size;
+    }
   }
   // Right-trimming does not update the objects_size_ counter. We are lazily
   // updating it after every GC.
-  objects_size_ = 0;
+  objects_size_ = surviving_object_size;
 }
 
 void NewLargeObjectSpace::SetCapacity(size_t capacity) {
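FreeDeadObjects generalizes the old free-everything loop into a predicate-filtered sweep that also rebuilds objects_size_ from the survivors, since right-trimming does not keep that counter up to date. A self-contained model of the same shape, again with invented stand-in types and std::list for the page list:

#include <cstddef>
#include <functional>
#include <list>

struct FakeObject { size_t size; };
struct FakePage { FakeObject object; };

class FakeNewLargeObjectSpace {
 public:
  void AddPage(FakePage page) { pages_.push_back(page); }

  // Same shape as NewLargeObjectSpace::FreeDeadObjects: advance-before-erase
  // over the page list, free pages whose object is dead, and recompute the
  // size counter from the survivors.
  void FreeDeadObjects(const std::function<bool(const FakeObject&)>& is_dead) {
    size_t surviving_object_size = 0;
    for (auto it = pages_.begin(); it != pages_.end();) {
      auto current = it;
      ++it;
      if (is_dead(current->object)) {
        pages_.erase(current);  // models RemovePage + MemoryAllocator::Free
      } else {
        surviving_object_size += current->object.size;
      }
    }
    objects_size_ = surviving_object_size;
  }

  size_t objects_size() const { return objects_size_; }

 private:
  std::list<FakePage> pages_;
  size_t objects_size_ = 0;
};

Both call sites in this commit pass an always-true predicate, reproducing the old FreeAllObjects behavior; the predicate only becomes interesting once an intermediate generation keeps some young large objects alive.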
......
@@ -3093,7 +3093,7 @@ class NewLargeObjectSpace : public LargeObjectSpace {
   void Flip();
 
-  void FreeAllObjects();
+  void FreeDeadObjects(const std::function<bool(HeapObject)>& is_dead);
 
   void SetCapacity(size_t capacity);
......