Commit 188926ca authored by Ulan Degenbaev, committed by Commit Bot

[heap] Fix GC counters to account for the new large object space.

This also fixes the external string table update after scavenge and
the fast promotion mode.

Bug: chromium:852420
Change-Id: I5d2e1b585b8c74970047867aa587f928e116ed73
Reviewed-on: https://chromium-review.googlesource.com/c/1454604
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59379}
parent ec30cf47
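The unifying idea behind the counter changes below is that the young generation is no longer just the semi-space new space: large objects can now be allocated in a separate new large object space (new_lo_space), so every size that previously read only new_space()->Size() has to add new_lo_space()->Size(). A minimal standalone sketch of that accounting idea (a simplified model, not V8 code; the type and function names are invented):

#include <cstddef>
#include <iostream>

// Simplified model of the two young-generation spaces.
struct YoungGenerationModel {
  size_t new_space_bytes;     // regular semi-space allocations
  size_t new_lo_space_bytes;  // large objects allocated directly in the young gen
};

// Old accounting: only the semi-space was counted.
size_t YoungSizeBeforeFix(const YoungGenerationModel& young) {
  return young.new_space_bytes;
}

// New accounting: the young large object space is included as well.
size_t YoungSizeAfterFix(const YoungGenerationModel& young) {
  return young.new_space_bytes + young.new_lo_space_bytes;
}

int main() {
  YoungGenerationModel young{512 * 1024, 3 * 1024 * 1024};
  std::cout << "before: " << YoungSizeBeforeFix(young) << " bytes\n";
  std::cout << "after:  " << YoungSizeAfterFix(young) << " bytes\n";
}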
@@ -18,6 +18,9 @@ namespace internal {
void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer buffer) {
if (buffer->backing_store() == nullptr) return;
+  // ArrayBuffer tracking works only for small objects.
+  DCHECK(!heap->IsLargeObject(buffer));
const size_t length = buffer->byte_length();
Page* page = Page::FromHeapObject(buffer);
{
......
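The added DCHECK documents an invariant rather than new behavior: ArrayBuffer tracking keeps its bookkeeping on the regular Page that owns the buffer, and large objects live on their own large-object pages, so a buffer that is itself a large object must never be registered here. A rough standalone sketch of that invariant (PageModel and its fields are invented for illustration, not V8 types):

#include <cassert>
#include <cstddef>

struct PageModel {
  bool is_large_object_page;
  size_t tracked_array_buffer_bytes;
};

void RegisterNew(PageModel* page, size_t byte_length) {
  // ArrayBuffer tracking works only for small objects on regular pages.
  assert(!page->is_large_object_page);
  page->tracked_array_buffer_bytes += byte_length;
}

int main() {
  PageModel regular_page{false, 0};
  RegisterNew(&regular_page, 4096);
  assert(regular_page.tracked_array_buffer_bytes == 4096);
}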
@@ -115,8 +115,8 @@ GCTracer::Event::Event(Type type, GarbageCollectionReason gc_reason,
end_memory_size(0),
start_holes_size(0),
end_holes_size(0),
-      new_space_object_size(0),
-      survived_new_space_object_size(0),
+      young_object_size(0),
+      survived_young_object_size(0),
incremental_marking_bytes(0),
incremental_marking_duration(0.0) {
for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) {
@@ -246,7 +246,8 @@ void GCTracer::Start(GarbageCollector collector,
current_.start_object_size = heap_->SizeOfObjects();
current_.start_memory_size = heap_->memory_allocator()->Size();
current_.start_holes_size = CountTotalHolesSize(heap_);
-  current_.new_space_object_size = heap_->new_space()->Size();
+  current_.young_object_size =
+      heap_->new_space()->Size() + heap_->new_lo_space()->Size();
current_.incremental_marking_bytes = 0;
current_.incremental_marking_duration = 0;
@@ -299,7 +300,7 @@ void GCTracer::Stop(GarbageCollector collector) {
current_.end_object_size = heap_->SizeOfObjects();
current_.end_memory_size = heap_->memory_allocator()->Size();
current_.end_holes_size = CountTotalHolesSize(heap_);
-  current_.survived_new_space_object_size = heap_->SurvivedNewSpaceObjectSize();
+  current_.survived_young_object_size = heap_->SurvivedYoungObjectSize();
AddAllocation(current_.end_time);
@@ -309,9 +310,9 @@
case Event::SCAVENGER:
case Event::MINOR_MARK_COMPACTOR:
recorded_minor_gcs_total_.Push(
-          MakeBytesAndDuration(current_.new_space_object_size, duration));
-      recorded_minor_gcs_survived_.Push(MakeBytesAndDuration(
-          current_.survived_new_space_object_size, duration));
+          MakeBytesAndDuration(current_.young_object_size, duration));
+      recorded_minor_gcs_survived_.Push(
+          MakeBytesAndDuration(current_.survived_young_object_size, duration));
FetchBackgroundMinorGCCounters();
break;
case Event::INCREMENTAL_MARK_COMPACTOR:
......
@@ -179,11 +179,11 @@ class V8_EXPORT_PRIVATE GCTracer {
// after the current GC.
size_t end_holes_size;
-    // Size of new space objects in constructor.
-    size_t new_space_object_size;
+    // Size of young objects in constructor.
+    size_t young_object_size;
-    // Size of survived new space objects in destructor.
-    size_t survived_new_space_object_size;
+    // Size of survived young objects in destructor.
+    size_t survived_young_object_size;
// Bytes marked incrementally for INCREMENTAL_MARK_COMPACTOR
size_t incremental_marking_bytes;
......
@@ -286,7 +286,8 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
}
// Over-estimate the new space size using capacity to allow some slack.
-  if (!CanExpandOldGeneration(new_space_->TotalCapacity())) {
+  if (!CanExpandOldGeneration(new_space_->TotalCapacity() +
+                              new_lo_space()->Size())) {
isolate_->counters()
->gc_compactor_caused_by_oldspace_exhaustion()
->Increment();
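The comment above spells out the reasoning: a scavenge may end up promoting the entire young generation, so the check over-estimates it with the new space capacity (slack for what could still be allocated) and, after this change, also adds the current size of the young large object space. A hedged sketch of that decision with made-up numbers (CanExpandOldGeneration below is a stand-in for the real helper, which checks headroom against the old-generation limit):

#include <cstddef>
#include <cstdio>

// Stand-in: true if the old generation still has at least `bytes` of headroom.
bool CanExpandOldGeneration(size_t bytes, size_t old_generation_headroom) {
  return bytes <= old_generation_headroom;
}

const char* SelectCollector(size_t new_space_capacity, size_t new_lo_space_size,
                            size_t old_generation_headroom) {
  // Over-estimate the young generation using capacity to allow some slack.
  if (!CanExpandOldGeneration(new_space_capacity + new_lo_space_size,
                              old_generation_headroom)) {
    return "MARK_COMPACTOR";  // promotion could overflow the old generation
  }
  return "SCAVENGER";
}

int main() {
  std::printf("%s\n", SelectCollector(8u << 20, 4u << 20, 10u << 20));  // MARK_COMPACTOR
  std::printf("%s\n", SelectCollector(8u << 20, 1u << 20, 10u << 20));  // SCAVENGER
}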
@@ -1274,7 +1275,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced;
-  if (!CanExpandOldGeneration(new_space()->Capacity())) {
+  if (!CanExpandOldGeneration(new_space()->Capacity() +
+                              new_lo_space()->Size())) {
InvokeNearHeapLimitCallback();
}
@@ -1683,7 +1685,8 @@ bool Heap::PerformGarbageCollection(
EnsureFromSpaceIsCommitted();
-  size_t start_new_space_size = Heap::new_space()->Size();
+  size_t start_young_generation_size =
+      Heap::new_space()->Size() + new_lo_space()->Size();
{
Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
@@ -1706,7 +1709,8 @@ bool Heap::PerformGarbageCollection(
break;
case SCAVENGER:
if ((fast_promotion_mode_ &&
-           CanExpandOldGeneration(new_space()->Size()))) {
+           CanExpandOldGeneration(new_space()->Size() +
+                                  new_lo_space()->Size()))) {
tracer()->NotifyYoungGenerationHandling(
YoungGenerationHandling::kFastPromotionDuringScavenge);
EvacuateYoungGeneration();
@@ -1722,14 +1726,14 @@
ProcessPretenuringFeedback();
}
-  UpdateSurvivalStatistics(static_cast<int>(start_new_space_size));
+  UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
ConfigureInitialOldGenerationSize();
if (collector != MARK_COMPACTOR) {
// Objects that died in the new space might have been accounted
// as bytes marked ahead of schedule by the incremental marker.
incremental_marking()->UpdateMarkedBytesAfterScavenge(
-        start_new_space_size - SurvivedNewSpaceObjectSize());
+        start_young_generation_size - SurvivedYoungObjectSize());
}
if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
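The correction above is simple arithmetic: everything that was in the young generation at the start of the GC and did not survive is dead, and those bytes may already have been counted as marked by the incremental marker, so they are subtracted again. With the fix, the start figure includes the new large object space, matching SurvivedYoungObjectSize() on the other side. A simplified model of that bookkeeping (not the real IncrementalMarking class):

#include <algorithm>
#include <cstddef>
#include <iostream>

// Simplified stand-in for the incremental marker's byte accounting.
struct IncrementalMarkingModel {
  size_t marked_bytes = 0;
  void UpdateMarkedBytesAfterScavenge(size_t dead_young_bytes) {
    // Dead young objects must not stay counted as marking progress.
    marked_bytes -= std::min(marked_bytes, dead_young_bytes);
  }
};

int main() {
  size_t start_young_generation_size = 6 * 1024 * 1024;  // new space + new LO space
  size_t survived_young_object_size = 1 * 1024 * 1024;   // promoted + copied

  IncrementalMarkingModel marking;
  marking.marked_bytes = 8 * 1024 * 1024;
  marking.UpdateMarkedBytesAfterScavenge(start_young_generation_size -
                                         survived_young_object_size);
  std::cout << "marked bytes after scavenge: " << marking.marked_bytes << "\n";  // 3 MB
}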
@@ -1949,7 +1953,8 @@ void Heap::EvacuateYoungGeneration() {
ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
if (!FLAG_concurrent_marking) {
DCHECK(fast_promotion_mode_);
-    DCHECK(CanExpandOldGeneration(new_space()->Size()));
+    DCHECK(
+        CanExpandOldGeneration(new_space()->Size() + new_lo_space()->Size()));
}
mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
@@ -1974,12 +1979,21 @@ void Heap::EvacuateYoungGeneration() {
new_space()->ResetLinearAllocationArea();
new_space()->set_age_mark(new_space()->top());
+  for (auto it = new_lo_space()->begin(); it != new_lo_space()->end();) {
+    LargePage* page = *it;
+    // Increment has to happen after we save the page, because it is going to
+    // be removed below.
+    it++;
+    lo_space()->PromoteNewLargeObject(page);
+  }
// Fix up special trackers.
external_string_table_.PromoteYoung();
// GlobalHandles are updated in PostGarbageCollectionProcessing
-  IncrementYoungSurvivorsCounter(new_space()->Size());
-  IncrementPromotedObjectsSize(new_space()->Size());
+  size_t promoted = new_space()->Size() + new_lo_space()->Size();
+  IncrementYoungSurvivorsCounter(promoted);
+  IncrementPromotedObjectsSize(promoted);
IncrementSemiSpaceCopiedObjectSize(0);
LOG(isolate_, ResourceEvent("scavenge", "end"));
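The promotion loop above uses a classic pattern: PromoteNewLargeObject unlinks the page from the young large object space, so the iterator has to be advanced before the current page is handed over, exactly as the in-code comment says. A standalone illustration of the same pattern with std::list (the containers stand in for the page lists):

#include <iostream>
#include <list>
#include <string>

int main() {
  std::list<std::string> new_lo_space = {"page1", "page2", "page3"};
  std::list<std::string> lo_space;

  for (auto it = new_lo_space.begin(); it != new_lo_space.end();) {
    auto current = it;
    // Increment has to happen before removal, otherwise `it` is invalidated.
    ++it;
    lo_space.splice(lo_space.end(), new_lo_space, current);  // "promote" the page
  }

  std::cout << "promoted " << lo_space.size() << " pages, "
            << new_lo_space.size() << " left in the young space\n";
}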
@@ -2077,24 +2091,31 @@ bool Heap::ExternalStringTable::Contains(String string) {
return false;
}
-String Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(
-    Heap* heap, FullObjectSlot p) {
-  MapWord first_word = HeapObject::cast(*p)->map_word();
+String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
+                                                            FullObjectSlot p) {
+  HeapObject obj = HeapObject::cast(*p);
+  MapWord first_word = obj->map_word();
+  String new_string;
-  if (!first_word.IsForwardingAddress()) {
-    // Unreachable external string can be finalized.
-    String string = String::cast(*p);
-    if (!string->IsExternalString()) {
-      // Original external string has been internalized.
-      DCHECK(string->IsThinString());
+  if (InFromPage(obj)) {
+    if (!first_word.IsForwardingAddress()) {
+      // Unreachable external string can be finalized.
+      String string = String::cast(obj);
+      if (!string->IsExternalString()) {
+        // Original external string has been internalized.
+        DCHECK(string->IsThinString());
+        return String();
+      }
+      heap->FinalizeExternalString(string);
       return String();
     }
-    heap->FinalizeExternalString(string);
-    return String();
+    new_string = String::cast(first_word.ToForwardingAddress());
+  } else {
+    new_string = String::cast(obj);
   }
   // String is still reachable.
-  String new_string = String::cast(first_word.ToForwardingAddress());
   if (new_string->IsThinString()) {
     // Filtering Thin strings out of the external string table.
     return String();
......
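The rewrite of the entry callback reflects that a surviving young external string no longer necessarily carries a forwarding address: only objects on a from-space page are copied by the scavenger, while young large objects survive in place, so their table entries must be kept as-is instead of being treated as dead. A simplified standalone model of that control flow (the type and its fields are invented for illustration; the real code inspects the map word):

#include <cassert>

struct ExternalStringModel {
  bool in_from_page;                  // regular semi-space object
  bool has_forwarding_address;        // was copied or promoted by the scavenger
  ExternalStringModel* forwarded_to;  // new location if forwarded
};

// Returns the updated table entry, or nullptr if the entry should be dropped.
ExternalStringModel* UpdateYoungEntry(ExternalStringModel* entry) {
  if (entry->in_from_page) {
    if (!entry->has_forwarding_address) return nullptr;  // dead: finalize and drop
    return entry->forwarded_to;                          // follow forwarding address
  }
  // Survived in place (e.g. in the new large object space): keep as-is.
  return entry;
}

int main() {
  ExternalStringModel large_young{false, false, nullptr};
  assert(UpdateYoungEntry(&large_young) == &large_young);

  ExternalStringModel dead{true, false, nullptr};
  assert(UpdateYoungEntry(&dead) == nullptr);

  ExternalStringModel new_location{false, false, nullptr};
  ExternalStringModel moved{true, true, &new_location};
  assert(UpdateYoungEntry(&moved) == &new_location);
}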
@@ -901,7 +901,7 @@ class Heap {
// data and clearing the resource pointer.
inline void FinalizeExternalString(String string);
-  static String UpdateNewSpaceReferenceInExternalStringTableEntry(
+  static String UpdateYoungReferenceInExternalStringTableEntry(
Heap* heap, FullObjectSlot pointer);
// ===========================================================================
@@ -1055,7 +1055,7 @@ class Heap {
return semi_space_copied_object_size_;
}
-  inline size_t SurvivedNewSpaceObjectSize() {
+  inline size_t SurvivedYoungObjectSize() {
return promoted_objects_size_ + semi_space_copied_object_size_;
}
......
@@ -219,7 +219,7 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
if (object->map_slot().Release_CompareAndSwap(
map, MapWord::FromForwardingAddress(object).ToMap()) == map) {
surviving_new_large_objects_.insert({object, map});
+      promoted_size_ += object_size;
if (object_fields == ObjectFields::kMaybePointers) {
promotion_list_.PushLargeObject(object, map, object_size);
}
@@ -240,7 +240,7 @@ SlotCallbackResult Scavenger::EvacuateObjectDefault(
CopyAndForwardResult result;
if (HandleLargeObject(map, object, object_size, object_fields)) {
-    return REMOVE_SLOT;
+    return KEEP_SLOT;
}
SLOW_DCHECK(static_cast<size_t>(object_size) <=
......
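Two scavenger details change for large objects: HandleLargeObject now adds the object's size to promoted_size_ so the survival counters see it, and EvacuateObjectDefault answers KEEP_SLOT rather than REMOVE_SLOT, which is consistent with the object staying put in the young large object space while the scavenge runs. A hedged, simplified sketch of the slot semantics this relies on (not V8's actual visitor types): a remembered-set slot only needs to survive while its target is still in the young generation.

#include <iostream>

enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

// Toy model of a slot target after the scavenger has processed it.
struct TargetModel {
  bool still_in_young_generation;
};

SlotCallbackResult VisitOldToNewSlot(const TargetModel& target) {
  // Old-to-new slots only need to be remembered while the referent is young.
  return target.still_in_young_generation ? KEEP_SLOT : REMOVE_SLOT;
}

int main() {
  TargetModel young_large_object{true};  // survives in place, stays young for now
  TargetModel promoted_object{false};    // copied into the old generation
  std::cout << (VisitOldToNewSlot(young_large_object) == KEEP_SLOT) << "\n";  // 1
  std::cout << (VisitOldToNewSlot(promoted_object) == REMOVE_SLOT) << "\n";   // 1
}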
@@ -258,7 +258,7 @@ void ScavengerCollector::CollectGarbage() {
// Update references into new space
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_UPDATE_REFS);
heap_->UpdateYoungReferencesInExternalStringTable(
-        &Heap::UpdateNewSpaceReferenceInExternalStringTableEntry);
+        &Heap::UpdateYoungReferenceInExternalStringTableEntry);
heap_->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
}
@@ -298,7 +298,7 @@ void ScavengerCollector::CollectGarbage() {
});
// Update how much has survived scavenge.
-  heap_->IncrementYoungSurvivorsCounter(heap_->SurvivedNewSpaceObjectSize());
+  heap_->IncrementYoungSurvivorsCounter(heap_->SurvivedYoungObjectSize());
}
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
......