Commit 717630c1 authored by Ulan Degenbaev's avatar Ulan Degenbaev Committed by Commit Bot

[heap] Account potentially marked bytes collected by Scavenger.

We have a heuristic that allows the main thread to skip marking work
by the amount already marked by the background tasks.
Scavenges that happen during incremental marking can reclaim marked
objects, which should be accounted in the main thread marking schedule.

Bug: chromium:789530
Change-Id: I9b922e05202e3b7665ec191c4fe303dbe7f72a50
Reviewed-on: https://chromium-review.googlesource.com/840843
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50285}
parent dcd60e8c
...@@ -1572,7 +1572,7 @@ bool Heap::PerformGarbageCollection( ...@@ -1572,7 +1572,7 @@ bool Heap::PerformGarbageCollection(
EnsureFromSpaceIsCommitted(); EnsureFromSpaceIsCommitted();
int start_new_space_size = static_cast<int>(Heap::new_space()->Size()); size_t start_new_space_size = Heap::new_space()->Size();
{ {
Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_); Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
...@@ -1611,9 +1611,16 @@ bool Heap::PerformGarbageCollection( ...@@ -1611,9 +1611,16 @@ bool Heap::PerformGarbageCollection(
ProcessPretenuringFeedback(); ProcessPretenuringFeedback();
} }
UpdateSurvivalStatistics(start_new_space_size); UpdateSurvivalStatistics(static_cast<int>(start_new_space_size));
ConfigureInitialOldGenerationSize(); ConfigureInitialOldGenerationSize();
if (collector != MARK_COMPACTOR) {
// Objects that died in the new space might have been accounted
// as bytes marked ahead of schedule by the incremental marker.
incremental_marking()->UpdateMarkedBytesAfterScavenge(
start_new_space_size - SurvivedNewSpaceObjectSize());
}
if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) { if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
ComputeFastPromotionMode(promotion_ratio_ + semi_space_copied_rate_); ComputeFastPromotionMode(promotion_ratio_ + semi_space_copied_rate_);
} }
......
...@@ -638,6 +638,13 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() { ...@@ -638,6 +638,13 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
}); });
} }
// Deducts bytes reclaimed by a scavenge from the "marked ahead of
// schedule" counter. Objects that died in new space may already have
// been counted as marked, so without this correction the main thread
// could skip marking work it still owes.
// `dead_bytes_in_new_space` is the number of bytes that did not survive
// the scavenge. The counter is clamped at zero rather than underflowing.
void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
    size_t dead_bytes_in_new_space) {
  if (!IsMarking()) return;
  if (bytes_marked_ahead_of_schedule_ > dead_bytes_in_new_space) {
    bytes_marked_ahead_of_schedule_ -= dead_bytes_in_new_space;
  } else {
    bytes_marked_ahead_of_schedule_ = 0;
  }
}
bool IncrementalMarking::IsFixedArrayWithProgressBar(HeapObject* obj) { bool IncrementalMarking::IsFixedArrayWithProgressBar(HeapObject* obj) {
if (!obj->IsFixedArray()) return false; if (!obj->IsFixedArray()) return false;
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
......
...@@ -165,6 +165,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking { ...@@ -165,6 +165,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void FinalizeIncrementally(); void FinalizeIncrementally();
void UpdateMarkingWorklistAfterScavenge(); void UpdateMarkingWorklistAfterScavenge();
void UpdateMarkedBytesAfterScavenge(size_t dead_bytes_in_new_space);
void Hurry(); void Hurry();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment