Commit 829aefb0 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Evacuate the young generation before the old generation.

With the --always_promote_young_mc flag, the mark-compact collector
cannot gracefully handle allocation failures when evacuating the young
generation. In some scenarios this causes OOM crashes without invoking
the NearHeapLimitCallback.

This CL ensures that the young generation is evacuated before the old
generation, because old-generation evacuation can be aborted if needed
whereas young-generation evacuation cannot. Additionally, the CL cleans
up usages of CanExpandOldGeneration.

Bug: v8:10843
Change-Id: I50d83912137afa3d3dac797dd4c6bddb51612334
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2404829
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69937}
parent b67c3f53
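
For context, the NearHeapLimitCallback mentioned in the commit message is the hook an embedder registers to react when V8 approaches its heap limit, typically by raising the limit so the process can recover instead of crashing. Below is a minimal sketch of how such a callback is registered through the public v8::Isolate API; the flag pointer and the limit-doubling policy are illustrative assumptions, not part of this CL (the CL's own regression test further down registers an equivalent callback on the internal Heap).

#include <v8.h>  // v8::Isolate, v8::NearHeapLimitCallback

// Illustrative callback: record that the limit was reached and double it so
// the allocation that triggered the callback can still succeed.
size_t OnNearHeapLimit(void* data, size_t current_heap_limit,
                       size_t initial_heap_limit) {
  *static_cast<bool*>(data) = true;
  return current_heap_limit * 2;
}

void InstallNearHeapLimitHook(v8::Isolate* isolate, bool* was_invoked) {
  // V8 calls OnNearHeapLimit when the heap grows close to its limit; the bug
  // tracked by v8:10843 was that an OOM could occur before this ever fired.
  isolate->AddNearHeapLimitCallback(OnNearHeapLimit, was_invoked);
}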
@@ -410,6 +410,12 @@ bool Heap::CanExpandOldGenerationBackground(size_t size) {
          memory_allocator()->Size() + size <= MaxReserved();
 }
 
+bool Heap::CanPromoteYoungAndExpandOldGeneration(size_t size) {
+  // Over-estimate the new space size using capacity to allow some slack.
+  return CanExpandOldGeneration(size + new_space_->Capacity() +
+                                new_lo_space_->Size());
+}
+
 bool Heap::HasBeenSetUp() const {
   // We will always have a new space when the heap is set up.
   return new_space_ != nullptr;
@@ -435,9 +441,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
     return MARK_COMPACTOR;
   }
 
-  // Over-estimate the new space size using capacity to allow some slack.
-  if (!CanExpandOldGeneration(new_space_->TotalCapacity() +
-                              new_lo_space()->Size())) {
+  if (!CanPromoteYoungAndExpandOldGeneration(0)) {
     isolate_->counters()
         ->gc_compactor_caused_by_oldspace_exhaustion()
         ->Increment();
@@ -1499,8 +1503,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
       this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
       GarbageCollectionReasonToString(gc_reason));
 
-  if (!CanExpandOldGeneration(new_space()->Capacity() +
-                              new_lo_space()->Size())) {
+  if (!CanPromoteYoungAndExpandOldGeneration(0)) {
     InvokeNearHeapLimitCallback();
   }
@@ -2332,8 +2335,7 @@ void Heap::EvacuateYoungGeneration() {
   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
   if (!FLAG_concurrent_marking) {
     DCHECK(fast_promotion_mode_);
-    DCHECK(
-        CanExpandOldGeneration(new_space()->Size() + new_lo_space()->Size()));
+    DCHECK(CanPromoteYoungAndExpandOldGeneration(0));
   }
 
   mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
@@ -2380,8 +2382,7 @@ void Heap::EvacuateYoungGeneration() {
 }
 
 void Heap::Scavenge() {
-  if ((fast_promotion_mode_ &&
-       CanExpandOldGeneration(new_space()->Size() + new_lo_space()->Size()))) {
+  if (fast_promotion_mode_ && CanPromoteYoungAndExpandOldGeneration(0)) {
     tracer()->NotifyYoungGenerationHandling(
         YoungGenerationHandling::kFastPromotionDuringScavenge);
     EvacuateYoungGeneration();
...
@@ -1902,6 +1902,7 @@ class Heap {
   V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size);
   V8_EXPORT_PRIVATE bool CanExpandOldGenerationBackground(size_t size);
+  V8_EXPORT_PRIVATE bool CanPromoteYoungAndExpandOldGeneration(size_t size);
 
   bool ShouldExpandOldGenerationOnSlowAllocation(
       LocalHeap* local_heap = nullptr);
...
@@ -395,7 +395,7 @@ int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
   int tasks = FLAG_parallel_compaction ? Min(NumberOfAvailableCores(),
                                              pages / (MB / Page::kPageSize) + 1)
                                        : 1;
-  if (!heap_->CanExpandOldGeneration(
+  if (!heap_->CanPromoteYoungAndExpandOldGeneration(
           static_cast<size_t>(tasks * Page::kPageSize))) {
     // Optimize for memory usage near the heap limit.
     tasks = 1;
@@ -3217,11 +3217,8 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
                                  &page_parallel_job_semaphore_);
   intptr_t live_bytes = 0;
 
-  for (Page* page : old_space_evacuation_pages_) {
-    live_bytes += non_atomic_marking_state()->live_bytes(page);
-    evacuation_job.AddItem(new EvacuationItem(page));
-  }
-
+  // Evacuation of new space pages cannot be aborted, so it needs to run
+  // before old space evacuation.
   for (Page* page : new_space_evacuation_pages_) {
     intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
     if (live_bytes_on_page == 0) continue;
@@ -3243,6 +3240,11 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
     evacuation_job.AddItem(new EvacuationItem(page));
   }
 
+  for (Page* page : old_space_evacuation_pages_) {
+    live_bytes += non_atomic_marking_state()->live_bytes(page);
+    evacuation_job.AddItem(new EvacuationItem(page));
+  }
+
   // Promote young generation large objects.
   IncrementalMarking::NonAtomicMarkingState* marking_state =
       heap()->incremental_marking()->non_atomic_marking_state();
...
@@ -486,7 +486,7 @@ int ScavengerCollector::NumberOfScavengeTasks() {
   static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
   int tasks =
       Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
-  if (!heap_->CanExpandOldGeneration(
+  if (!heap_->CanPromoteYoungAndExpandOldGeneration(
           static_cast<size_t>(tasks * Page::kPageSize))) {
     // Optimize for memory usage near the heap limit.
     tasks = 1;
...
@@ -1215,6 +1215,47 @@ HEAP_TEST(Regress10560) {
   }
 }
 
+UNINITIALIZED_TEST(Regress10843) {
+  FLAG_max_semi_space_size = 2;
+  FLAG_min_semi_space_size = 2;
+  FLAG_max_old_space_size = 8;
+  FLAG_always_compact = true;
+  v8::Isolate::CreateParams create_params;
+  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+  v8::Isolate* isolate = v8::Isolate::New(create_params);
+  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+  Factory* factory = i_isolate->factory();
+  Heap* heap = i_isolate->heap();
+  bool callback_was_invoked = false;
+
+  heap->AddNearHeapLimitCallback(
+      [](void* data, size_t current_heap_limit,
+         size_t initial_heap_limit) -> size_t {
+        *reinterpret_cast<bool*>(data) = true;
+        return current_heap_limit * 2;
+      },
+      &callback_was_invoked);
+
+  {
+    HandleScope scope(i_isolate);
+    std::vector<Handle<FixedArray>> arrays;
+    for (int i = 0; i < 140; i++) {
+      arrays.push_back(factory->NewFixedArray(10000));
+    }
+    CcTest::CollectAllGarbage(i_isolate);
+    CcTest::CollectAllGarbage(i_isolate);
+    for (int i = 0; i < 40; i++) {
+      arrays.push_back(factory->NewFixedArray(10000));
+    }
+    CcTest::CollectAllGarbage(i_isolate);
+    for (int i = 0; i < 100; i++) {
+      arrays.push_back(factory->NewFixedArray(10000));
+    }
+    CHECK(callback_was_invoked);
+  }
+  isolate->Dispose();
+}
+
 // Tests that spill slots from optimized code don't have weak pointers.
 TEST(Regress10774) {
   i::FLAG_allow_natives_syntax = true;
...