Commit d7bc6e80 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Delay OOM during GC until NearHeapLimitCallback is invoked

This allows the GC to go slightly over the max heap limit in order to
give the NearHeapLimitCallback a chance to run and increase the limit.

Based on the suggestion by Kenton Varda.

Change-Id: I9c084b5a4c8fb7b9ce331b565958391c1be56add
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2505724
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70884}
parent cd06e5f1
@@ -396,7 +396,7 @@ size_t Heap::Available() {
 }
 
 bool Heap::CanExpandOldGeneration(size_t size) {
-  if (force_oom_) return false;
+  if (force_oom_ || force_gc_on_next_allocation_) return false;
   if (OldGenerationCapacity() + size > max_old_generation_size()) return false;
   // The OldGenerationCapacity does not account compaction spaces used
   // during evacuation. Ensure that expanding the old generation does push
@@ -1509,16 +1509,14 @@ bool Heap::CollectGarbage(AllocationSpace space,
   const char* collector_reason = nullptr;
   GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
   is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
-                          current_gc_flags_ & kForcedGC;
+                          current_gc_flags_ & kForcedGC ||
+                          force_gc_on_next_allocation_;
+  if (force_gc_on_next_allocation_) force_gc_on_next_allocation_ = false;
 
   DevToolsTraceEventScope devtools_trace_event_scope(
       this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
       GarbageCollectionReasonToString(gc_reason));
 
-  if (!CanPromoteYoungAndExpandOldGeneration(0)) {
-    InvokeNearHeapLimitCallback();
-  }
-
   // Filter on-stack reference below this method.
   isolate()
       ->global_handles()
@@ -1693,6 +1691,13 @@ bool Heap::CollectGarbage(AllocationSpace space,
         kGCCallbackScheduleIdleGarbageCollection);
   }
 
+  if (!CanExpandOldGeneration(0)) {
+    InvokeNearHeapLimitCallback();
+    if (!CanExpandOldGeneration(0)) {
+      FatalProcessOutOfMemory("Reached heap limit");
+    }
+  }
+
   return freed_global_handles > 0;
 }
......
@@ -660,6 +660,7 @@ class Heap {
   }
 
   void SetGCState(HeapState state);
   bool IsTearingDown() const { return gc_state() == TEAR_DOWN; }
+  bool force_oom() const { return force_oom_; }
 
   inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
@@ -1988,6 +1989,9 @@ class Heap {
   void FinalizePartialMap(Map map);
 
   void set_force_oom(bool value) { force_oom_ = value; }
+  void set_force_gc_on_next_allocation() {
+    force_gc_on_next_allocation_ = true;
+  }
 
   // ===========================================================================
   // Retaining path tracing ====================================================
@@ -2271,6 +2275,7 @@ class Heap {
   // Used for testing purposes.
   bool force_oom_ = false;
+  bool force_gc_on_next_allocation_ = false;
   bool delay_sweeper_tasks_for_testing_ = false;
 
   HeapObject pending_layout_change_object_;
......
@@ -844,6 +844,18 @@ bool CompactionSpace::RefillLabMain(int size_in_bytes,
   return RawRefillLabMain(size_in_bytes, origin);
 }
 
+bool PagedSpace::TryExpand(int size_in_bytes, AllocationOrigin origin) {
+  Page* page = Expand();
+  if (!page) return false;
+  if (!is_compaction_space()) {
+    heap()->NotifyOldGenerationExpansion(identity(), page);
+  }
+  DCHECK((CountTotalPages() > 1) ||
+         (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
+  return TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
+                                       origin);
+}
+
 bool PagedSpace::RawRefillLabMain(int size_in_bytes, AllocationOrigin origin) {
   // Non-compaction local spaces are not supported.
   DCHECK_IMPLIES(is_local_space(), is_compaction_space());
@@ -886,33 +898,22 @@ bool PagedSpace::RawRefillLabMain(int size_in_bytes, AllocationOrigin origin) {
   if (heap()->ShouldExpandOldGenerationOnSlowAllocation() &&
       heap()->CanExpandOldGeneration(AreaSize())) {
-    Page* page = Expand();
-    if (page) {
-      if (!is_compaction_space()) {
-        heap()->NotifyOldGenerationExpansion(identity(), page);
-      }
-      DCHECK((CountTotalPages() > 1) ||
-             (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
-      return TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
-                                           origin);
+    if (TryExpand(size_in_bytes, origin)) {
+      return true;
     }
   }
 
-  if (is_compaction_space()) {
-    return ContributeToSweepingMain(0, 0, size_in_bytes, origin);
-  } else {
-    DCHECK(!is_local_space());
-    if (collector->sweeping_in_progress()) {
-      // Complete sweeping for this space.
-      collector->DrainSweepingWorklistForSpace(identity());
-      RefillFreeList();
-
-      // Last try to acquire memory from free list.
-      return TryAllocationFromFreeListMain(size_in_bytes, origin);
-    }
-    return false;
+  // Try sweeping all pages.
+  if (ContributeToSweepingMain(0, 0, size_in_bytes, origin)) {
+    return true;
+  }
+
+  if (heap()->gc_state() != Heap::NOT_IN_GC && !heap()->force_oom()) {
+    // Avoid OOM crash in the GC in order to invoke NearHeapLimitCallback after
+    // GC and give it a chance to increase the heap limit.
+    return TryExpand(size_in_bytes, origin);
   }
+  return false;
 }
bool PagedSpace::ContributeToSweepingMain(int required_freed_bytes,
@@ -926,12 +927,11 @@ bool PagedSpace::ContributeToSweepingMain(int required_freed_bytes,
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   if (collector->sweeping_in_progress()) {
-    int max_freed = collector->sweeper()->ParallelSweepSpace(
-        identity(), required_freed_bytes, max_pages,
-        invalidated_slots_in_free_space);
+    collector->sweeper()->ParallelSweepSpace(identity(), required_freed_bytes,
+                                             max_pages,
+                                             invalidated_slots_in_free_space);
     RefillFreeList();
-    if (max_freed >= size_in_bytes)
-      return TryAllocationFromFreeListMain(size_in_bytes, origin);
+    return TryAllocationFromFreeListMain(size_in_bytes, origin);
   }
   return false;
 }
......
@@ -386,6 +386,9 @@ class V8_EXPORT_PRIVATE PagedSpace
                                          AllocationAlignment alignment,
                                          AllocationOrigin origin);
 
+  V8_WARN_UNUSED_RESULT bool TryExpand(int size_in_bytes,
+                                       AllocationOrigin origin);
+
   Executability executable_;
 
   LocalSpaceKind local_space_kind_;
......
@@ -54,6 +54,11 @@ HEAP_TEST(CompactionFullAbortedPage) {
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   Heap* heap = isolate->heap();
+  auto reset_oom = [](void* heap, size_t limit, size_t) -> size_t {
+    reinterpret_cast<Heap*>(heap)->set_force_oom(false);
+    return limit;
+  };
+  heap->AddNearHeapLimitCallback(reset_oom, heap);
   {
     HandleScope scope1(isolate);
@@ -84,6 +89,7 @@ HEAP_TEST(CompactionFullAbortedPage) {
       CheckInvariantsOfAbortedPage(to_be_aborted_page);
     }
   }
+  heap->RemoveNearHeapLimitCallback(reset_oom, 0u);
 }
namespace {
@@ -115,6 +121,11 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   Heap* heap = isolate->heap();
+  auto reset_oom = [](void* heap, size_t limit, size_t) -> size_t {
+    reinterpret_cast<Heap*>(heap)->set_force_oom(false);
+    return limit;
+  };
+  heap->AddNearHeapLimitCallback(reset_oom, heap);
   {
     HandleScope scope1(isolate);
@@ -171,6 +182,7 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
       }
     }
   }
+  heap->RemoveNearHeapLimitCallback(reset_oom, 0u);
 }
HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
@@ -189,6 +201,12 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   Heap* heap = isolate->heap();
+  auto reset_oom = [](void* heap, size_t limit, size_t) -> size_t {
+    reinterpret_cast<Heap*>(heap)->set_force_oom(false);
+    return limit;
+  };
+  heap->AddNearHeapLimitCallback(reset_oom, heap);
+
   {
     HandleScope scope1(isolate);
@@ -247,6 +265,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
       }
     }
   }
+  heap->RemoveNearHeapLimitCallback(reset_oom, 0u);
 }
HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
@@ -267,6 +286,11 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   Heap* heap = isolate->heap();
+  auto reset_oom = [](void* heap, size_t limit, size_t) -> size_t {
+    reinterpret_cast<Heap*>(heap)->set_force_oom(false);
+    return limit;
+  };
+  heap->AddNearHeapLimitCallback(reset_oom, heap);
   {
     HandleScope scope1(isolate);
     Handle<FixedArray> root_array =
@@ -334,6 +358,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
+  heap->RemoveNearHeapLimitCallback(reset_oom, 0u);
 }
HEAP_TEST(CompactionPartiallyAbortedPageWithRememberedSetEntries) {
@@ -357,6 +382,11 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithRememberedSetEntries) {
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   Heap* heap = isolate->heap();
+  auto reset_oom = [](void* heap, size_t limit, size_t) -> size_t {
+    reinterpret_cast<Heap*>(heap)->set_force_oom(false);
+    return limit;
+  };
+  heap->AddNearHeapLimitCallback(reset_oom, heap);
   {
     HandleScope scope1(isolate);
     Handle<FixedArray> root_array =
@@ -458,6 +488,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithRememberedSetEntries) {
      CcTest::CollectGarbage(NEW_SPACE);
    }
  }
+  heap->RemoveNearHeapLimitCallback(reset_oom, 0u);
 }
} // namespace heap
......
@@ -1194,15 +1194,7 @@ HEAP_TEST(Regress10560) {
     CHECK(function->shared().GetBytecodeArray().IsOld());
     CHECK(function->shared().is_compiled());
 
-    heap->set_force_oom(true);
-    heap->AddNearHeapLimitCallback(
-        [](void* data, size_t current_heap_limit,
-           size_t initial_heap_limit) -> size_t {
-          Heap* heap = static_cast<Heap*>(data);
-          heap->set_force_oom(false);
-          return 0;
-        },
-        heap);
+    heap->set_force_gc_on_next_allocation();
 
     // Allocate feedback vector.
     IsCompiledScope is_compiled_scope(
@@ -5592,6 +5584,11 @@ HEAP_TEST(Regress589413) {
     // Add the array in root set.
     handle(byte_array, isolate);
   }
+  auto reset_oom = [](void* heap, size_t limit, size_t) -> size_t {
+    reinterpret_cast<Heap*>(heap)->set_force_oom(false);
+    return limit;
+  };
+  heap->AddNearHeapLimitCallback(reset_oom, heap);
   {
     // Ensure that incremental marking is not started unexpectedly.
@@ -5655,6 +5652,7 @@ HEAP_TEST(Regress589413) {
   // Force allocation from the free list.
   heap->set_force_oom(true);
   CcTest::CollectGarbage(OLD_SPACE);
+  heap->RemoveNearHeapLimitCallback(reset_oom, 0);
 }
TEST(Regress598319) {
@@ -6832,7 +6830,10 @@ UNINITIALIZED_TEST(OutOfMemoryLargeObjects) {
         factory->NewFixedArray(kFixedArrayLength);
       }
     }
-    CHECK_LE(state.old_generation_capacity_at_oom, kOldGenerationLimit);
+    CHECK_LE(state.old_generation_capacity_at_oom,
+             kOldGenerationLimit + state.new_space_capacity_at_oom +
+                 state.new_lo_space_size_at_oom +
+                 FixedArray::SizeFor(kFixedArrayLength));
     CHECK_LE(kOldGenerationLimit, state.old_generation_capacity_at_oom +
                                       state.new_space_capacity_at_oom +
                                       state.new_lo_space_size_at_oom +
......