Commit ed3636e2 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Ensure that OOM callback is called before OOM.

This patch also fixes MaxReserved() to account for page headers and
adds two tests for heap size and memory allocator size near OOM.

Bug: chromium:824214
Change-Id: I5bbe00a9d6a5798cdf4481861a10dca842244a63
Reviewed-on: https://chromium-review.googlesource.com/973614
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52184}
parent b6ddadd0
src/heap/heap.cc
@@ -251,6 +251,12 @@ Heap::Heap()
   RememberUnmappedPage(nullptr, false);
 }
 
+size_t Heap::MaxReserved() {
+  const double kFactor = Page::kPageSize * 1.0 / Page::kAllocatableMemory;
+  return static_cast<size_t>(
+      (2 * max_semi_space_size_ + max_old_generation_size_) * kFactor);
+}
+
 size_t Heap::Capacity() {
   if (!HasBeenSetUp()) return 0;
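Aside, not part of the patch: the factor in the new MaxReserved() exists because max_semi_space_size_ and max_old_generation_size_ are expressed in allocatable memory, while the memory allocator reserves whole pages including their headers. A minimal standalone sketch of that scaling, using illustrative placeholder constants rather than V8's actual Page::kPageSize and Page::kAllocatableMemory values:

#include <cstddef>
#include <cstdio>

int main() {
  // Illustrative placeholders; the real constants live on V8's Page class.
  const std::size_t kPageSize = 512 * 1024;  // assumed 512 KB pages
  const std::size_t kHeaderSize = 2 * 1024;  // assumed per-page header overhead
  const std::size_t kAllocatableMemory = kPageSize - kHeaderSize;
  const double kFactor = kPageSize * 1.0 / kAllocatableMemory;

  // Assumed configuration, mirroring the shape of the Heap fields.
  std::size_t max_semi_space_size = 8u * 1024 * 1024;        // 8 MB
  std::size_t max_old_generation_size = 256u * 1024 * 1024;  // 256 MB

  // Without the factor, the reservation would miss the header bytes of every
  // page that backs the two semi-spaces and the old generation.
  std::size_t reserved = static_cast<std::size_t>(
      (2 * max_semi_space_size + max_old_generation_size) * kFactor);
  std::printf("max reserved: %zu bytes\n", reserved);
  return 0;
}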
@@ -1272,12 +1278,16 @@ void Heap::EnsureFillerObjectAtTop() {
 bool Heap::CollectGarbage(AllocationSpace space,
                           GarbageCollectionReason gc_reason,
                           const v8::GCCallbackFlags gc_callback_flags) {
-  // The VM is in the GC state until exiting this function.
-  VMState<GC> state(isolate());
-
   const char* collector_reason = nullptr;
   GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
 
+  if (!CanExpandOldGeneration(new_space()->Capacity())) {
+    InvokeOutOfMemoryCallback();
+  }
+
+  // The VM is in the GC state until exiting this function.
+  VMState<GC> state(isolate());
+
 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
   // Reset the allocation timeout, but make sure to allow at least a few
   // allocations after a collection. The reason for this is that we have a lot
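The hunk above makes CollectGarbage notify the embedder before a young-generation collection can push the old generation over its limit. The following self-contained sketch shows that "check, then invoke the callback first" pattern in isolation; ToyHeap and its members are invented for illustration and are not V8's internal Heap API, only the ordering mirrors the patch:

#include <cstddef>

using OutOfMemoryCallback = void (*)(void* data);

class ToyHeap {
 public:
  void SetOutOfMemoryCallback(OutOfMemoryCallback callback, void* data) {
    oom_callback_ = callback;
    oom_data_ = data;
  }

  void CollectGarbage(std::size_t pending_allocation) {
    // If the old generation cannot grow enough to absorb what the collection
    // may promote, tell the embedder before running into a hard OOM.
    if (!CanExpandOldGeneration(pending_allocation) && oom_callback_ != nullptr) {
      oom_callback_(oom_data_);
    }
    // ... the actual collection would run here ...
  }

 private:
  bool CanExpandOldGeneration(std::size_t size) const {
    return old_generation_capacity_ + size <= max_old_generation_size_;
  }

  std::size_t old_generation_capacity_ = 0;
  std::size_t max_old_generation_size_ = 256u * 1024 * 1024;
  OutOfMemoryCallback oom_callback_ = nullptr;
  void* oom_data_ = nullptr;
};

void ReportOOM(void* /*data*/) { /* embedder reaction would go here */ }

int main() {
  ToyHeap heap;
  heap.SetOutOfMemoryCallback(&ReportOOM, nullptr);
  heap.CollectGarbage(512u * 1024 * 1024);  // exceeds the toy limit
  return 0;
}

The new cctest cases later in this change exercise the real code the same way: they register a callback via heap->SetOutOfMemoryCallback and allocate until it fires.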
src/heap/heap.h
@@ -1396,9 +1396,7 @@ class Heap {
   // ===========================================================================
 
   // Returns the maximum amount of memory reserved for the heap.
-  size_t MaxReserved() {
-    return 2 * max_semi_space_size_ + max_old_generation_size_;
-  }
+  size_t MaxReserved();
   size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
   size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
   size_t MaxOldGenerationSize() { return max_old_generation_size_; }
@@ -2067,10 +2065,6 @@ class Heap {
   bool CanExpandOldGeneration(size_t size);
 
-  bool IsCloseToOutOfMemory(size_t slack) {
-    return OldGenerationCapacity() + slack >= MaxOldGenerationSize();
-  }
-
   bool ShouldExpandOldGenerationOnSlowAllocation();
 
   enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
src/heap/incremental-marking.cc
@@ -945,7 +945,7 @@ size_t IncrementalMarking::StepSizeToMakeProgress() {
   const size_t kTargetStepCountAtOOM = 32;
   size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;
 
-  if (heap()->IsCloseToOutOfMemory(oom_slack)) {
+  if (!heap()->CanExpandOldGeneration(oom_slack)) {
     return heap()->PromotedSpaceSizeOfObjects() / kTargetStepCountAtOOM;
   }
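With this change the near-OOM branch is taken whenever the old generation can no longer expand by oom_slack, and the marking step is sized so that roughly kTargetStepCountAtOOM = 32 steps cover all promoted objects. A back-of-the-envelope illustration, not part of the patch, with an assumed promoted-space size:

#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t MB = 1024 * 1024;
  const std::size_t kTargetStepCountAtOOM = 32;  // divisor from the code above
  std::size_t promoted_space_size = 256 * MB;    // assumed, for illustration
  std::size_t step = promoted_space_size / kTargetStepCountAtOOM;
  std::printf("marking step near OOM: %zu bytes (~%zu MB)\n", step, step / MB);
  return 0;
}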
test/cctest/heap/test-heap.cc
@@ -5972,6 +5972,91 @@ HEAP_TEST(Regress779503) {
   CcTest::heap()->delay_sweeper_tasks_for_testing_ = false;
 }
 
+struct OutOfMemoryState {
+  Heap* heap;
+  bool oom_triggered;
+  size_t old_generation_capacity_at_oom;
+  size_t memory_allocator_size_at_oom;
+};
+
+void OutOfMemoryCallback(void* raw_state) {
+  OutOfMemoryState* state = static_cast<OutOfMemoryState*>(raw_state);
+  Heap* heap = state->heap;
+  state->oom_triggered = true;
+  state->old_generation_capacity_at_oom = heap->OldGenerationCapacity();
+  state->memory_allocator_size_at_oom = heap->memory_allocator()->Size();
+  heap->IncreaseHeapLimitForDebugging();
+}
+
+size_t MemoryAllocatorSizeFromHeapCapacity(size_t capacity) {
+  // Size to capacity factor.
+  double factor = Page::kPageSize * 1.0 / Page::kAllocatableMemory;
+  // Some tables (e.g. deoptimization table) are allocated directly with the
+  // memory allocator. Allow some slack to account for them.
+  size_t slack = 1 * MB;
+  return static_cast<size_t>(capacity * factor) + slack;
+}
+
+UNINITIALIZED_TEST(OutOfMemorySmallObjects) {
+  const size_t kOldGenerationLimit = 300 * MB;
+  FLAG_max_old_space_size = kOldGenerationLimit / MB;
+  v8::Isolate::CreateParams create_params;
+  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+  Isolate* isolate =
+      reinterpret_cast<Isolate*>(v8::Isolate::New(create_params));
+  Heap* heap = isolate->heap();
+  Factory* factory = isolate->factory();
+  OutOfMemoryState state;
+  state.heap = heap;
+  state.oom_triggered = false;
+  heap->SetOutOfMemoryCallback(OutOfMemoryCallback, &state);
+  {
+    HandleScope handle_scope(isolate);
+    while (!state.oom_triggered) {
+      factory->NewFixedArray(100);
+    }
+  }
+  CHECK_LE(state.old_generation_capacity_at_oom,
+           kOldGenerationLimit + heap->new_space()->Capacity());
+  CHECK_LE(kOldGenerationLimit, state.old_generation_capacity_at_oom +
+                                    heap->new_space()->Capacity());
+  CHECK_LE(
+      state.memory_allocator_size_at_oom,
+      MemoryAllocatorSizeFromHeapCapacity(state.old_generation_capacity_at_oom +
+                                          2 * heap->new_space()->Capacity()));
+  reinterpret_cast<v8::Isolate*>(isolate)->Dispose();
+}
+
+UNINITIALIZED_TEST(OutOfMemoryLargeObjects) {
+  const size_t kOldGenerationLimit = 300 * MB;
+  FLAG_max_old_space_size = kOldGenerationLimit / MB;
+  v8::Isolate::CreateParams create_params;
+  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+  Isolate* isolate =
+      reinterpret_cast<Isolate*>(v8::Isolate::New(create_params));
+  Heap* heap = isolate->heap();
+  Factory* factory = isolate->factory();
+  OutOfMemoryState state;
+  state.heap = heap;
+  state.oom_triggered = false;
+  heap->SetOutOfMemoryCallback(OutOfMemoryCallback, &state);
+  const int kFixedArrayLength = 1000000;
+  {
+    HandleScope handle_scope(isolate);
+    while (!state.oom_triggered) {
+      factory->NewFixedArray(kFixedArrayLength);
+    }
+  }
+  CHECK_LE(state.old_generation_capacity_at_oom, kOldGenerationLimit);
+  CHECK_LE(kOldGenerationLimit, state.old_generation_capacity_at_oom +
+                                    FixedArray::SizeFor(kFixedArrayLength));
+  CHECK_LE(
+      state.memory_allocator_size_at_oom,
+      MemoryAllocatorSizeFromHeapCapacity(state.old_generation_capacity_at_oom +
+                                          2 * heap->new_space()->Capacity()));
+  reinterpret_cast<v8::Isolate*>(isolate)->Dispose();
+}
+
 }  // namespace heap
 }  // namespace internal
 }  // namespace v8
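The assertions in both tests bound how far past the configured limit the heap was allowed to grow before the callback fired: for small objects, the old-generation capacity at OOM must lie within one new-space capacity of kOldGenerationLimit on either side; for large objects, within one large FixedArray of it. A small numeric illustration of the small-object bound, not part of the patch, with assumed measurements:

#include <cassert>
#include <cstddef>

int main() {
  const std::size_t MB = 1024 * 1024;
  const std::size_t kOldGenerationLimit = 300 * MB;
  std::size_t new_space_capacity = 16 * MB;  // assumed new-space capacity
  std::size_t capacity_at_oom = 310 * MB;    // assumed value seen at OOM
  // Mirrors the two CHECK_LE bounds from OutOfMemorySmallObjects.
  assert(capacity_at_oom <= kOldGenerationLimit + new_space_capacity);
  assert(kOldGenerationLimit <= capacity_at_oom + new_space_capacity);
  return 0;
}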