Commit 4c806802 authored by erikcorry's avatar erikcorry Committed by Commit bot

Fix OOM bug 3976.

Also introduce --trace-fragmentation-verbose, and fix --always-compact.

R=ulan@chromium.org
BUG=v8:3976
LOG=y

Review URL: https://codereview.chromium.org/1024823002

Cr-Commit-Position: refs/heads/master@{#27414}
parent 6e75e34d
......@@ -594,8 +594,9 @@ DEFINE_BOOL(print_max_heap_committed, false,
"in name=value format on exit")
DEFINE_BOOL(trace_gc_verbose, false,
"print more details following each garbage collection")
DEFINE_BOOL(trace_fragmentation, false,
"report fragmentation for old pointer and data pages")
DEFINE_BOOL(trace_fragmentation, false, "report fragmentation for old space")
DEFINE_BOOL(trace_fragmentation_verbose, false,
"report fragmentation for old space (detailed)")
DEFINE_BOOL(collect_maps, true,
"garbage collect maps from which no objects can be reached")
DEFINE_BOOL(weak_embedded_maps_in_optimized_code, true,
......
......@@ -599,11 +599,11 @@ const char* AllocationSpaceName(AllocationSpace space) {
static int FreeListFragmentation(PagedSpace* space, Page* p) {
// If page was not swept then there are no free list items on it.
if (!p->WasSwept()) {
if (FLAG_trace_fragmentation) {
if (FLAG_trace_fragmentation_verbose) {
PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p),
AllocationSpaceName(space->identity()), p->LiveBytes());
}
return 0;
return FLAG_always_compact ? 1 : 0;
}
PagedSpace::SizeStats sizes;
......@@ -620,7 +620,7 @@ static int FreeListFragmentation(PagedSpace* space, Page* p) {
ratio_threshold = 15;
}
if (FLAG_trace_fragmentation) {
if (FLAG_trace_fragmentation_verbose) {
PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()),
static_cast<int>(sizes.small_size_),
......@@ -696,6 +696,10 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
max_evacuation_candidates *= 2;
}
if (FLAG_always_compact) {
max_evacuation_candidates = kMaxMaxEvacuationCandidates;
}
if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
PrintF(
"Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
......@@ -709,6 +713,11 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
Candidate candidates[kMaxMaxEvacuationCandidates];
if (FLAG_trace_fragmentation &&
max_evacuation_candidates >= kMaxMaxEvacuationCandidates) {
PrintF("Hit max page compaction limit of %d pages\n",
kMaxMaxEvacuationCandidates);
}
max_evacuation_candidates =
Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
......@@ -731,7 +740,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
unsigned int counter = space->heap()->ms_count();
uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
if ((counter & 1) == (page_number & 1)) fragmentation = 1;
} else if (mode == REDUCE_MEMORY_FOOTPRINT) {
} else if (mode == REDUCE_MEMORY_FOOTPRINT && !FLAG_always_compact) {
// Don't try to release too many pages.
if (estimated_release >= over_reserved) {
continue;
......@@ -756,7 +765,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
fragmentation = 0;
}
if (FLAG_trace_fragmentation) {
if (FLAG_trace_fragmentation_verbose) {
PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p),
AllocationSpaceName(space->identity()),
static_cast<int>(free_bytes),
......@@ -3242,6 +3251,7 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
void MarkCompactCollector::EvacuatePages() {
int npages = evacuation_candidates_.length();
int abandoned_pages = 0;
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
DCHECK(p->IsEvacuationCandidate() ||
......@@ -3257,7 +3267,7 @@ void MarkCompactCollector::EvacuatePages() {
if (p->IsEvacuationCandidate()) {
// During compaction we might have to request a new page. Check that we
// have an emergency page and the space still has room for that.
if (space->HasEmergencyMemory() && space->CanExpand()) {
if (space->HasEmergencyMemory() || space->CanExpand()) {
EvacuateLiveObjectsFromPage(p);
// Unlink the page from the list of pages here. We must not iterate
// over that page later (e.g. when scan on scavenge pages are
......@@ -3273,6 +3283,7 @@ void MarkCompactCollector::EvacuatePages() {
page->ClearEvacuationCandidate();
page->SetFlag(Page::RESCAN_ON_EVACUATION);
}
abandoned_pages = npages - i;
break;
}
}
......@@ -3286,6 +3297,16 @@ void MarkCompactCollector::EvacuatePages() {
space->FreeEmergencyMemory();
}
}
if (FLAG_trace_fragmentation) {
if (abandoned_pages != 0) {
PrintF(
" Abandon %d out of %d page defragmentations due to lack of "
"memory\n",
abandoned_pages, npages);
} else {
PrintF(" Defragmented %d pages\n", npages);
}
}
}
}
......@@ -3629,7 +3650,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_,
code_slots_filtering_required);
if (FLAG_trace_fragmentation) {
if (FLAG_trace_fragmentation_verbose) {
PrintF(" migration slots buffer: %d\n",
SlotsBuffer::SizeOfChain(migration_slots_buffer_));
}
......@@ -3664,7 +3685,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
if (p->IsEvacuationCandidate()) {
SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(),
code_slots_filtering_required);
if (FLAG_trace_fragmentation) {
if (FLAG_trace_fragmentation_verbose) {
PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
SlotsBuffer::SizeOfChain(p->slots_buffer()));
}
......
......@@ -1019,7 +1019,8 @@ bool PagedSpace::CanExpand() {
DCHECK(max_capacity_ % AreaSize() == 0);
DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
DCHECK(heap()->CommittedOldGenerationMemory() <=
heap()->MaxOldGenerationSize());
heap()->MaxOldGenerationSize() +
PagedSpace::MaxEmergencyMemoryAllocated());
// Are we going to exceed capacity for this space?
if (!heap()->CanExpandOldGeneration(Page::kPageSize)) return false;
......@@ -1046,7 +1047,8 @@ bool PagedSpace::Expand() {
DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
DCHECK(heap()->CommittedOldGenerationMemory() <=
heap()->MaxOldGenerationSize());
heap()->MaxOldGenerationSize() +
PagedSpace::MaxEmergencyMemoryAllocated());
p->InsertAfter(anchor_.prev_page());
......@@ -1128,6 +1130,15 @@ void PagedSpace::ReleasePage(Page* page) {
}
// Upper bound on the total emergency memory that may be held across all
// spaces: one page for every space that participates in emergency
// reservations. Used to relax the CommittedOldGenerationMemory() DCHECKs
// in CanExpand()/Expand(), which would otherwise fire while emergency
// pages are allocated (the OOM fix for v8:3976).
intptr_t PagedSpace::MaxEmergencyMemoryAllocated() {
// New space and large object space (do not keep emergency memory).
static const int spaces_without_emergency_memory = 2;
static const int spaces_with_emergency_memory =
LAST_SPACE - FIRST_SPACE + 1 - spaces_without_emergency_memory;
// One reserved page per qualifying space.
return Page::kPageSize * spaces_with_emergency_memory;
}
void PagedSpace::CreateEmergencyMemory() {
if (identity() == CODE_SPACE) {
// Make the emergency block available to the allocator.
......
......@@ -1871,6 +1871,7 @@ class PagedSpace : public Space {
void CreateEmergencyMemory();
void FreeEmergencyMemory();
void UseEmergencyMemory();
intptr_t MaxEmergencyMemoryAllocated();
bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
......
......@@ -138,6 +138,7 @@
# Very slow on ARM and MIPS, contains no architecture dependent code.
'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips', TIMEOUT]],
'regress/regress-3976': [PASS, NO_VARIANTS, ['arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips', SKIP]],
##############################################################################
# This test expects to reach a certain recursion depth, which may not work
......
// Copyright 2015 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --max-old-space-size=60
table = [];
for (var i = 0; i < 32; i++) {
table[i] = String.fromCharCode(i + 0x410);
}
// Deterministic pseudo-random number generator (Lehmer-style LCG with
// multiplier 1009, modulus 8831, seed 10) so the test builds the exact
// same JSON string — and heap layout — on every run.
var random = (function() {
  var state = 10;
  var next = function() {
    state = (state * 1009) % 8831;
    return state;
  };
  return next;
})();
// Builds a double-quoted JSON key of `length` pseudo-random Cyrillic
// characters drawn from `table`.
function key(length) {
  var chars = [];
  for (var i = 0; i < length; i++) {
    chars.push(table[random() % 32]);
  }
  return '"' + chars.join("") + '"';
}
// Produces a small JSON value: an array holding one object with two
// pseudo-random numeric fields.
function value() {
  var first = random();
  var second = random();
  return '[{' + '"field1" : ' + first + ', "field2" : ' + second + '}]';
}
// Builds a JSON object literal containing n comma-separated entries,
// each with a random 7-16 character key and a random value().
function generate(n) {
  var entries = [];
  for (var i = 0; i < n; i++) {
    entries.push(key(random() % 10 + 7) + ':' + value());
  }
  return '{' + entries.join(', ') + '}';
}
// Driver: generate a large JSON string with 50000 distinct short Cyrillic
// keys, then parse it under the constrained --max-old-space-size=60 heap
// (see the Flags comment above). Per the commit message, this reproduces
// OOM bug v8:3976; the run must complete and print "done".
print("generating");
var str = generate(50000);
print("parsing " + str.length);
JSON.parse(str);
print("done");
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment