Adapt fragmentation heuristics for over-reserved pages.

This adapts the heuristics that detect fragmented pages so that the memory
footprint of spaces with over-reserved memory is reduced. This minimizes
external fragmentation caused by pages that cannot be released to the OS
because they still hold a few live objects.

R=erik.corry@gmail.com
TEST=cctest/test-heap/ReleaseOverReservedPages

Review URL: https://chromiumcodereview.appspot.com/10629004

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@11901 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
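
For orientation before the diff: the decision this change introduces condenses into a small predicate. The following is a minimal standalone sketch, not V8 code — the helper name is hypothetical, and `reserved`/`live` stand in for the space's reserved size and its `SizeOfObjects()` value used below:

```cpp
#include <stdint.h>

// Hypothetical condensation of the heuristic in the diff below: a space is
// compacted aggressively when the memory it has reserved exceeds what its
// live objects need by at least two pages, and either the embedder asked to
// reduce the footprint or more than a third of the reservation is waste.
bool ShouldReduceMemoryFootprint(intptr_t reserved, intptr_t live,
                                 intptr_t page_area_size,
                                 bool footprint_reduction_requested) {
  intptr_t over_reserved = reserved - live;
  if (over_reserved < 2 * page_area_size) return false;
  return footprint_reduction_requested || over_reserved > reserved / 3;
}
```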
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -500,12 +500,10 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
          space->identity() == OLD_DATA_SPACE ||
          space->identity() == CODE_SPACE);
 
+  static const int kMaxMaxEvacuationCandidates = 1000;
   int number_of_pages = space->CountTotalPages();
-
-  const int kMaxMaxEvacuationCandidates = 1000;
-  int max_evacuation_candidates = Min(
-      kMaxMaxEvacuationCandidates,
-      static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1));
+  int max_evacuation_candidates =
+      static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1);
 
   if (FLAG_stress_compaction || FLAG_always_compact) {
     max_evacuation_candidates = kMaxMaxEvacuationCandidates;
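
As a quick sanity check on the now-unclamped formula above, a standalone sketch (illustrative page counts, not part of the change) showing how the limit grows and why the clamp must be reapplied later:

```cpp
#include <cmath>
#include <cstdio>

int main() {
  // Same expression as in CollectEvacuationCandidates; for very large spaces
  // the value exceeds kMaxMaxEvacuationCandidates (1000), which the later
  // Min() in this patch caps again after the heuristic bumps.
  const int kPageCounts[] = {1, 8, 50, 200, 2000000};
  for (int number_of_pages : kPageCounts) {
    int limit = static_cast<int>(
        std::sqrt(static_cast<double>(number_of_pages / 2)) + 1);
    std::printf("%7d pages -> candidate limit %d\n", number_of_pages, limit);
  }
  return 0;
}
```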
@@ -535,17 +533,28 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   intptr_t over_reserved = reserved - space->SizeOfObjects();
   static const intptr_t kFreenessThreshold = 50;
 
-  if (over_reserved >= 2 * space->AreaSize() &&
-      reduce_memory_footprint_) {
-    mode = REDUCE_MEMORY_FOOTPRINT;
-
-    // We expect that empty pages are easier to compact so slightly bump the
-    // limit.
-    max_evacuation_candidates += 2;
-
-    if (FLAG_trace_fragmentation) {
-      PrintF("Estimated over reserved memory: %.1f MB (setting threshold %d)\n",
+  if (over_reserved >= 2 * space->AreaSize()) {
+    // If reduction of memory footprint was requested, we are aggressive
+    // about choosing pages to free.  We expect that half-empty pages
+    // are easier to compact so slightly bump the limit.
+    if (reduce_memory_footprint_) {
+      mode = REDUCE_MEMORY_FOOTPRINT;
+      max_evacuation_candidates += 2;
+    }
+
+    // If over-usage is very high (more than a third of the space), we
+    // try to free all mostly empty pages.  We expect that almost empty
+    // pages are even easier to compact so bump the limit even more.
+    if (over_reserved > reserved / 3) {
+      mode = REDUCE_MEMORY_FOOTPRINT;
+      max_evacuation_candidates *= 2;
+    }
+
+    if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
+      PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d)\n",
              static_cast<double>(over_reserved) / MB,
+             static_cast<double>(reserved) / MB,
              static_cast<int>(kFreenessThreshold));
     }
   }
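
The net effect of the two branches on the candidate limit, condensed into one hypothetical helper under the same naming assumptions as the sketch above (note that both bumps can apply at once):

```cpp
#include <stdint.h>

// Mirrors the hunk above: +2 when the embedder requested a smaller footprint
// (half-empty pages compact easily), doubled when more than a third of the
// reserved memory is waste (mostly empty pages compact even more easily).
int BumpedCandidateLimit(int base_limit, intptr_t reserved,
                         intptr_t over_reserved, intptr_t page_area_size,
                         bool footprint_reduction_requested) {
  int limit = base_limit;
  if (over_reserved >= 2 * page_area_size) {
    if (footprint_reduction_requested) limit += 2;
    if (over_reserved > reserved / 3) limit *= 2;
  }
  return limit;
}
```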
@@ -554,6 +563,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   Candidate candidates[kMaxMaxEvacuationCandidates];
 
+  max_evacuation_candidates =
+      Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
+
   int count = 0;
   int fragmentation = 0;
   Candidate* least = NULL;
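
A note on the design choice in this hunk: the clamp against kMaxMaxEvacuationCandidates is reapplied only here, after the heuristic bumps above, because `candidates` is a fixed-size stack array; without the Min(), the `*= 2` bump could push the limit past the array bound.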
--- a/test/cctest/test-alloc.cc
+++ b/test/cctest/test-alloc.cc
@@ -34,7 +34,8 @@
 using namespace v8::internal;
 
-static inline void SimulateFullSpace(PagedSpace* space) {
+// Also used in test-heap.cc test cases.
+void SimulateFullSpace(PagedSpace* space) {
   int old_linear_size = static_cast<int>(space->limit() - space->top());
   space->Free(space->top(), old_linear_size);
   space->SetTop(space->limit(), space->limit());
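
Dropping `static inline` here gives SimulateFullSpace external linkage, so test-heap.cc can declare the helper and reuse it rather than keeping a duplicate copy (see the new test below).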
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -1899,3 +1899,42 @@ TEST(Regress2143b) {
   CHECK(root->IsJSObject());
   CHECK(root->map()->IsMap());
 }
+
+
+// Implemented in the test-alloc.cc test suite.
+void SimulateFullSpace(PagedSpace* space);
+
+
+TEST(ReleaseOverReservedPages) {
+  i::FLAG_trace_gc = true;
+  InitializeVM();
+  v8::HandleScope scope;
+  static const int number_of_test_pages = 20;
+
+  // Prepare many pages with low live-bytes count.
+  PagedSpace* old_pointer_space = HEAP->old_pointer_space();
+  CHECK_EQ(1, old_pointer_space->CountTotalPages());
+  for (int i = 0; i < number_of_test_pages; i++) {
+    AlwaysAllocateScope always_allocate;
+    SimulateFullSpace(old_pointer_space);
+    FACTORY->NewFixedArray(1, TENURED);
+  }
+  CHECK_EQ(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+
+  // Triggering one GC will cause a lot of garbage to be discovered but
+  // evenly spread across all allocated pages.
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered for preparation");
+  CHECK_EQ(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+
+  // Triggering subsequent GCs should cause at least half of the pages
+  // to be released to the OS after at most two cycles.
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 1");
+  CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 2");
+  CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages() * 2);
+
+  // Triggering a last-resort GC should cause all pages to be released
+  // to the OS so that other processes can seize the memory.
+  HEAP->CollectAllAvailableGarbage("triggered really hard");
+  CHECK_EQ(1, old_pointer_space->CountTotalPages());
+}
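
For the arithmetic behind the checks: with number_of_test_pages = 20 the loop grows the old pointer space to 21 pages, so the `CHECK_GE(21, CountTotalPages() * 2)` after the second GC holds only if at most 10 pages survive, i.e. at least half of the test pages were released, while the final `CHECK_EQ` requires the last-resort GC to shrink the space back to its single initial page.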