Commit 4b618dbf authored by mlippautz, committed by Commit bot

[heap] Switch to 500k pages

BUG=chromium:636331
R=ulan@chromium.org

Review-Url: https://codereview.chromium.org/2314803002
Cr-Commit-Position: refs/heads/master@{#39220}
parent 7bc200c7
@@ -208,7 +208,7 @@
 // Bump up for Power Linux due to larger (64K) page size.
 const int kPageSizeBits = 22;
 #else
-const int kPageSizeBits = 20;
+const int kPageSizeBits = 19;
 #endif
 #endif  // V8_BASE_BUILD_CONFIG_H_
@@ -201,7 +201,9 @@ const int kExternalAllocationSoftLimit = 64 * MB;
 // memory. This also applies to new space allocation, since objects are never
 // migrated from new space to large object space. Takes double alignment into
 // account.
-const int kMaxRegularHeapObjectSize = 512 * KB - 512;
+//
+// Current value: Page::kAllocatableMemory (on 32-bit arch) - 512 (slack).
+const int kMaxRegularHeapObjectSize = 507136;
 STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
...
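The constant is no longer a round 512 * KB - 512 because the page itself shrank. A worked check of where 507136 comes from; the 16640-byte per-page overhead is inferred from the CL's comment rather than quoted from V8's headers:

    // 507648 (= 507136 + 512) is the Page::kAllocatableMemory value implied
    // by the comment above, i.e. a 512 KB page minus the chunk header area.
    static_assert(507648 - 512 == 507136, "kAllocatableMemory minus 512 slack");
    static_assert((1 << 19) - 16640 == 507648, "512 KB page minus overhead");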
@@ -78,7 +78,7 @@ Heap::Heap()
       // semispace_size_ should be a power of 2 and old_generation_size_ should
       // be a multiple of Page::kPageSize.
       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
-      initial_semispace_size_(Page::kPageSize),
+      initial_semispace_size_(MB),
       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
       initial_old_generation_size_(max_old_generation_size_ /
                                    kInitalOldGenerationLimitFactor),
...
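Spelling the size as MB rather than Page::kPageSize keeps the initial semispace at 1 MB now that a page is half that; before this CL the two spellings were equal. A trivial check of that equivalence, with the page sizes taken from the build-config change above:

    static_assert((1 << 20) == 1024 * 1024, "old page size was exactly 1 MB");
    static_assert(2 * (1 << 19) == 1024 * 1024, "1 MB is now two 512 KB pages");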
@@ -600,7 +600,7 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
   // For memory reducing and optimize for memory mode we directly define both
   // constants.
   const int kTargetFragmentationPercentForReduceMemory = 20;
-  const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
+  const int kMaxEvacuatedBytesForReduceMemory = 12 * MB;
   const int kTargetFragmentationPercentForOptimizeMemory = 20;
   const int kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
@@ -608,10 +608,10 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
   // defaults to start and switch to a trace-based (using compaction speed)
   // approach as soon as we have enough samples.
   const int kTargetFragmentationPercent = 70;
-  const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
+  const int kMaxEvacuatedBytes = 4 * MB;
   // Time to take for a single area (=payload of page). Used as soon as there
   // exist enough compaction speed samples.
-  const int kTargetMsPerArea = 1;
+  const float kTargetMsPerArea = .5;
   if (heap()->ShouldReduceMemory()) {
     *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
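A hypothetical sketch, not the V8 implementation, of how a time budget like kTargetMsPerArea can bound evacuation work: the fraction of one page payload that the measured compaction speed can move within the budget caps the usable fragmentation threshold. Function and parameter names below are illustrative:

    #include <algorithm>
    #include <cstdio>

    // Illustrative only: cap the evacuatable fraction of one page payload by
    // what the measured compaction speed can move in kTargetMsPerArea ms.
    int SketchTargetFragmentationPercent(double speed_bytes_per_ms,
                                         double area_size_bytes) {
      const float kTargetMsPerArea = .5f;  // value from this CL
      double fraction = speed_bytes_per_ms * kTargetMsPerArea / area_size_bytes;
      return static_cast<int>(std::min(1.0, fraction) * 100.0);
    }

    int main() {
      // E.g. 1e6 bytes/ms (~1 GB/s) against a ~507 KB page payload prints 98%.
      std::printf("%d%%\n", SketchTargetFragmentationPercent(1e6, 507648.0));
      return 0;
    }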
@@ -3226,7 +3226,7 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
   // The number of parallel compaction tasks is limited by:
   // - #evacuation pages
   // - (#cores - 1)
-  const double kTargetCompactionTimeInMs = 1;
+  const double kTargetCompactionTimeInMs = .5;
   const int kNumSweepingTasks = 3;
   double compaction_speed =
...
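Again a sketch rather than the actual heuristic, showing only the two caps named in the comment above (#evacuation pages and #cores - 1); the real code additionally weighs compaction speed against kTargetCompactionTimeInMs:

    #include <algorithm>

    // Illustrative caps only; the names and the core-reservation policy here
    // are assumptions, not V8's exact logic.
    int SketchNumberOfCompactionTasks(int evacuation_pages, int cores) {
      const int kNumSweepingTasks = 3;  // value from this CL's context
      int available_cores = std::max(1, cores - 1 - kNumSweepingTasks);
      return std::max(1, std::min(evacuation_pages, available_cores));
    }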
@@ -696,7 +696,8 @@ class MemoryChunk {
 DEFINE_OPERATORS_FOR_FLAGS(MemoryChunk::Flags)
-STATIC_ASSERT(kMaxRegularHeapObjectSize < MemoryChunk::kAllocatableMemory);
+static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
+              "kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");
 // -----------------------------------------------------------------------------
 // A page is a memory chunk of a size 1MB. Large object pages may be larger.
...
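The assert switches from strict '<' to '<=' and gains a message. With the 32-bit values derived above the check still holds with 512 bytes of slack; a standalone restatement:

    // Concrete 32-bit values from this CL (507136 == 507648 - 512);
    // equality is now permitted by the assert.
    static_assert(507136 <= 507648,
                  "kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");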
@@ -2369,6 +2369,9 @@ static void FillUpNewSpace(NewSpace* new_space) {
 TEST(GrowAndShrinkNewSpace) {
+  // Avoid shrinking new space in GC epilogue. This can happen if allocation
+  // throughput samples have been taken while executing the benchmark.
+  FLAG_predictable = true;
   CcTest::InitializeVM();
   Heap* heap = CcTest::heap();
   NewSpace* new_space = heap->new_space();
@@ -3614,6 +3617,9 @@ TEST(ReleaseOverReservedPages) {
   i::FLAG_page_promotion = false;
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
+  // If there's a snapshot available, we don't know whether 20 small arrays
+  // will fit on the initial pages.
+  if (!isolate->snapshot_available()) return;
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
   v8::HandleScope scope(CcTest::isolate());
...
@@ -96,6 +96,9 @@ TEST(Promotion) {
 }
 HEAP_TEST(NoPromotion) {
+  // Page promotion allows pages to be moved to old space even in the case of
+  // OOM scenarios.
+  FLAG_page_promotion = false;
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   {
...