Commit 332bd5e9 authored by machenbach, committed by Commit bot

Revert of [heap] Switch to 500k pages (patchset #5 id:80001 of https://codereview.chromium.org/2314803002/ )

Reason for revert:
Breaks arm64 nosnap debug:
https://build.chromium.org/p/client.v8.ports/builders/V8%20Linux%20-%20arm64%20-%20sim%20-%20nosnap%20-%20debug/builds/2178

Original issue's description:
> [heap] Switch to 500k pages
>
> BUG=chromium:636331
> R=ulan@chromium.org
>
> Committed: https://crrev.com/4b618dbf8ec7f0edf377b54b48bf3c852d5e235a
> Cr-Commit-Position: refs/heads/master@{#39220}

TBR=ulan@chromium.org,mlippautz@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:636331

Review-Url: https://codereview.chromium.org/2312853005
Cr-Commit-Position: refs/heads/master@{#39229}
parent 3b083983
@@ -208,7 +208,7 @@
 // Bump up for Power Linux due to larger (64K) page size.
 const int kPageSizeBits = 22;
 #else
-const int kPageSizeBits = 19;
+const int kPageSizeBits = 20;
 #endif

 #endif  // V8_BASE_BUILD_CONFIG_H_
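For orientation (not part of the diff): V8 derives the page size as 1 << kPageSizeBits, so this one-bit change is exactly the 500k-to-1M switch being reverted. A minimal standalone check, with the constants redefined locally rather than pulled from V8 headers:

#include <cstddef>

// Local redefinitions for illustration; V8 computes Page::kPageSize the same way.
constexpr std::size_t kPageSizeBitsSmallPages = 19;  // the reverted CL
constexpr std::size_t kPageSizeBitsLargePages = 20;  // restored by this revert
static_assert((std::size_t{1} << kPageSizeBitsSmallPages) == 512 * 1024,
              "19 bits -> 512 KB pages");
static_assert((std::size_t{1} << kPageSizeBitsLargePages) == 1024 * 1024,
              "20 bits -> 1 MB pages");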
@@ -201,9 +201,7 @@ const int kExternalAllocationSoftLimit = 64 * MB;
 // memory. This also applies to new space allocation, since objects are never
 // migrated from new space to large object space. Takes double alignment into
 // account.
-//
-// Current value: Page::kAllocatableMemory (on 32-bit arch) - 512 (slack).
-const int kMaxRegularHeapObjectSize = 507136;
+const int kMaxRegularHeapObjectSize = 512 * KB - 512;

 STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
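Checking the arithmetic (not part of the diff): the restored limit is 512 KB minus 512 bytes of slack, while the removed comment pins the old value to the 500k-page allocatable area minus the same slack. A hedged standalone sketch; the 507648-byte allocatable-area figure is inferred from the removed comment, not quoted from a V8 header:

constexpr int KB = 1024;
// Removed value: allocatable area of a ~500k page minus 512 bytes of slack.
constexpr int kAllocatableMemorySmallPage = 507648;  // inferred: 507136 + 512
static_assert(kAllocatableMemorySmallPage - 512 == 507136, "removed constant");
// Restored value: 512 KB minus 512 bytes of slack.
static_assert(512 * KB - 512 == 523776, "restored constant");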
@@ -78,7 +78,7 @@ Heap::Heap()
       // semispace_size_ should be a power of 2 and old_generation_size_ should
       // be a multiple of Page::kPageSize.
       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
-      initial_semispace_size_(MB),
+      initial_semispace_size_(Page::kPageSize),
      max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
      initial_old_generation_size_(max_old_generation_size_ /
                                   kInitalOldGenerationLimitFactor),
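Note (not part of the diff): both initializers are numerically identical once pages are 1 MB again; the reverted CL presumably spelled it as a literal MB so that 512 KB pages would not halve the initial semispace. A quick hedged check:

constexpr int MB = 1024 * 1024;
constexpr int kPageSize = 1 << 20;  // Page::kPageSize with 20 page-size bits
static_assert(kPageSize == MB,
              "initial_semispace_size_ is 1 MB under either initializer");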
@@ -600,7 +600,7 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
   // For memory reducing and optimize for memory mode we directly define both
   // constants.
   const int kTargetFragmentationPercentForReduceMemory = 20;
-  const int kMaxEvacuatedBytesForReduceMemory = 12 * MB;
+  const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
   const int kTargetFragmentationPercentForOptimizeMemory = 20;
   const int kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
@@ -608,10 +608,10 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
   // defaults to start and switch to a trace-based (using compaction speed)
   // approach as soon as we have enough samples.
   const int kTargetFragmentationPercent = 70;
-  const int kMaxEvacuatedBytes = 4 * MB;
+  const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
   // Time to take for a single area (=payload of page). Used as soon as there
   // exist enough compaction speed samples.
-  const float kTargetMsPerArea = .5;
+  const int kTargetMsPerArea = 1;

   if (heap()->ShouldReduceMemory()) {
     *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
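For context (not part of the diff): these constants feed a per-mode choice of fragmentation target and evacuation byte budget. With 1 MB pages restored, the page-based caps (12 and 4 pages) again equal 12 MB and 4 MB, and the per-area time budget doubles along with each page's payload. A simplified, hypothetical sketch of the dispatch visible in the context lines, not the original function body:

// Hypothetical sketch; the real ComputeEvacuationHeuristics also consults
// compaction-speed samples once enough of them exist.
struct EvacuationLimits {
  int target_fragmentation_percent;
  int max_evacuated_bytes;
};

EvacuationLimits PickLimits(bool reduce_memory, bool optimize_memory) {
  constexpr int kPageSize = 1 << 20;  // 1 MB pages after the revert
  if (reduce_memory) return {20, 12 * kPageSize};
  if (optimize_memory) return {20, 6 * kPageSize};  // == 6 * MB here
  return {70, 4 * kPageSize};  // latency-critical default
}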
@@ -3226,7 +3226,7 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
   // The number of parallel compaction tasks is limited by:
   // - #evacuation pages
   // - (#cores - 1)
-  const double kTargetCompactionTimeInMs = .5;
+  const double kTargetCompactionTimeInMs = 1;
   const int kNumSweepingTasks = 3;

   double compaction_speed =
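The comment states the two hard bounds; the restored 1 ms value is the per-task time budget applied once compaction-speed samples exist. A hedged, illustrative sketch of how such a budget can cap the task count; the signature and the budget formula here are assumptions, not the original body:

#include <algorithm>

// Illustrative only: combines the two commented bounds with a time budget.
int ParallelCompactionTasksSketch(int evacuation_pages, int cores,
                                  double compaction_speed_bytes_per_ms,
                                  double live_bytes_to_evacuate) {
  const double kTargetCompactionTimeInMs = 1;
  int tasks = std::min(evacuation_pages, cores - 1);
  if (compaction_speed_bytes_per_ms > 0) {
    // Spawn no more tasks than needed for each share to fit the time budget.
    const double budget_bytes =
        compaction_speed_bytes_per_ms * kTargetCompactionTimeInMs;
    tasks = std::min(
        tasks, static_cast<int>(live_bytes_to_evacuate / budget_bytes) + 1);
  }
  return std::max(1, tasks);
}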
@@ -696,8 +696,7 @@ class MemoryChunk {
 DEFINE_OPERATORS_FOR_FLAGS(MemoryChunk::Flags)

-static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
-              "kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");
+STATIC_ASSERT(kMaxRegularHeapObjectSize < MemoryChunk::kAllocatableMemory);

 // -----------------------------------------------------------------------------
 // A page is a memory chunk of a size 1MB. Large object pages may be larger.
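Both forms assert the same invariant: the largest regular heap object must fit in a page's allocatable area. With the restored constants the margin is wide; a standalone check, where the chunk-header size is a made-up placeholder rather than V8's actual MemoryChunk layout:

constexpr int KB = 1024;
constexpr int kPageSize = 1 << 20;            // 1 MB pages
constexpr int kAssumedChunkHeader = 16 * KB;  // placeholder, not V8's value
constexpr int kAllocatable = kPageSize - kAssumedChunkHeader;
static_assert(512 * KB - 512 < kAllocatable,
              "a regular heap object always fits on a page");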
@@ -2369,9 +2369,6 @@ static void FillUpNewSpace(NewSpace* new_space) {
 TEST(GrowAndShrinkNewSpace) {
-  // Avoid shrinking new space in GC epilogue. This can happen if allocation
-  // throughput samples have been taken while executing the benchmark.
-  FLAG_predictable = true;
   CcTest::InitializeVM();
   Heap* heap = CcTest::heap();
   NewSpace* new_space = heap->new_space();
@@ -3617,9 +3614,6 @@ TEST(ReleaseOverReservedPages) {
   i::FLAG_page_promotion = false;
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
-  // If there's snapshot available, we don't know whether 20 small arrays will
-  // fit on the initial pages.
-  if (!isolate->snapshot_available()) return;
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
   v8::HandleScope scope(CcTest::isolate());
@@ -96,9 +96,6 @@ TEST(Promotion) {
 }

 HEAP_TEST(NoPromotion) {
-  // Page promotion allows pages to be moved to old space even in the case of
-  // OOM scenarios.
-  FLAG_page_promotion = false;
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   {