Commit 555c9619 authored by mlippautz, committed by Commit bot

Revert of [heap] Switch to 500k pages (patchset #24 id:780001 of https://codereview.chromium.org/2013713003/ )

Reason for revert:
Failures on waterfall:

e.g. http://build.chromium.org/p/client.v8/builders/V8%20Linux64%20TSAN/builds/11134

Original issue's description:
> [heap] Switch to 500k pages
>
> - Decrease regular heap object size to 300k, keeping the same ratio (60%)
>   between this limit and page size.
>
> In a follow up, we can now get rid of the new space border page while
> keeping the 1M minimum new space size.
>
> Some results (v8.infinite_scroll; 3 runs):
> - evacuate.avg: +15.3% (1.4->1.2)
> - evacuate.max: +24.4% (2.4->1.8)
>
> BUG=chromium:581412
> LOG=N
> R=hpayer@chromium.org, ulan@chromium.org, yangguo@chromium.org
>
> Committed: https://crrev.com/ffe5c670e1559d11e7b252e15fec38765e7dbe4f
> Cr-Commit-Position: refs/heads/master@{#38533}

TBR=hpayer@chromium.org,ulan@chromium.org,yangguo@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:581412

Review-Url: https://codereview.chromium.org/2229403003
Cr-Commit-Position: refs/heads/master@{#38537}
parent cbe5d41d
@@ -200,7 +200,7 @@
// Bump up for Power Linux due to larger (64K) page size.
const int kPageSizeBits = 22;
#else
const int kPageSizeBits = 19;
const int kPageSizeBits = 20;
#endif
#endif // V8_BASE_BUILD_CONFIG_H_
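For orientation (not part of the diff): the page size is 1 << kPageSizeBits, so the reverted value 19 corresponds to 512 KB ("500k") pages and the restored value 20 to 1 MB pages. A self-contained sketch, not V8 code, of that relationship and of the roughly 60% ratio to Page::kMaxRegularHeapObjectSize mentioned in the original description (the *Reverted/*Restored names are made up for the example):

    #include <cstdio>

    int main() {
      const int KB = 1024;
      const int kPageSizeReverted = 1 << 19;     // 512 KB pages under the reverted CL
      const int kPageSizeRestored = 1 << 20;     // 1 MB pages after this revert
      const int kMaxRegularReverted = 300 * KB;  // matching regular-object size limits
      const int kMaxRegularRestored = 600 * KB;
      // Both ratios print ~58.6%, i.e. the "same ratio (60%)" from the description.
      std::printf("limit/page (500k pages): %.1f%%\n",
                  100.0 * kMaxRegularReverted / kPageSizeReverted);
      std::printf("limit/page (1M pages):   %.1f%%\n",
                  100.0 * kMaxRegularRestored / kPageSizeRestored);
      return 0;
    }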
@@ -77,7 +77,7 @@ Heap::Heap()
// semispace_size_ should be a power of 2 and old_generation_size_ should
// be a multiple of Page::kPageSize.
max_semi_space_size_(8 * (kPointerSize / 4) * MB),
initial_semispace_size_(MB),
initial_semispace_size_(Page::kPageSize),
max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
initial_old_generation_size_(max_old_generation_size_ /
kInitalOldGenerationLimitFactor),
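As a worked-out example of the initializers above (assuming a 64-bit build where kPointerSize is 8; the numbers are derived, not part of the diff): max_semi_space_size_ works out to 16 MB, max_old_generation_size_ to 1400 MB, and initial_semispace_size_ goes back to Page::kPageSize, which is 1 MB once kPageSizeBits is 20 again:

    #include <cstdio>

    int main() {
      const long KB = 1024, MB = KB * KB;
      const int kPointerSize = 8;                       // 64-bit build assumed
      const long initial_semispace = 1L << 20;          // Page::kPageSize
      const long max_semi_space = 8 * (kPointerSize / 4) * MB;         // 16 MB
      const long max_old_generation = 700L * (kPointerSize / 4) * MB;  // 1400 MB
      std::printf("semispace: initial %ld MB, max %ld MB; old generation max %ld MB\n",
                  initial_semispace / MB, max_semi_space / MB,
                  max_old_generation / MB);
      return 0;
    }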
@@ -5427,17 +5427,15 @@ void Heap::PrintAlloctionsHash() {
void Heap::NotifyDeserializationComplete() {
deserialization_complete_ = true;
#ifdef DEBUG
// All pages right after bootstrapping must be marked as never-evacuate.
PagedSpaces spaces(this);
for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
if (isolate()->snapshot_available()) s->ShrinkPagesToHighWaterMark();
#ifdef DEBUG
// All pages right after bootstrapping must be marked as never-evacuate.
for (Page* p : *s) {
CHECK(p->NeverEvacuate());
}
#endif // DEBUG
}
#endif // DEBUG
}
void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
@@ -600,7 +600,7 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
// For memory reducing and optimize for memory mode we directly define both
// constants.
const int kTargetFragmentationPercentForReduceMemory = 20;
const int kMaxEvacuatedBytesForReduceMemory = 12 * MB;
const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
const int kTargetFragmentationPercentForOptimizeMemory = 20;
const int kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
@@ -608,10 +608,10 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
// defaults to start and switch to a trace-based (using compaction speed)
// approach as soon as we have enough samples.
const int kTargetFragmentationPercent = 70;
const int kMaxEvacuatedBytes = 4 * MB;
const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
// Time to take for a single area (=payload of page). Used as soon as there
// exist enough compaction speed samples.
const float kTargetMsPerArea = 0.5;
const int kTargetMsPerArea = 1;
if (heap()->ShouldReduceMemory()) {
*target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
@@ -3218,7 +3218,7 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
// The number of parallel compaction tasks is limited by:
// - #evacuation pages
// - (#cores - 1)
const double kTargetCompactionTimeInMs = .5;
const double kTargetCompactionTimeInMs = 1;
const int kNumSweepingTasks = 3;
double compaction_speed =
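The heuristics above bound evacuation work by page-sized budgets (n * Page::kPageSize) and, once enough samples exist, by the observed compaction speed together with a per-area time budget; with pages back at 1 MB, each area (the page payload) doubles, which is why kTargetMsPerArea and kTargetCompactionTimeInMs go back up to 1. A rough illustration of that kind of speed-based budget, with an assumed shape rather than the exact formula from ComputeEvacuationHeuristics:

    #include <algorithm>
    #include <cstdio>

    int main() {
      const double compaction_speed = 2.0 * 1024 * 1024;  // sampled bytes/ms (hypothetical)
      const double kTargetMsPerArea = 1;                   // value restored by this revert
      const double kMaxEvacuatedBytes = 4.0 * (1 << 20);   // 4 * Page::kPageSize
      const double budget =
          std::min(compaction_speed * kTargetMsPerArea, kMaxEvacuatedBytes);
      std::printf("evacuation budget: %.0f bytes\n", budget);
      return 0;
    }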
@@ -595,21 +595,6 @@ void MemoryChunk::Unlink() {
set_next_chunk(NULL);
}
void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize());
Address free_start = chunk->area_end_ - bytes_to_shrink;
// Don't adjust the size of the page. The area is just uncommitted but not
// released.
chunk->area_end_ -= bytes_to_shrink;
UncommitBlock(free_start, bytes_to_shrink);
if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
if (chunk->reservation_.IsReserved())
chunk->reservation_.Guard(chunk->area_end_);
else
base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
}
}
MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
intptr_t commit_area_size,
@@ -1228,50 +1213,17 @@ Object* PagedSpace::FindObject(Address addr) {
return Smi::FromInt(0);
}
void PagedSpace::ShrinkPagesToHighWaterMark() {
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
EmptyAllocationInfo();
ResetFreeList();
for (Page* page : *this) {
// Only shrink immortal immovable pages after deserialization.
if (!page->IsFlagSet(Page::NEVER_EVACUATE)) continue;
// In order to shrink the page, we need to find the last filler. Since
// a GC could've happened we need to manually traverse the page to find
// any free space at the end.
HeapObjectIterator it(page);
HeapObject* filler = nullptr;
for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
filler = HeapObject::FromAddress(obj->address() + obj->Size());
}
if (filler == nullptr || filler->address() == page->area_end()) continue;
CHECK(filler->IsFiller());
if (!filler->IsFreeSpace()) continue;
size_t unused =
RoundDown(static_cast<size_t>(page->area_end() - filler->address() -
FreeSpace::kSize),
base::OS::CommitPageSize());
if (unused > 0) {
heap()->CreateFillerObjectAt(
filler->address(),
static_cast<int>(page->area_end() - filler->address() - unused),
ClearRecordedSlots::kNo);
heap()->memory_allocator()->ShrinkChunk(page, unused);
CHECK(filler->IsFiller());
CHECK_EQ(filler->address() + filler->Size(), page->area_end());
accounting_stats_.DecreaseCapacity(unused);
AccountUncommitted(unused);
}
bool PagedSpace::Expand() {
int size = AreaSize();
if (snapshotable() && !HasPages()) {
size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
}
}
bool PagedSpace::Expand() {
const int size = AreaSize();
if (!heap()->CanExpandOldGeneration(size)) return false;
Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
if (p == nullptr) return false;
AccountCommitted(static_cast<intptr_t>(p->size()));
// Pages created during bootstrapping may contain immortal immovable objects.
@@ -1352,6 +1304,7 @@ void PagedSpace::IncreaseCapacity(int size) {
void PagedSpace::ReleasePage(Page* page) {
DCHECK_EQ(page->LiveBytes(), 0);
DCHECK_EQ(AreaSize(), page->area_size());
DCHECK_EQ(page->owner(), this);
free_list_.EvictFreeListItems(page);
@@ -1371,7 +1324,7 @@ void PagedSpace::ReleasePage(Page* page) {
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
DCHECK(Capacity() > 0);
accounting_stats_.ShrinkSpace(page->area_size());
accounting_stats_.ShrinkSpace(AreaSize());
}
#ifdef DEBUG
@@ -689,7 +689,7 @@ class Page : public MemoryChunk {
// account.
// TODO(hpayer): This limit should be way smaller but we currently have
// short living objects >256K.
static const int kMaxRegularHeapObjectSize = 300 * KB;
static const int kMaxRegularHeapObjectSize = 600 * KB;
static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner);
@@ -1270,8 +1270,6 @@ class MemoryAllocator {
intptr_t commit_area_size,
Executability executable, Space* space);
void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink);
Address ReserveAlignedMemory(size_t requested, size_t alignment,
base::VirtualMemory* controller);
Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
@@ -2142,10 +2140,6 @@ class PagedSpace : public Space {
iterator begin() { return iterator(anchor_.next_page()); }
iterator end() { return iterator(&anchor_); }
// Shrink all pages of the space to be exactly the size needed using the
// high water mark.
void ShrinkPagesToHighWaterMark();
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
@@ -4712,7 +4712,6 @@ class FreeSpace: public HeapObject {
// Size is smi tagged when it is stored.
static const int kSizeOffset = HeapObject::kHeaderSize;
static const int kNextOffset = POINTER_SIZE_ALIGN(kSizeOffset + kPointerSize);
static const int kSize = kNextOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
@@ -10467,12 +10466,12 @@ class JSArray: public JSObject {
static const int kLengthOffset = JSObject::kHeaderSize;
static const int kSize = kLengthOffset + kPointerSize;
// 300 * KB is the Page::kMaxRegularHeapObjectSize defined in spaces.h which
// 600 * KB is the Page::kMaxRegularHeapObjectSize defined in spaces.h which
// we do not want to include in objects.h
// Note that Page::kMaxRegularHeapObjectSize has to be in sync with
// kInitialMaxFastElementArray which is checked in a DCHECK in heap.cc.
static const int kInitialMaxFastElementArray =
(300 * KB - FixedArray::kHeaderSize - kSize - AllocationMemento::kSize) /
(600 * KB - FixedArray::kHeaderSize - kSize - AllocationMemento::kSize) /
kPointerSize;
private:
@@ -31,6 +31,19 @@ bool Snapshot::HasContextSnapshot(Isolate* isolate, size_t index) {
return index < num_contexts;
}
uint32_t Snapshot::SizeOfFirstPage(Isolate* isolate, AllocationSpace space) {
DCHECK(space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE);
if (!isolate->snapshot_available()) {
return static_cast<uint32_t>(MemoryAllocator::PageAreaSize(space));
}
uint32_t size;
int offset = kFirstPageSizesOffset + (space - FIRST_PAGED_SPACE) * kInt32Size;
memcpy(&size, isolate->snapshot_blob()->data + offset, kInt32Size);
return size;
}
bool Snapshot::Initialize(Isolate* isolate) {
if (!isolate->snapshot_available()) return false;
base::ElapsedTimer timer;
@@ -76,8 +89,25 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
return Handle<Context>::cast(result);
}
void ProfileDeserialization(const SnapshotData* startup_snapshot,
const List<SnapshotData*>* context_snapshots) {
void UpdateMaxRequirementPerPage(
uint32_t* requirements,
Vector<const SerializedData::Reservation> reservations) {
int space = 0;
uint32_t current_requirement = 0;
for (const auto& reservation : reservations) {
current_requirement += reservation.chunk_size();
if (reservation.is_last()) {
requirements[space] = std::max(requirements[space], current_requirement);
current_requirement = 0;
space++;
}
}
DCHECK_EQ(i::Serializer::kNumberOfSpaces, space);
}
void CalculateFirstPageSizes(const SnapshotData* startup_snapshot,
const List<SnapshotData*>* context_snapshots,
uint32_t* sizes_out) {
if (FLAG_profile_deserialization) {
int startup_total = 0;
PrintF("Deserialization will reserve:\n");
@@ -93,6 +123,36 @@ void ProfileDeserialization(const SnapshotData* startup_snapshot,
PrintF("%10d bytes per context #%d\n", context_total, i);
}
}
uint32_t startup_requirements[i::Serializer::kNumberOfSpaces];
uint32_t context_requirements[i::Serializer::kNumberOfSpaces];
for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
startup_requirements[space] = 0;
context_requirements[space] = 0;
}
UpdateMaxRequirementPerPage(startup_requirements,
startup_snapshot->Reservations());
for (const auto& context_snapshot : *context_snapshots) {
UpdateMaxRequirementPerPage(context_requirements,
context_snapshot->Reservations());
}
for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
// If the space requirement for a page is less than a page size, we consider
// limiting the size of the first page in order to save memory on startup.
uint32_t required = startup_requirements[space] +
2 * context_requirements[space] +
Page::kObjectStartOffset;
// Add a small allowance to the code space for small scripts.
if (space == CODE_SPACE) required += 32 * KB;
if (space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE) {
uint32_t max_size =
MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space));
sizes_out[space - FIRST_PAGED_SPACE] = std::min(required, max_size);
}
}
}
v8::StartupData Snapshot::CreateSnapshotBlob(
@@ -106,9 +166,13 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
total_length += context_snapshot->RawData().length();
}
ProfileDeserialization(startup_snapshot, context_snapshots);
uint32_t first_page_sizes[kNumPagedSpaces];
CalculateFirstPageSizes(startup_snapshot, context_snapshots,
first_page_sizes);
char* data = new char[total_length];
memcpy(data + kFirstPageSizesOffset, first_page_sizes,
kNumPagedSpaces * kInt32Size);
memcpy(data + kNumberOfContextsOffset, &num_contexts, kInt32Size);
int payload_offset = StartupSnapshotOffset(num_contexts);
int payload_length = startup_snapshot->RawData().length();
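A note on the re-added sizing code above: UpdateMaxRequirementPerPage walks the flat reservation list, summing chunk sizes until the last chunk of a space is reached, and records the per-space maximum across snapshots; CalculateFirstPageSizes then turns the startup and context requirements (plus headroom for a second context, the page header, and a code-space allowance) into a first-page size per paged space, capped at the full page area. A small stand-alone sketch of the accumulation step (the Reservation struct and the sample values are stand-ins, not V8's types):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Reservation {   // stand-in for SerializedData::Reservation
      uint32_t chunk_size;
      bool is_last;        // true for the last chunk of a space
    };

    // Sum chunk sizes per space and keep the running maximum, mirroring the
    // shape of UpdateMaxRequirementPerPage above.
    void UpdateMax(uint32_t* requirements, const std::vector<Reservation>& res) {
      int space = 0;
      uint32_t current = 0;
      for (const auto& r : res) {
        current += r.chunk_size;
        if (r.is_last) {
          requirements[space] = std::max(requirements[space], current);
          current = 0;
          space++;
        }
      }
    }

    int main() {
      uint32_t requirements[2] = {0, 0};
      // Two spaces: the first needs 10 + 20 units, the second needs 5.
      UpdateMax(requirements, {{10, false}, {20, true}, {5, true}});
      std::printf("space 0: %u, space 1: %u\n",
                  static_cast<unsigned>(requirements[0]),
                  static_cast<unsigned>(requirements[1]));
      return 0;
    }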
@@ -67,6 +67,9 @@ class Snapshot : public AllStatic {
static bool EmbedsScript(Isolate* isolate);
static uint32_t SizeOfFirstPage(Isolate* isolate, AllocationSpace space);
// To be implemented by the snapshot source.
static const v8::StartupData* DefaultSnapshotBlob();
@@ -85,16 +88,21 @@ class Snapshot : public AllStatic {
int index);
// Snapshot blob layout:
// [0] number of contexts N
// [1] offset to context 0
// [2] offset to context 1
// [0 - 5] pre-calculated first page sizes for paged spaces
// [6] number of contexts N
// [7] offset to context 0
// [8] offset to context 1
// ...
// ... offset to context N - 1
// ... startup snapshot data
// ... context 0 snapshot data
// ... context 1 snapshot data
static const int kNumberOfContextsOffset = 0;
static const int kNumPagedSpaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
static const int kFirstPageSizesOffset = 0;
static const int kNumberOfContextsOffset =
kFirstPageSizesOffset + kNumPagedSpaces * kInt32Size;
static const int kFirstContextOffsetOffset =
kNumberOfContextsOffset + kInt32Size;
@@ -2228,18 +2228,6 @@ static Address AlignOldSpace(AllocationAlignment alignment, int offset) {
// Test the case where allocation must be done from the free list, so filler
// may precede or follow the object.
TEST(TestAlignedOverAllocation) {
Heap* heap = CcTest::heap();
// Test checks for fillers before and behind objects and requires a fresh
// page and empty free list.
heap::AbandonCurrentlyFreeMemory(heap->old_space());
// Allocate a dummy object to properly set up the linear allocation info.
AllocationResult dummy =
heap->old_space()->AllocateRawUnaligned(kPointerSize);
CHECK(!dummy.IsRetry());
heap->CreateFillerObjectAt(
HeapObject::cast(dummy.ToObjectChecked())->address(), kPointerSize,
ClearRecordedSlots::kNo);
// Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones.
const intptr_t double_misalignment = kDoubleSize - kPointerSize;
Address start;
@@ -3623,12 +3611,8 @@ TEST(ReleaseOverReservedPages) {
i::FLAG_page_promotion = false;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
// If there's snapshot available, we don't know whether 20 small arrays will
// fit on the initial pages.
if (!isolate->snapshot_available()) return;
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
v8::HandleScope scope(CcTest::isolate());
static const int number_of_test_pages = 20;
@@ -3658,8 +3642,14 @@
"triggered by test 2");
CHECK_GE(overall_page_count, old_space->CountTotalPages() * 2);
// Triggering a last-resort GC should cause all pages to be released to the
// OS so that other processes can seize the memory. If we get a failure here
// where there are 2 pages left instead of 1, then we should increase the
// size of the first page a little in SizeOfFirstPage in spaces.cc. The
// first page should be small in order to reduce memory used when the VM
// boots, but if the 20 small arrays don't fit on the first page then that's
// an indication that it is too small.
heap->CollectAllAvailableGarbage("triggered really hard");
// Triggering a last-resort GC should release all additional pages.
CHECK_EQ(initial_page_count, old_space->CountTotalPages());
}
@@ -478,7 +478,8 @@ TEST(LargeObjectSpace) {
CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE).IsRetry());
}
TEST(SizeOfInitialHeap) {
TEST(SizeOfFirstPageIsLargeEnough) {
if (i::FLAG_always_opt) return;
// Bootstrapping without a snapshot causes more allocations.
CcTest::InitializeVM();
@@ -494,31 +495,22 @@
return;
}
// The limit for each space for an empty isolate containing just the
// snapshot.
const size_t kMaxInitialSizePerSpace = 1536 * KB; // 1.5MB
// If this test fails due to enabling experimental natives that are not part
// of the snapshot, we may need to adjust CalculateFirstPageSizes.
// Freshly initialized VM gets by with the snapshot size (which is below
// kMaxInitialSizePerSpace per space).
Heap* heap = isolate->heap();
int page_count[LAST_PAGED_SPACE + 1];
// Freshly initialized VM gets by with one page per space.
for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
// Debug code can be very large, so skip CODE_SPACE if we are generating it.
if (i == CODE_SPACE && i::FLAG_debug_code) continue;
page_count[i] = heap->paged_space(i)->CountTotalPages();
// Check that the initial heap is also below the limit.
CHECK_LT(static_cast<size_t>(heap->paged_space(i)->CommittedMemory()),
kMaxInitialSizePerSpace);
CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
}
// Executing the empty script gets by with the same number of pages, i.e.,
// requires no extra space.
// Executing the empty script gets by with one page per space.
CompileRun("/*empty*/");
for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
// Debug code can be very large, so skip CODE_SPACE if we are generating it.
if (i == CODE_SPACE && i::FLAG_debug_code) continue;
CHECK_EQ(page_count[i], isolate->heap()->paged_space(i)->CountTotalPages());
CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
}
// No large objects required to perform the above steps.
@@ -1177,13 +1177,13 @@ TEST(CodeSerializerThreeBigStrings) {
Vector<const uint8_t> source_b =
ConstructSource(STATIC_CHAR_VECTOR("var b = \""), STATIC_CHAR_VECTOR("b"),
STATIC_CHAR_VECTOR("\";"), 300000);
STATIC_CHAR_VECTOR("\";"), 600000);
Handle<String> source_b_str =
f->NewStringFromOneByte(source_b).ToHandleChecked();
Vector<const uint8_t> source_c =
ConstructSource(STATIC_CHAR_VECTOR("var c = \""), STATIC_CHAR_VECTOR("c"),
STATIC_CHAR_VECTOR("\";"), 300000);
STATIC_CHAR_VECTOR("\";"), 500000);
Handle<String> source_c_str =
f->NewStringFromOneByte(source_c).ToHandleChecked();
@@ -1216,10 +1216,10 @@ TEST(CodeSerializerThreeBigStrings) {
v8::Maybe<int32_t> result =
CompileRun("(a + b).length")
->Int32Value(v8::Isolate::GetCurrent()->GetCurrentContext());
CHECK_EQ(300000 + 700000, result.FromJust());
CHECK_EQ(600000 + 700000, result.FromJust());
result = CompileRun("(b + c).length")
->Int32Value(v8::Isolate::GetCurrent()->GetCurrentContext());
CHECK_EQ(300000 + 300000, result.FromJust());
CHECK_EQ(500000 + 600000, result.FromJust());
Heap* heap = isolate->heap();
v8::Local<v8::String> result_str =
CompileRun("a")