Commit ed8791ea authored by mlippautz, committed by Commit bot

[heap,snapshot] Replace first page size from snapshots with page trimming

Replace the first page size in the snapshots with heap logic that trims pages
after deserialization. The snapshot-provided page sizes were only an
approximation, while the heap knows exactly where to trim.

Furthermore, trim the pages directly after deserialization, leaving no wiggle
room for further objects. This avoids polluting the immortal immovable pages
with regular objects, e.g. Contexts. The downside is that we may need to expand
the space with a new page.

BUG=chromium:636331

Review-Url: https://codereview.chromium.org/2311963002
Cr-Commit-Position: refs/heads/master@{#39200}
parent 1a5f8fa5
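
Before the diff, a minimal, standalone sketch of the trimming arithmetic this patch introduces: the unused tail of a page behind the high water mark is rounded down to whole OS commit pages, keeping room for a trailing FreeSpace filler, and only that rounded-down amount is uncommitted. The constants and the TrimmableBytes helper below are illustrative assumptions, not V8 APIs; the real code uses base::OS::CommitPageSize() and FreeSpace::kSize in Page::ShrinkToHighWaterMark.

#include <cstdint>
#include <cstdio>

// Illustrative constants; V8 queries base::OS::CommitPageSize() and uses
// FreeSpace::kSize instead of these hard-coded values.
constexpr uint64_t kCommitPageSize = 4096;
constexpr uint64_t kFreeSpaceHeaderSize = 16;

// Round value down to the next lower multiple of granularity.
constexpr uint64_t RoundDown(uint64_t value, uint64_t granularity) {
  return value - (value % granularity);
}

// Hypothetical helper mirroring the trimming arithmetic: given the end of a
// page's usable area and the high water mark (end of the last live object),
// return how many bytes can be uncommitted while leaving enough space for a
// FreeSpace filler in front of the trimmed region.
uint64_t TrimmableBytes(uint64_t area_end, uint64_t high_water_mark) {
  if (high_water_mark + kFreeSpaceHeaderSize >= area_end) return 0;
  return RoundDown(area_end - high_water_mark - kFreeSpaceHeaderSize,
                   kCommitPageSize);
}

int main() {
  // Example: 500 KB of usable area, of which only 37 KB is live after
  // deserialization.
  const uint64_t area_start = 0x10000;
  const uint64_t area_end = area_start + 500 * 1024;
  const uint64_t high_water_mark = area_start + 37 * 1024;
  const uint64_t trim = TrimmableBytes(area_end, high_water_mark);
  std::printf("can uncommit %llu bytes (%llu commit pages)\n",
              static_cast<unsigned long long>(trim),
              static_cast<unsigned long long>(trim / kCommitPageSize));
  return 0;
}

Because a trimmed page keeps its reservation but has a smaller committed area, accounting in the patch switches from the space-wide AreaSize() to the per-page page->area_size() (see PagedSpace::ReleasePage below).
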
@@ -5471,16 +5471,19 @@ void Heap::PrintAlloctionsHash() {
 
 void Heap::NotifyDeserializationComplete() {
-  deserialization_complete_ = true;
-#ifdef DEBUG
-  // All pages right after bootstrapping must be marked as never-evacuate.
+  DCHECK_EQ(0, gc_count());
   PagedSpaces spaces(this);
   for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
+    if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
+#ifdef DEBUG
+    // All pages right after bootstrapping must be marked as never-evacuate.
     for (Page* p : *s) {
       CHECK(p->NeverEvacuate());
     }
-  }
 #endif  // DEBUG
+  }
+
+  deserialization_complete_ = true;
 }
 
 void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {

...
@@ -616,6 +616,21 @@ void MemoryChunk::Unlink() {
   set_next_chunk(NULL);
 }
 
+void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
+  DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
+  DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize());
+  Address free_start = chunk->area_end_ - bytes_to_shrink;
+  // Don't adjust the size of the page. The area is just uncommitted but not
+  // released.
+  chunk->area_end_ -= bytes_to_shrink;
+  UncommitBlock(free_start, bytes_to_shrink);
+  if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+    if (chunk->reservation_.IsReserved())
+      chunk->reservation_.Guard(chunk->area_end_);
+    else
+      base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
+  }
+}
+
 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                             intptr_t commit_area_size,
@@ -763,6 +778,47 @@ void Page::ResetFreeListStatistics() {
   available_in_free_list_ = 0;
 }
 
+size_t Page::ShrinkToHighWaterMark() {
+  // Shrink pages to high water mark. The water mark points either to a filler
+  // or the area_end.
+  HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
+  if (filler->address() == area_end()) return 0;
+  CHECK(filler->IsFiller());
+  if (!filler->IsFreeSpace()) return 0;
+
+#ifdef DEBUG
+  // Check that the filler is indeed the last filler on the page.
+  HeapObjectIterator it(this);
+  HeapObject* filler2 = nullptr;
+  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
+    filler2 = HeapObject::FromAddress(obj->address() + obj->Size());
+  }
+  if (filler2 == nullptr || filler2->address() == area_end()) return 0;
+  DCHECK(filler2->IsFiller());
+  DCHECK_EQ(filler->address(), filler2->address());
+#endif  // DEBUG
+
+  size_t unused = RoundDown(
+      static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
+      base::OS::CommitPageSize());
+  if (unused > 0) {
+    if (FLAG_trace_gc_verbose) {
+      PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
+                   reinterpret_cast<void*>(this),
+                   reinterpret_cast<void*>(area_end()),
+                   reinterpret_cast<void*>(area_end() - unused));
+    }
+    heap()->CreateFillerObjectAt(
+        filler->address(),
+        static_cast<int>(area_end() - filler->address() - unused),
+        ClearRecordedSlots::kNo);
+    heap()->memory_allocator()->ShrinkChunk(this, unused);
+    CHECK(filler->IsFiller());
+    CHECK_EQ(filler->address() + filler->Size(), area_end());
+  }
+  return unused;
+}
+
 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
                                         Address start_free) {
   // We do not allow partial shrink for code.
@@ -1234,11 +1290,22 @@ Object* PagedSpace::FindObject(Address addr) {
   return Smi::FromInt(0);
 }
 
-bool PagedSpace::Expand() {
-  int size = AreaSize();
-  if (snapshotable() && !HasPages()) {
-    size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
+void PagedSpace::ShrinkImmortalImmovablePages() {
+  DCHECK(!heap()->deserialization_complete());
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  EmptyAllocationInfo();
+  ResetFreeList();
+
+  for (Page* page : *this) {
+    DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
+    size_t unused = page->ShrinkToHighWaterMark();
+    accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
+    AccountUncommitted(unused);
   }
+}
+
+bool PagedSpace::Expand() {
+  const int size = AreaSize();
 
   if (!heap()->CanExpandOldGeneration(size)) return false;
@@ -1335,7 +1402,6 @@ void PagedSpace::IncreaseCapacity(size_t bytes) {
 
 void PagedSpace::ReleasePage(Page* page) {
   DCHECK_EQ(page->LiveBytes(), 0);
-  DCHECK_EQ(AreaSize(), page->area_size());
   DCHECK_EQ(page->owner(), this);
 
   free_list_.EvictFreeListItems(page);
@@ -1354,10 +1420,8 @@ void PagedSpace::ReleasePage(Page* page) {
   }
 
   AccountUncommitted(static_cast<intptr_t>(page->size()));
+  accounting_stats_.ShrinkSpace(page->area_size());
   heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
-
-  DCHECK(Capacity() > 0);
-  accounting_stats_.ShrinkSpace(AreaSize());
 }
 
 #ifdef DEBUG

...
@@ -817,6 +817,8 @@ class Page : public MemoryChunk {
     available_in_free_list_.Increment(available);
   }
 
+  size_t ShrinkToHighWaterMark();
+
 #ifdef DEBUG
   void Print();
 #endif  // DEBUG
@@ -1305,6 +1307,8 @@ class MemoryAllocator {
                              intptr_t commit_area_size,
                              Executability executable, Space* space);
 
+  void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink);
+
   Address ReserveAlignedMemory(size_t requested, size_t alignment,
                                base::VirtualMemory* controller);
   Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
@@ -2186,6 +2190,10 @@ class PagedSpace : public Space {
   iterator begin() { return iterator(anchor_.next_page()); }
   iterator end() { return iterator(&anchor_); }
 
+  // Shrink immortal immovable pages of the space to be exactly the size needed
+  // using the high water mark.
+  void ShrinkImmortalImmovablePages();
+
  protected:
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.

...
@@ -2380,13 +2380,19 @@ bool Isolate::Init(Deserializer* des) {
   runtime_profiler_ = new RuntimeProfiler(this);
 
   // If we are deserializing, read the state into the now-empty heap.
-  if (!create_heap_objects) {
-    des->Deserialize(this);
-  }
-  load_stub_cache_->Initialize();
-  store_stub_cache_->Initialize();
-  if (FLAG_ignition || serializer_enabled()) {
-    interpreter_->Initialize();
+  {
+    AlwaysAllocateScope always_allocate(this);
+
+    if (!create_heap_objects) {
+      des->Deserialize(this);
+    }
+    load_stub_cache_->Initialize();
+    store_stub_cache_->Initialize();
+    if (FLAG_ignition || serializer_enabled()) {
+      interpreter_->Initialize();
+    }
+
+    heap_.NotifyDeserializationComplete();
   }
 
   // Finish initialization of ThreadLocal after deserialization is done.
@@ -2417,8 +2423,6 @@ bool Isolate::Init(Deserializer* des) {
 
   time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
 
-  heap_.NotifyDeserializationComplete();
-
   if (!create_heap_objects) {
     // Now that the heap is consistent, it's OK to generate the code for the
     // deopt entry table that might have been referred to by optimized code in

...
@@ -4814,6 +4814,7 @@ class FreeSpace: public HeapObject {
   // Size is smi tagged when it is stored.
   static const int kSizeOffset = HeapObject::kHeaderSize;
   static const int kNextOffset = POINTER_SIZE_ALIGN(kSizeOffset + kPointerSize);
+  static const int kSize = kNextOffset + kPointerSize;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);

...
@@ -31,19 +31,6 @@ bool Snapshot::HasContextSnapshot(Isolate* isolate, size_t index) {
   return index < num_contexts;
 }
 
-uint32_t Snapshot::SizeOfFirstPage(Isolate* isolate, AllocationSpace space) {
-  DCHECK(space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE);
-  if (!isolate->snapshot_available()) {
-    return static_cast<uint32_t>(MemoryAllocator::PageAreaSize(space));
-  }
-  uint32_t size;
-  int offset = kFirstPageSizesOffset + (space - FIRST_PAGED_SPACE) * kInt32Size;
-  memcpy(&size, isolate->snapshot_blob()->data + offset, kInt32Size);
-  return size;
-}
-
 bool Snapshot::Initialize(Isolate* isolate) {
   if (!isolate->snapshot_available()) return false;
   base::ElapsedTimer timer;
@@ -89,25 +76,8 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
   return Handle<Context>::cast(result);
 }
 
-void UpdateMaxRequirementPerPage(
-    uint32_t* requirements,
-    Vector<const SerializedData::Reservation> reservations) {
-  int space = 0;
-  uint32_t current_requirement = 0;
-  for (const auto& reservation : reservations) {
-    current_requirement += reservation.chunk_size();
-    if (reservation.is_last()) {
-      requirements[space] = std::max(requirements[space], current_requirement);
-      current_requirement = 0;
-      space++;
-    }
-  }
-  DCHECK_EQ(i::Serializer::kNumberOfSpaces, space);
-}
-
-void CalculateFirstPageSizes(const SnapshotData* startup_snapshot,
-                             const List<SnapshotData*>* context_snapshots,
-                             uint32_t* sizes_out) {
+void ProfileDeserialization(const SnapshotData* startup_snapshot,
+                            const List<SnapshotData*>* context_snapshots) {
   if (FLAG_profile_deserialization) {
     int startup_total = 0;
     PrintF("Deserialization will reserve:\n");
@@ -123,36 +93,6 @@ void CalculateFirstPageSizes(const SnapshotData* startup_snapshot,
       PrintF("%10d bytes per context #%d\n", context_total, i);
     }
   }
-
-  uint32_t startup_requirements[i::Serializer::kNumberOfSpaces];
-  uint32_t context_requirements[i::Serializer::kNumberOfSpaces];
-  for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
-    startup_requirements[space] = 0;
-    context_requirements[space] = 0;
-  }
-
-  UpdateMaxRequirementPerPage(startup_requirements,
-                              startup_snapshot->Reservations());
-  for (const auto& context_snapshot : *context_snapshots) {
-    UpdateMaxRequirementPerPage(context_requirements,
-                                context_snapshot->Reservations());
-  }
-
-  for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
-    // If the space requirement for a page is less than a page size, we consider
-    // limiting the size of the first page in order to save memory on startup.
-    uint32_t required = startup_requirements[space] +
-                        2 * context_requirements[space] +
-                        Page::kObjectStartOffset;
-    // Add a small allowance to the code space for small scripts.
-    if (space == CODE_SPACE) required += 32 * KB;
-    if (space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE) {
-      uint32_t max_size =
-          MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space));
-      sizes_out[space - FIRST_PAGED_SPACE] = std::min(required, max_size);
-    }
-  }
 }
 
 v8::StartupData Snapshot::CreateSnapshotBlob(
@@ -166,13 +106,9 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
     total_length += context_snapshot->RawData().length();
   }
 
-  uint32_t first_page_sizes[kNumPagedSpaces];
-
-  CalculateFirstPageSizes(startup_snapshot, context_snapshots,
-                          first_page_sizes);
+  ProfileDeserialization(startup_snapshot, context_snapshots);
 
   char* data = new char[total_length];
-  memcpy(data + kFirstPageSizesOffset, first_page_sizes,
-         kNumPagedSpaces * kInt32Size);
   memcpy(data + kNumberOfContextsOffset, &num_contexts, kInt32Size);
   int payload_offset = StartupSnapshotOffset(num_contexts);
   int payload_length = startup_snapshot->RawData().length();

...
@@ -67,9 +67,6 @@ class Snapshot : public AllStatic {
 
   static bool EmbedsScript(Isolate* isolate);
 
-  static uint32_t SizeOfFirstPage(Isolate* isolate, AllocationSpace space);
-
   // To be implemented by the snapshot source.
   static const v8::StartupData* DefaultSnapshotBlob();
 
@@ -88,21 +85,16 @@
                                   int index);
 
   // Snapshot blob layout:
-  // [0 - 5] pre-calculated first page sizes for paged spaces
-  // [6] number of contexts N
-  // [7] offset to context 0
-  // [8] offset to context 1
+  // [0] number of contexts N
+  // [1] offset to context 0
+  // [2] offset to context 1
   // ...
   // ... offset to context N - 1
   // ... startup snapshot data
   // ... context 0 snapshot data
   // ... context 1 snapshot data
 
-  static const int kNumPagedSpaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
-
-  static const int kFirstPageSizesOffset = 0;
-  static const int kNumberOfContextsOffset =
-      kFirstPageSizesOffset + kNumPagedSpaces * kInt32Size;
+  static const int kNumberOfContextsOffset = 0;
   static const int kFirstContextOffsetOffset =
       kNumberOfContextsOffset + kInt32Size;

...
@@ -28,6 +28,35 @@ int FixedArrayLenFromSize(int size) {
   return (size - FixedArray::kHeaderSize) / kPointerSize;
 }
 
+std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
+                                                                int remainder) {
+  std::vector<Handle<FixedArray>> handles;
+  Isolate* isolate = heap->isolate();
+  const int kArraySize = 128;
+  const int kArrayLen = heap::FixedArrayLenFromSize(kArraySize);
+  CHECK_EQ(Page::kAllocatableMemory % kArraySize, 0);
+  Handle<FixedArray> array;
+  for (size_t allocated = 0;
+       allocated != (Page::kAllocatableMemory - remainder);
+       allocated += array->Size()) {
+    if (allocated == (Page::kAllocatableMemory - kArraySize)) {
+      array = isolate->factory()->NewFixedArray(
+          heap::FixedArrayLenFromSize(kArraySize - remainder), TENURED);
+      CHECK_EQ(kArraySize - remainder, array->Size());
+    } else {
+      array = isolate->factory()->NewFixedArray(kArrayLen, TENURED);
+      CHECK_EQ(kArraySize, array->Size());
+    }
+    if (handles.empty()) {
+      // Check that allocations started on a new page.
+      CHECK_EQ(array->address(),
+               Page::FromAddress(array->address())->area_start());
+    }
+    handles.push_back(array);
+  }
+  return handles;
+}
+
 std::vector<Handle<FixedArray>> CreatePadding(Heap* heap, int padding_size,
                                               PretenureFlag tenure,
                                               int object_size) {

...
@@ -15,6 +15,12 @@ void SealCurrentObjects(Heap* heap);
 
 int FixedArrayLenFromSize(int size);
 
+// Fill a page with fixed arrays leaving remainder behind. The function does
+// not create additional fillers and assumes that the space has just been
+// sealed.
+std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
+                                                                int remainder);
+
 std::vector<Handle<FixedArray>> CreatePadding(
     Heap* heap, int padding_size, PretenureFlag tenure,
     int object_size = Page::kMaxRegularHeapObjectSize);

...
@@ -2218,6 +2218,18 @@ static Address AlignOldSpace(AllocationAlignment alignment, int offset) {
 // Test the case where allocation must be done from the free list, so filler
 // may precede or follow the object.
 TEST(TestAlignedOverAllocation) {
+  Heap* heap = CcTest::heap();
+  // Test checks for fillers before and behind objects and requires a fresh
+  // page and empty free list.
+  heap::AbandonCurrentlyFreeMemory(heap->old_space());
+  // Allocate a dummy object to properly set up the linear allocation info.
+  AllocationResult dummy =
+      heap->old_space()->AllocateRawUnaligned(kPointerSize);
+  CHECK(!dummy.IsRetry());
+  heap->CreateFillerObjectAt(
+      HeapObject::cast(dummy.ToObjectChecked())->address(), kPointerSize,
+      ClearRecordedSlots::kNo);
+
   // Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones.
   const intptr_t double_misalignment = kDoubleSize - kPointerSize;
   Address start;

...
@@ -36,6 +36,7 @@
 #include "src/v8.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/heap/heap-tester.h"
+#include "test/cctest/heap/heap-utils.h"
 
 namespace v8 {
 namespace internal {
@@ -482,8 +483,7 @@ TEST(LargeObjectSpace) {
   CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE).IsRetry());
 }
 
-
-TEST(SizeOfFirstPageIsLargeEnough) {
+TEST(SizeOfInitialHeap) {
   if (i::FLAG_always_opt) return;
   // Bootstrapping without a snapshot causes more allocations.
   CcTest::InitializeVM();
@@ -499,22 +499,31 @@ TEST(SizeOfInitialHeap) {
     return;
   }
 
-  // If this test fails due to enabling experimental natives that are not part
-  // of the snapshot, we may need to adjust CalculateFirstPageSizes.
+  // The limit for each space for an empty isolate containing just the
+  // snapshot.
+  const size_t kMaxInitialSizePerSpace = 2 * MB;
 
-  // Freshly initialized VM gets by with one page per space.
+  // Freshly initialized VM gets by with the snapshot size (which is below
+  // kMaxInitialSizePerSpace per space).
+  Heap* heap = isolate->heap();
+  int page_count[LAST_PAGED_SPACE + 1] = {0, 0, 0, 0};
   for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
     // Debug code can be very large, so skip CODE_SPACE if we are generating it.
     if (i == CODE_SPACE && i::FLAG_debug_code) continue;
-    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
+
+    page_count[i] = heap->paged_space(i)->CountTotalPages();
+    // Check that the initial heap is also below the limit.
+    CHECK_LT(static_cast<size_t>(heap->paged_space(i)->CommittedMemory()),
+             kMaxInitialSizePerSpace);
   }
 
-  // Executing the empty script gets by with one page per space.
+  // Executing the empty script gets by with the same number of pages, i.e.,
+  // requires no extra space.
   CompileRun("/*empty*/");
   for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
     // Debug code can be very large, so skip CODE_SPACE if we are generating it.
     if (i == CODE_SPACE && i::FLAG_debug_code) continue;
-    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
+    CHECK_EQ(page_count[i], isolate->heap()->paged_space(i)->CountTotalPages());
   }
 
   // No large objects required to perform the above steps.
@@ -685,5 +694,104 @@ UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
   isolate->Dispose();
 }
 
+TEST(ShrinkPageToHighWaterMarkFreeSpaceEnd) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  heap::SealCurrentObjects(CcTest::heap());
+
+  // Prepare page that only contains a single object and a trailing FreeSpace
+  // filler.
+  Handle<FixedArray> array = isolate->factory()->NewFixedArray(128, TENURED);
+  Page* page = Page::FromAddress(array->address());
+
+  // Reset space so high water mark is consistent.
+  CcTest::heap()->old_space()->ResetFreeList();
+  CcTest::heap()->old_space()->EmptyAllocationInfo();
+
+  HeapObject* filler =
+      HeapObject::FromAddress(array->address() + array->Size());
+  CHECK(filler->IsFreeSpace());
+  size_t shrinked = page->ShrinkToHighWaterMark();
+  size_t should_have_shrinked =
+      RoundDown(static_cast<size_t>(Page::kAllocatableMemory - array->Size()),
+                base::OS::CommitPageSize());
+  CHECK_EQ(should_have_shrinked, shrinked);
+}
+
+TEST(ShrinkPageToHighWaterMarkNoFiller) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  heap::SealCurrentObjects(CcTest::heap());
+
+  const int kFillerSize = 0;
+  std::vector<Handle<FixedArray>> arrays =
+      heap::FillOldSpacePageWithFixedArrays(CcTest::heap(), kFillerSize);
+  Handle<FixedArray> array = arrays.back();
+  Page* page = Page::FromAddress(array->address());
+  CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);
+
+  // Reset space so high water mark and fillers are consistent.
+  CcTest::heap()->old_space()->ResetFreeList();
+  CcTest::heap()->old_space()->EmptyAllocationInfo();
+
+  const size_t shrinked = page->ShrinkToHighWaterMark();
+  CHECK_EQ(0, shrinked);
+}
+
+TEST(ShrinkPageToHighWaterMarkOneWordFiller) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  heap::SealCurrentObjects(CcTest::heap());
+
+  const int kFillerSize = kPointerSize;
+  std::vector<Handle<FixedArray>> arrays =
+      heap::FillOldSpacePageWithFixedArrays(CcTest::heap(), kFillerSize);
+  Handle<FixedArray> array = arrays.back();
+  Page* page = Page::FromAddress(array->address());
+  CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);
+
+  // Reset space so high water mark and fillers are consistent.
+  CcTest::heap()->old_space()->ResetFreeList();
+  CcTest::heap()->old_space()->EmptyAllocationInfo();
+
+  HeapObject* filler =
+      HeapObject::FromAddress(array->address() + array->Size());
+  CHECK_EQ(filler->map(), CcTest::heap()->one_pointer_filler_map());
+
+  const size_t shrinked = page->ShrinkToHighWaterMark();
+  CHECK_EQ(0, shrinked);
+}
+
+TEST(ShrinkPageToHighWaterMarkTwoWordFiller) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  heap::SealCurrentObjects(CcTest::heap());
+
+  const int kFillerSize = 2 * kPointerSize;
+  std::vector<Handle<FixedArray>> arrays =
+      heap::FillOldSpacePageWithFixedArrays(CcTest::heap(), kFillerSize);
+  Handle<FixedArray> array = arrays.back();
+  Page* page = Page::FromAddress(array->address());
+  CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);
+
+  // Reset space so high water mark and fillers are consistent.
+  CcTest::heap()->old_space()->ResetFreeList();
+  CcTest::heap()->old_space()->EmptyAllocationInfo();
+
+  HeapObject* filler =
+      HeapObject::FromAddress(array->address() + array->Size());
+  CHECK_EQ(filler->map(), CcTest::heap()->two_pointer_filler_map());
+
+  const size_t shrinked = page->ShrinkToHighWaterMark();
+  CHECK_EQ(0, shrinked);
+}
+
 }  // namespace internal
 }  // namespace v8