Commit 1617043c authored by mlippautz, committed by Commit bot

Revert of "[heap] Switch to 500k pages" (patchset #11 id:220001 of...

Revert of "[heap] Switch to 500k pages" (patchset #11 id:220001 of https://codereview.chromium.org/2232653003/ )

Reason for revert:
Breaks benchmark with --turbo on avx2

https://build.chromium.org/p/client.v8/builders/V8%20Linux64%20-%20avx2/builds/9895

Original issue's description:
> Reland of "[heap] Switch to 500k pages"
>
> Decrease regular heap object size to 400k. In a follow up, we can now get rid of
> the new space border page while keeping the 1M minimum new space size.
>
> BUG=chromium:636331
>
> This reverts commit 555c9619.
>
> Committed: https://crrev.com/20e2ea80e169e85c5b8231adc02901fb6c989609
> Cr-Commit-Position: refs/heads/master@{#38608}

TBR=hpayer@chromium.org,yangguo@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:636331

Review-Url: https://codereview.chromium.org/2239323002
Cr-Commit-Position: refs/heads/master@{#38613}
parent 227353b7
@@ -200,7 +200,7 @@
// Bump up for Power Linux due to larger (64K) page size.
const int kPageSizeBits = 22;
#else
const int kPageSizeBits = 19;
const int kPageSizeBits = 20;
#endif
#endif // V8_BASE_BUILD_CONFIG_H_
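
For context, a hedged aside (not part of this CL): V8 derives the page size from kPageSizeBits as 1 << kPageSizeBits, so the reverted value 19 corresponds to 512 KB ("500k") pages while the restored value 20 gives the previous 1 MB pages. A minimal sketch of that relationship:

// Sketch only: how the page size follows from kPageSizeBits.
#include <cstddef>
constexpr int kPageSizeBitsReverted = 19;  // value from the 500k-pages CL
constexpr int kPageSizeBitsRestored = 20;  // value restored by this revert
constexpr size_t KB = 1024;
static_assert((size_t{1} << kPageSizeBitsReverted) == 512 * KB, "512 KB pages");
static_assert((size_t{1} << kPageSizeBitsRestored) == 1024 * KB, "1 MB pages");
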
@@ -77,7 +77,7 @@ Heap::Heap()
// semispace_size_ should be a power of 2 and old_generation_size_ should
// be a multiple of Page::kPageSize.
max_semi_space_size_(8 * (kPointerSize / 4) * MB),
initial_semispace_size_(MB),
initial_semispace_size_(Page::kPageSize),
max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
initial_old_generation_size_(max_old_generation_size_ /
kInitalOldGenerationLimitFactor),
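
A hedged worked example for the scaling above (not from the CL): on a 64-bit build kPointerSize is 8, so the maximum semispace size is 16 MB and the maximum old generation size 1400 MB; on 32-bit builds the kPointerSize / 4 factor is 1. With the restored 1 MB pages, Page::kPageSize equals MB again, so switching initial_semispace_size_ back from MB to Page::kPageSize keeps the same value.

// Sketch only: 8 * (kPointerSize / 4) * MB spelled out for both pointer sizes.
#include <cstddef>
constexpr size_t MB = 1024 * 1024;
constexpr size_t MaxSemiSpaceSize(int pointer_size) {
  return 8 * (pointer_size / 4) * MB;
}
static_assert(MaxSemiSpaceSize(8) == 16 * MB, "64-bit build");
static_assert(MaxSemiSpaceSize(4) == 8 * MB, "32-bit build");
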
@@ -5429,19 +5429,16 @@ void Heap::PrintAlloctionsHash() {
void Heap::NotifyDeserializationComplete() {
DCHECK_EQ(0, gc_count());
deserialization_complete_ = true;
#ifdef DEBUG
// All pages right after bootstrapping must be marked as never-evacuate.
PagedSpaces spaces(this);
for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
#ifdef DEBUG
// All pages right after bootstrapping must be marked as never-evacuate.
for (Page* p : *s) {
CHECK(p->NeverEvacuate());
}
#endif // DEBUG
}
deserialization_complete_ = true;
#endif // DEBUG
}
void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
......
@@ -600,7 +600,7 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
// For memory reducing and optimize for memory mode we directly define both
// constants.
const int kTargetFragmentationPercentForReduceMemory = 20;
const int kMaxEvacuatedBytesForReduceMemory = 12 * MB;
const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
const int kTargetFragmentationPercentForOptimizeMemory = 20;
const int kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
@@ -608,10 +608,10 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
// defaults to start and switch to a trace-based (using compaction speed)
// approach as soon as we have enough samples.
const int kTargetFragmentationPercent = 70;
const int kMaxEvacuatedBytes = 4 * MB;
const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
// Time to take for a single area (=payload of page). Used as soon as there
// exist enough compaction speed samples.
const float kTargetMsPerArea = 0.5;
const int kTargetMsPerArea = 1;
if (heap()->ShouldReduceMemory()) {
*target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
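
A hedged aside (not from the CL): with the restored 1 MB page size these substitutions are value-preserving, since 12 * Page::kPageSize is again 12 MB and 4 * Page::kPageSize is 4 MB; the 500k-pages CL had pinned the constants to the same absolute MB budgets instead. The per-area time budget likewise goes back from 0.5 ms (float) to 1 ms (int).

// Sketch only, assuming the restored page size of 1 << 20 bytes.
#include <cstddef>
constexpr size_t MB = 1024 * 1024;
constexpr size_t kAssumedPageSize = size_t{1} << 20;
static_assert(12 * kAssumedPageSize == 12 * MB, "kMaxEvacuatedBytesForReduceMemory");
static_assert(4 * kAssumedPageSize == 4 * MB, "kMaxEvacuatedBytes");
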
@@ -3223,7 +3223,7 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
// The number of parallel compaction tasks is limited by:
// - #evacuation pages
// - (#cores - 1)
const double kTargetCompactionTimeInMs = .5;
const double kTargetCompactionTimeInMs = 1;
const int kNumSweepingTasks = 3;
double compaction_speed =
......
@@ -596,21 +596,6 @@ void MemoryChunk::Unlink() {
set_next_chunk(NULL);
}
void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize());
Address free_start = chunk->area_end_ - bytes_to_shrink;
// Don't adjust the size of the page. The area is just uncommitted but not
// released.
chunk->area_end_ -= bytes_to_shrink;
UncommitBlock(free_start, bytes_to_shrink);
if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
if (chunk->reservation_.IsReserved())
chunk->reservation_.Guard(chunk->area_end_);
else
base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
}
}
MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
intptr_t commit_area_size,
@@ -758,47 +743,6 @@ void Page::ResetFreeListStatistics() {
available_in_free_list_ = 0;
}
size_t Page::ShrinkToHighWaterMark() {
// Shrink pages to high water mark. The water mark points either to a filler
// or the area_end.
HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
if (filler->address() == area_end()) return 0;
CHECK(filler->IsFiller());
if (!filler->IsFreeSpace()) return 0;
#ifdef DEBUG
// Check that the filler is indeed the last filler on the page.
HeapObjectIterator it(this);
HeapObject* filler2 = nullptr;
for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
filler2 = HeapObject::FromAddress(obj->address() + obj->Size());
}
if (filler2 == nullptr || filler2->address() == area_end()) return 0;
DCHECK(filler2->IsFiller());
DCHECK_EQ(filler->address(), filler2->address());
#endif // DEBUG
size_t unused = RoundDown(
static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
base::OS::CommitPageSize());
if (unused > 0) {
if (FLAG_trace_gc_verbose) {
PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
reinterpret_cast<void*>(this),
reinterpret_cast<void*>(area_end()),
reinterpret_cast<void*>(area_end() - unused));
}
heap()->CreateFillerObjectAt(
filler->address(),
static_cast<int>(area_end() - filler->address() - unused),
ClearRecordedSlots::kNo);
heap()->memory_allocator()->ShrinkChunk(this, unused);
CHECK(filler->IsFiller());
CHECK_EQ(filler->address() + filler->Size(), area_end());
}
return unused;
}
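
A hedged worked example for ShrinkToHighWaterMark above (the helper is removed again by this revert): the trimmable space is the distance from the trailing filler to area_end(), minus room for a minimal FreeSpace object, rounded down to the OS commit page size. Hypothetical numbers, assuming a 4 KB commit page and the 24-byte 64-bit FreeSpace::kSize implied by the offsets later in this diff:

// Sketch only: made-up gap of 500000 bytes between the filler and area_end().
#include <cstddef>
constexpr size_t RoundDown(size_t x, size_t granularity) {
  return x - (x % granularity);
}
constexpr size_t kAssumedCommitPageSize = 4 * 1024;
constexpr size_t kAssumedFreeSpaceSize = 24;
constexpr size_t kGapToAreaEnd = 500000;
static_assert(RoundDown(kGapToAreaEnd - kAssumedFreeSpaceSize,
                        kAssumedCommitPageSize) == 499712,
              "122 whole commit pages can be uncommitted");
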
void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
Address start_free) {
// We do not allow partial shrink for code.
@@ -1270,25 +1214,17 @@ Object* PagedSpace::FindObject(Address addr) {
return Smi::FromInt(0);
}
void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(!heap()->deserialization_complete());
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
EmptyAllocationInfo();
ResetFreeList();
for (Page* page : *this) {
DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
size_t unused = page->ShrinkToHighWaterMark();
accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
AccountUncommitted(unused);
bool PagedSpace::Expand() {
int size = AreaSize();
if (snapshotable() && !HasPages()) {
size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
}
}
bool PagedSpace::Expand() {
const int size = AreaSize();
if (!heap()->CanExpandOldGeneration(size)) return false;
Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
if (p == nullptr) return false;
AccountCommitted(static_cast<intptr_t>(p->size()));
// Pages created during bootstrapping may contain immortal immovable objects.
@@ -1379,6 +1315,7 @@ void PagedSpace::IncreaseCapacity(int size) {
void PagedSpace::ReleasePage(Page* page) {
DCHECK_EQ(page->LiveBytes(), 0);
DCHECK_EQ(AreaSize(), page->area_size());
DCHECK_EQ(page->owner(), this);
free_list_.EvictFreeListItems(page);
@@ -1397,8 +1334,10 @@ void PagedSpace::ReleasePage(Page* page) {
}
AccountUncommitted(static_cast<intptr_t>(page->size()));
accounting_stats_.ShrinkSpace(page->area_size());
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
DCHECK(Capacity() > 0);
accounting_stats_.ShrinkSpace(AreaSize());
}
#ifdef DEBUG
......
@@ -235,10 +235,7 @@ class MemoryChunk {
IN_TO_SPACE, // All pages in new space have one of these two set.
NEW_SPACE_BELOW_AGE_MARK,
EVACUATION_CANDIDATE,
// |NEVER_EVACUATE|: A page tagged with this flag will never be selected
// for evacuation. Typically used for immortal immovable pages.
NEVER_EVACUATE,
NEVER_EVACUATE, // May contain immortal immutables.
// Large objects can have a progress bar in their page header. These object
// are scanned in increments and will be kept black while being scanned.
@@ -725,7 +722,7 @@ class Page : public MemoryChunk {
// account.
// TODO(hpayer): This limit should be way smaller but we currently have
// short living objects >256K.
static const int kMaxRegularHeapObjectSize = 400 * KB;
static const int kMaxRegularHeapObjectSize = 600 * KB;
static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner);
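
A hedged aside (not from the CL): objects larger than kMaxRegularHeapObjectSize are allocated in the large-object space, so the limit has to stay below a page's allocatable area. With the restored 1 MB pages, 600 KB leaves room for the page header; the 500k-pages CL had lowered the limit to 400 KB for the same reason.

// Sketch only: assumed numbers; the real allocatable area is Page::kPageSize
// minus the page header (Page::kObjectStartOffset), generously approximated
// here as 4 KB.
constexpr int KB = 1024;
constexpr int kAssumedHeaderSize = 4 * KB;
static_assert(600 * KB <= 1024 * KB - kAssumedHeaderSize,
              "600 KB objects fit on a 1 MB page");
static_assert(400 * KB <= 512 * KB - kAssumedHeaderSize,
              "400 KB objects fit on a 512 KB page");
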
@@ -826,8 +823,6 @@ class Page : public MemoryChunk {
available_in_free_list_.Increment(available);
}
size_t ShrinkToHighWaterMark();
#ifdef DEBUG
void Print();
#endif // DEBUG
@@ -1308,8 +1303,6 @@ class MemoryAllocator {
intptr_t commit_area_size,
Executability executable, Space* space);
void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink);
Address ReserveAlignedMemory(size_t requested, size_t alignment,
base::VirtualMemory* controller);
Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
@@ -1625,7 +1618,6 @@ class AllocationStats BASE_EMBEDDED {
capacity_ -= size_in_bytes;
size_ -= size_in_bytes;
CHECK_GE(size_, 0);
CHECK_GE(capacity_, 0);
}
// Allocate from available bytes (available -> size).
@@ -2191,10 +2183,6 @@ class PagedSpace : public Space {
iterator begin() { return iterator(anchor_.next_page()); }
iterator end() { return iterator(&anchor_); }
// Shrink immortal immovable pages of the space to be exactly the size needed
// using the high water mark.
void ShrinkImmortalImmovablePages();
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
......
@@ -2438,19 +2438,13 @@ bool Isolate::Init(Deserializer* des) {
runtime_profiler_ = new RuntimeProfiler(this);
// If we are deserializing, read the state into the now-empty heap.
{
AlwaysAllocateScope always_allocate(this);
if (!create_heap_objects) {
des->Deserialize(this);
}
load_stub_cache_->Initialize();
store_stub_cache_->Initialize();
if (FLAG_ignition || serializer_enabled()) {
interpreter_->Initialize();
}
heap_.NotifyDeserializationComplete();
if (!create_heap_objects) {
des->Deserialize(this);
}
load_stub_cache_->Initialize();
store_stub_cache_->Initialize();
if (FLAG_ignition || serializer_enabled()) {
interpreter_->Initialize();
}
// Finish initialization of ThreadLocal after deserialization is done.
@@ -2481,6 +2475,8 @@ bool Isolate::Init(Deserializer* des) {
time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
heap_.NotifyDeserializationComplete();
if (!create_heap_objects) {
// Now that the heap is consistent, it's OK to generate the code for the
// deopt entry table that might have been referred to by optimized code in
......
@@ -4712,7 +4712,6 @@ class FreeSpace: public HeapObject {
// Size is smi tagged when it is stored.
static const int kSizeOffset = HeapObject::kHeaderSize;
static const int kNextOffset = POINTER_SIZE_ALIGN(kSizeOffset + kPointerSize);
static const int kSize = kNextOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
@@ -10468,12 +10467,12 @@ class JSArray: public JSObject {
static const int kLengthOffset = JSObject::kHeaderSize;
static const int kSize = kLengthOffset + kPointerSize;
// 400 * KB is the Page::kMaxRegularHeapObjectSize defined in spaces.h which
// 600 * KB is the Page::kMaxRegularHeapObjectSize defined in spaces.h which
// we do not want to include in objects.h
// Note that Page::kMaxRegularHeapObjectSize has to be in sync with
// kInitialMaxFastElementArray which is checked in a DCHECK in heap.cc.
static const int kInitialMaxFastElementArray =
(400 * KB - FixedArray::kHeaderSize - kSize - AllocationMemento::kSize) /
(600 * KB - FixedArray::kHeaderSize - kSize - AllocationMemento::kSize) /
kPointerSize;
private:
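
A hedged worked example for the element limit above (illustrative 64-bit sizes, not authoritative; the real constants live in objects.h): the limit is chosen so that the elements backing store, the JSArray itself, and a possible AllocationMemento together still fit into one regular-sized heap object.

// Sketch only: assumed 64-bit layout sizes.
constexpr int KB = 1024;
constexpr int kAssumedPointerSize = 8;
constexpr int kAssumedFixedArrayHeaderSize = 16;  // map word + length
constexpr int kAssumedJSArraySize = 32;
constexpr int kAssumedAllocationMementoSize = 16;
constexpr int kInitialMaxFastElementArray =
    (600 * KB - kAssumedFixedArrayHeaderSize - kAssumedJSArraySize -
     kAssumedAllocationMementoSize) / kAssumedPointerSize;
static_assert(kInitialMaxFastElementArray == 79292,
              "about 79k fast elements under these assumed sizes");
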
......
@@ -31,6 +31,19 @@ bool Snapshot::HasContextSnapshot(Isolate* isolate, size_t index) {
return index < num_contexts;
}
uint32_t Snapshot::SizeOfFirstPage(Isolate* isolate, AllocationSpace space) {
DCHECK(space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE);
if (!isolate->snapshot_available()) {
return static_cast<uint32_t>(MemoryAllocator::PageAreaSize(space));
}
uint32_t size;
int offset = kFirstPageSizesOffset + (space - FIRST_PAGED_SPACE) * kInt32Size;
memcpy(&size, isolate->snapshot_blob()->data + offset, kInt32Size);
return size;
}
bool Snapshot::Initialize(Isolate* isolate) {
if (!isolate->snapshot_available()) return false;
base::ElapsedTimer timer;
@@ -76,8 +89,25 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
return Handle<Context>::cast(result);
}
void ProfileDeserialization(const SnapshotData* startup_snapshot,
const List<SnapshotData*>* context_snapshots) {
void UpdateMaxRequirementPerPage(
uint32_t* requirements,
Vector<const SerializedData::Reservation> reservations) {
int space = 0;
uint32_t current_requirement = 0;
for (const auto& reservation : reservations) {
current_requirement += reservation.chunk_size();
if (reservation.is_last()) {
requirements[space] = std::max(requirements[space], current_requirement);
current_requirement = 0;
space++;
}
}
DCHECK_EQ(i::Serializer::kNumberOfSpaces, space);
}
void CalculateFirstPageSizes(const SnapshotData* startup_snapshot,
const List<SnapshotData*>* context_snapshots,
uint32_t* sizes_out) {
if (FLAG_profile_deserialization) {
int startup_total = 0;
PrintF("Deserialization will reserve:\n");
@@ -93,6 +123,36 @@ void ProfileDeserialization(const SnapshotData* startup_snapshot,
PrintF("%10d bytes per context #%d\n", context_total, i);
}
}
uint32_t startup_requirements[i::Serializer::kNumberOfSpaces];
uint32_t context_requirements[i::Serializer::kNumberOfSpaces];
for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
startup_requirements[space] = 0;
context_requirements[space] = 0;
}
UpdateMaxRequirementPerPage(startup_requirements,
startup_snapshot->Reservations());
for (const auto& context_snapshot : *context_snapshots) {
UpdateMaxRequirementPerPage(context_requirements,
context_snapshot->Reservations());
}
for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
// If the space requirement for a page is less than a page size, we consider
// limiting the size of the first page in order to save memory on startup.
uint32_t required = startup_requirements[space] +
2 * context_requirements[space] +
Page::kObjectStartOffset;
// Add a small allowance to the code space for small scripts.
if (space == CODE_SPACE) required += 32 * KB;
if (space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE) {
uint32_t max_size =
MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space));
sizes_out[space - FIRST_PAGED_SPACE] = std::min(required, max_size);
}
}
}
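
A hedged sketch of the accumulation above (it mirrors UpdateMaxRequirementPerPage with made-up numbers): chunk sizes are summed until is_last() closes a space, the per-space maximum is kept across all snapshots, and CalculateFirstPageSizes then sizes each first page as startup requirement + 2 * context requirement + Page::kObjectStartOffset, capped at the full page area.

// Sketch only: standalone restatement of the max-per-space accumulation.
#include <algorithm>
#include <cstdint>
#include <vector>
struct Reservation {
  uint32_t chunk_size;
  bool is_last;  // true for the last chunk of a space
};
void UpdateMaxRequirement(uint32_t* requirements,
                          const std::vector<Reservation>& reservations) {
  int space = 0;
  uint32_t current = 0;
  for (const auto& r : reservations) {
    current += r.chunk_size;
    if (r.is_last) {
      requirements[space] = std::max(requirements[space], current);
      current = 0;
      space++;
    }
  }
}
// Example: space 0 needs 32 KB + 16 KB = 48 KB, space 1 needs 96 KB.
//   uint32_t req[2] = {0, 0};
//   UpdateMaxRequirement(req, {{32768, false}, {16384, true}, {98304, true}});
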
v8::StartupData Snapshot::CreateSnapshotBlob(
@@ -106,9 +166,13 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
total_length += context_snapshot->RawData().length();
}
ProfileDeserialization(startup_snapshot, context_snapshots);
uint32_t first_page_sizes[kNumPagedSpaces];
CalculateFirstPageSizes(startup_snapshot, context_snapshots,
first_page_sizes);
char* data = new char[total_length];
memcpy(data + kFirstPageSizesOffset, first_page_sizes,
kNumPagedSpaces * kInt32Size);
memcpy(data + kNumberOfContextsOffset, &num_contexts, kInt32Size);
int payload_offset = StartupSnapshotOffset(num_contexts);
int payload_length = startup_snapshot->RawData().length();
......
@@ -67,6 +67,9 @@ class Snapshot : public AllStatic {
static bool EmbedsScript(Isolate* isolate);
static uint32_t SizeOfFirstPage(Isolate* isolate, AllocationSpace space);
// To be implemented by the snapshot source.
static const v8::StartupData* DefaultSnapshotBlob();
@@ -85,16 +88,21 @@
int index);
// Snapshot blob layout:
// [0] number of contexts N
// [1] offset to context 0
// [2] offset to context 1
// [0 - 5] pre-calculated first page sizes for paged spaces
// [6] number of contexts N
// [7] offset to context 0
// [8] offset to context 1
// ...
// ... offset to context N - 1
// ... startup snapshot data
// ... context 0 snapshot data
// ... context 1 snapshot data
static const int kNumberOfContextsOffset = 0;
static const int kNumPagedSpaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
static const int kFirstPageSizesOffset = 0;
static const int kNumberOfContextsOffset =
kFirstPageSizesOffset + kNumPagedSpaces * kInt32Size;
static const int kFirstContextOffsetOffset =
kNumberOfContextsOffset + kInt32Size;
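
A hedged note on the offsets above (not from the CL): with N paged-space slots and 4-byte entries, the context count lands at byte offset 4 * N and the first context offset directly after it; the "[0 - 5] ... [6]" numbering in the layout comment corresponds to N = 6 int32 slots.

// Sketch only: the offset arithmetic with the slot count as a parameter,
// assuming kInt32Size == 4.
constexpr int kAssumedInt32Size = 4;
constexpr int NumberOfContextsOffset(int num_paged_spaces) {
  return 0 /* kFirstPageSizesOffset */ + num_paged_spaces * kAssumedInt32Size;
}
constexpr int FirstContextOffsetOffset(int num_paged_spaces) {
  return NumberOfContextsOffset(num_paged_spaces) + kAssumedInt32Size;
}
static_assert(NumberOfContextsOffset(6) == 24, "int32 slot [6] from the comment");
static_assert(FirstContextOffsetOffset(6) == 28, "int32 slot [7]");
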
......
@@ -28,35 +28,6 @@ int FixedArrayLenFromSize(int size) {
return (size - FixedArray::kHeaderSize) / kPointerSize;
}
std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
int remainder) {
std::vector<Handle<FixedArray>> handles;
Isolate* isolate = heap->isolate();
const int kArraySize = 128;
const int kArrayLen = heap::FixedArrayLenFromSize(kArraySize);
CHECK_EQ(Page::kAllocatableMemory % kArraySize, 0);
Handle<FixedArray> array;
for (size_t allocated = 0;
allocated != (Page::kAllocatableMemory - remainder);
allocated += array->Size()) {
if (allocated == (Page::kAllocatableMemory - kArraySize)) {
array = isolate->factory()->NewFixedArray(
heap::FixedArrayLenFromSize(kArraySize - remainder), TENURED);
CHECK_EQ(kArraySize - remainder, array->Size());
} else {
array = isolate->factory()->NewFixedArray(kArrayLen, TENURED);
CHECK_EQ(kArraySize, array->Size());
}
if (handles.empty()) {
// Check that allocations started on a new page.
CHECK_EQ(array->address(),
Page::FromAddress(array->address())->area_start());
}
handles.push_back(array);
}
return handles;
}
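
A hedged worked example for the helper above (it is removed again by this revert): FixedArrayLenFromSize inverts FixedArray's size formula, and the loop fills Page::kAllocatableMemory with 128-byte arrays, shrinking the last one by remainder bytes so exactly that much slack is left at the end of the page.

// Sketch only: assumed 64-bit sizes (8-byte pointers, 16-byte FixedArray header).
constexpr int kAssumedPointerSize = 8;
constexpr int kAssumedFixedArrayHeaderSize = 16;
constexpr int FixedArrayLenFromSize(int size) {
  return (size - kAssumedFixedArrayHeaderSize) / kAssumedPointerSize;
}
static_assert(FixedArrayLenFromSize(128) == 14, "a 128-byte array holds 14 slots");
static_assert(FixedArrayLenFromSize(128 - 32) == 10,
              "with remainder == 32 the final array shrinks to 10 slots");
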
std::vector<Handle<FixedArray>> CreatePadding(Heap* heap, int padding_size,
PretenureFlag tenure,
int object_size) {
......
@@ -15,12 +15,6 @@ void SealCurrentObjects(Heap* heap);
int FixedArrayLenFromSize(int size);
// Fill a page with fixed arrays leaving remainder behind. The function does
// not create additional fillers and assumes that the space has just been
// sealed.
std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
int remainder);
std::vector<Handle<FixedArray>> CreatePadding(
Heap* heap, int padding_size, PretenureFlag tenure,
int object_size = Page::kMaxRegularHeapObjectSize);
......
@@ -2218,18 +2218,6 @@ static Address AlignOldSpace(AllocationAlignment alignment, int offset) {
// Test the case where allocation must be done from the free list, so filler
// may precede or follow the object.
TEST(TestAlignedOverAllocation) {
Heap* heap = CcTest::heap();
// Test checks for fillers before and behind objects and requires a fresh
// page and empty free list.
heap::AbandonCurrentlyFreeMemory(heap->old_space());
// Allocate a dummy object to properly set up the linear allocation info.
AllocationResult dummy =
heap->old_space()->AllocateRawUnaligned(kPointerSize);
CHECK(!dummy.IsRetry());
heap->CreateFillerObjectAt(
HeapObject::cast(dummy.ToObjectChecked())->address(), kPointerSize,
ClearRecordedSlots::kNo);
// Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones.
const intptr_t double_misalignment = kDoubleSize - kPointerSize;
Address start;
@@ -2371,9 +2359,6 @@ static void FillUpNewSpace(NewSpace* new_space) {
TEST(GrowAndShrinkNewSpace) {
CcTest::InitializeVM();
// Avoid shrinking new space in GC epilogue. This can happen if allocation
// throughput samples have been taken while executing the benchmark.
i::FLAG_predictable = true;
Heap* heap = CcTest::heap();
NewSpace* new_space = heap->new_space();
@@ -3616,12 +3601,8 @@ TEST(ReleaseOverReservedPages) {
i::FLAG_page_promotion = false;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
// If there's snapshot available, we don't know whether 20 small arrays will
// fit on the initial pages.
if (!isolate->snapshot_available()) return;
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
v8::HandleScope scope(CcTest::isolate());
static const int number_of_test_pages = 20;
@@ -3651,8 +3632,14 @@ TEST(ReleaseOverReservedPages) {
"triggered by test 2");
CHECK_GE(overall_page_count, old_space->CountTotalPages() * 2);
// Triggering a last-resort GC should cause all pages to be released to the
// OS so that other processes can seize the memory. If we get a failure here
// where there are 2 pages left instead of 1, then we should increase the
// size of the first page a little in SizeOfFirstPage in spaces.cc. The
// first page should be small in order to reduce memory used when the VM
// boots, but if the 20 small arrays don't fit on the first page then that's
// an indication that it is too small.
heap->CollectAllAvailableGarbage("triggered really hard");
// Triggering a last-resort GC should release all additional pages.
CHECK_EQ(initial_page_count, old_space->CountTotalPages());
}
......
@@ -32,7 +32,6 @@
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
namespace v8 {
namespace internal {
@@ -479,7 +478,8 @@ TEST(LargeObjectSpace) {
CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE).IsRetry());
}
TEST(SizeOfInitialHeap) {
TEST(SizeOfFirstPageIsLargeEnough) {
if (i::FLAG_always_opt) return;
// Bootstrapping without a snapshot causes more allocations.
CcTest::InitializeVM();
@@ -495,31 +495,22 @@ TEST(SizeOfInitialHeap) {
return;
}
// The limit for each space for an empty isolate containing just the
// snapshot.
const size_t kMaxInitialSizePerSpace = 1536 * KB; // 1.5MB
// If this test fails due to enabling experimental natives that are not part
// of the snapshot, we may need to adjust CalculateFirstPageSizes.
// Freshly initialized VM gets by with the snapshot size (which is below
// kMaxInitialSizePerSpace per space).
Heap* heap = isolate->heap();
int page_count[LAST_PAGED_SPACE + 1] = {0, 0, 0, 0};
// Freshly initialized VM gets by with one page per space.
for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
// Debug code can be very large, so skip CODE_SPACE if we are generating it.
if (i == CODE_SPACE && i::FLAG_debug_code) continue;
page_count[i] = heap->paged_space(i)->CountTotalPages();
// Check that the initial heap is also below the limit.
CHECK_LT(static_cast<size_t>(heap->paged_space(i)->CommittedMemory()),
kMaxInitialSizePerSpace);
CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
}
// Executing the empty script gets by with the same number of pages, i.e.,
// requires no extra space.
// Executing the empty script gets by with one page per space.
CompileRun("/*empty*/");
for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
// Debug code can be very large, so skip CODE_SPACE if we are generating it.
if (i == CODE_SPACE && i::FLAG_debug_code) continue;
CHECK_EQ(page_count[i], isolate->heap()->paged_space(i)->CountTotalPages());
CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
}
// No large objects required to perform the above steps.
@@ -690,105 +681,5 @@ UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
isolate->Dispose();
}
TEST(ShrinkPageToHighWaterMarkFreeSpaceEnd) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
heap::SealCurrentObjects(CcTest::heap());
// Prepare page that only contains a single object and a trailing FreeSpace
// filler.
Handle<FixedArray> array = isolate->factory()->NewFixedArray(128, TENURED);
Page* page = Page::FromAddress(array->address());
// Reset space so high water mark is consistent.
CcTest::heap()->old_space()->ResetFreeList();
CcTest::heap()->old_space()->EmptyAllocationInfo();
HeapObject* filler =
HeapObject::FromAddress(array->address() + array->Size());
CHECK(filler->IsFreeSpace());
size_t shrinked = page->ShrinkToHighWaterMark();
size_t should_have_shrinked =
RoundDown(static_cast<size_t>(Page::kAllocatableMemory - array->Size()),
base::OS::CommitPageSize());
CHECK_EQ(should_have_shrinked, shrinked);
}
TEST(ShrinkPageToHighWaterMarkNoFiller) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
heap::SealCurrentObjects(CcTest::heap());
const int kFillerSize = 0;
std::vector<Handle<FixedArray>> arrays =
heap::FillOldSpacePageWithFixedArrays(CcTest::heap(), kFillerSize);
Handle<FixedArray> array = arrays.back();
Page* page = Page::FromAddress(array->address());
CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);
// Reset space so high water mark and fillers are consistent.
CcTest::heap()->old_space()->ResetFreeList();
CcTest::heap()->old_space()->EmptyAllocationInfo();
const size_t shrinked = page->ShrinkToHighWaterMark();
CHECK_EQ(0, shrinked);
}
TEST(ShrinkPageToHighWaterMarkOneWordFiller) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
heap::SealCurrentObjects(CcTest::heap());
const int kFillerSize = kPointerSize;
std::vector<Handle<FixedArray>> arrays =
heap::FillOldSpacePageWithFixedArrays(CcTest::heap(), kFillerSize);
Handle<FixedArray> array = arrays.back();
Page* page = Page::FromAddress(array->address());
CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);
// Reset space so high water mark and fillers are consistent.
CcTest::heap()->old_space()->ResetFreeList();
CcTest::heap()->old_space()->EmptyAllocationInfo();
HeapObject* filler =
HeapObject::FromAddress(array->address() + array->Size());
CHECK_EQ(filler->map(), CcTest::heap()->one_pointer_filler_map());
const size_t shrinked = page->ShrinkToHighWaterMark();
CHECK_EQ(0, shrinked);
}
TEST(ShrinkPageToHighWaterMarkTwoWordFiller) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
heap::SealCurrentObjects(CcTest::heap());
const int kFillerSize = 2 * kPointerSize;
std::vector<Handle<FixedArray>> arrays =
heap::FillOldSpacePageWithFixedArrays(CcTest::heap(), kFillerSize);
Handle<FixedArray> array = arrays.back();
Page* page = Page::FromAddress(array->address());
CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);
// Reset space so high water mark and fillers are consistent.
CcTest::heap()->old_space()->ResetFreeList();
CcTest::heap()->old_space()->EmptyAllocationInfo();
HeapObject* filler =
HeapObject::FromAddress(array->address() + array->Size());
CHECK_EQ(filler->map(), CcTest::heap()->two_pointer_filler_map());
const size_t shrinked = page->ShrinkToHighWaterMark();
CHECK_EQ(0, shrinked);
}
} // namespace internal
} // namespace v8
@@ -1177,13 +1177,13 @@ TEST(CodeSerializerThreeBigStrings) {
Vector<const uint8_t> source_b =
ConstructSource(STATIC_CHAR_VECTOR("var b = \""), STATIC_CHAR_VECTOR("b"),
STATIC_CHAR_VECTOR("\";"), 400000);
STATIC_CHAR_VECTOR("\";"), 600000);
Handle<String> source_b_str =
f->NewStringFromOneByte(source_b).ToHandleChecked();
Vector<const uint8_t> source_c =
ConstructSource(STATIC_CHAR_VECTOR("var c = \""), STATIC_CHAR_VECTOR("c"),
STATIC_CHAR_VECTOR("\";"), 400000);
STATIC_CHAR_VECTOR("\";"), 500000);
Handle<String> source_c_str =
f->NewStringFromOneByte(source_c).ToHandleChecked();
@@ -1216,10 +1216,10 @@ TEST(CodeSerializerThreeBigStrings) {
v8::Maybe<int32_t> result =
CompileRun("(a + b).length")
->Int32Value(v8::Isolate::GetCurrent()->GetCurrentContext());
CHECK_EQ(400000 + 700000, result.FromJust());
CHECK_EQ(600000 + 700000, result.FromJust());
result = CompileRun("(b + c).length")
->Int32Value(v8::Isolate::GetCurrent()->GetCurrentContext());
CHECK_EQ(400000 + 400000, result.FromJust());
CHECK_EQ(500000 + 600000, result.FromJust());
Heap* heap = isolate->heap();
v8::Local<v8::String> result_str =
CompileRun("a")
......