Commit 1d837093 authored by Hannes Payer, committed by Commit Bot

[heap] Clean-up MemoryChunk allocation area constants.

Change-Id: I8ba59546ab93c7af98bc5ece2f0160628844dd92
Reviewed-on: https://chromium-review.googlesource.com/c/1280584
Reviewed-by: Yang Guo <yangguo@chromium.org>
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56908}
parent ec969ea3
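
Note: this CL replaces the old page-layout constants (Page::kAllocatableMemory, Page::kObjectStartOffset, MemoryAllocator::PageAreaSize and friends) with query methods on a new MemoryChunkLayout class, as the hunks below show. A minimal migration sketch, assuming V8's internal src/heap/spaces.h; UsableBytesOnPage is a hypothetical helper used only to contrast the two APIs:

// Sketch only, not part of this CL.
size_t UsableBytesOnPage(AllocationSpace space) {
  // Before this CL, callers dispatched on compile-time constants, e.g.
  //   (space == CODE_SPACE) ? MemoryAllocator::CodePageAreaSize()
  //                         : Page::kAllocatableMemory;
  // After this CL, one query handles data pages, code pages, and any space:
  return MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
}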
@@ -148,9 +148,8 @@ Heap::Heap()
 }
 
 size_t Heap::MaxReserved() {
-  const double kFactor = Page::kPageSize * 1.0 / Page::kAllocatableMemory;
-  return static_cast<size_t>(
-      (2 * max_semi_space_size_ + max_old_generation_size_) * kFactor);
+  return static_cast<size_t>(2 * max_semi_space_size_ +
+                             max_old_generation_size_);
 }
 
 size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
@@ -240,6 +239,8 @@ size_t Heap::Available() {
   for (SpaceIterator it(this); it.has_next();) {
     total += it.next()->Available();
   }
 
+  total += memory_allocator()->Available();
+
   return total;
 }
@@ -1514,7 +1515,7 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
         AllocationResult allocation;
         int size = chunk.size;
         DCHECK_LE(static_cast<size_t>(size),
-                  MemoryAllocator::PageAreaSize(
+                  MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
                       static_cast<AllocationSpace>(space)));
         if (space == NEW_SPACE) {
           allocation = new_space()->AllocateRawUnaligned(size);
@@ -2366,10 +2366,11 @@ class Evacuator : public Malloced {
   // NewSpacePages with more live bytes than this threshold qualify for fast
   // evacuation.
-  static int PageEvacuationThreshold() {
+  static intptr_t NewSpacePageEvacuationThreshold() {
     if (FLAG_page_promotion)
-      return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
-    return Page::kAllocatableMemory + kPointerSize;
+      return FLAG_page_promotion_threshold *
+             MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
+    return MemoryChunkLayout::AllocatableMemoryInDataPage() + kPointerSize;
   }
 
   Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
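
For intuition, a self-contained sketch of the threshold arithmetic above. The 512 KB page size matches kPageSize in this header; the header overhead and the 70% promotion threshold are assumed stand-ins for MemoryChunkLayout::AllocatableMemoryInDataPage() and FLAG_page_promotion_threshold:

#include <cstdint>
#include <cstdio>

constexpr intptr_t kPageSizeBytes = 512 * 1024;     // Page::kPageSize
constexpr intptr_t kAssumedHeaderBytes = 2 * 1024;  // assumption, not V8's value
constexpr intptr_t kAllocatableBytes = kPageSizeBytes - kAssumedHeaderBytes;
constexpr intptr_t kAssumedThresholdPercent = 70;   // assumed flag value

intptr_t NewSpacePageEvacuationThreshold(bool page_promotion_enabled) {
  if (page_promotion_enabled)
    return kAssumedThresholdPercent * kAllocatableBytes / 100;
  // With promotion disabled the threshold exceeds a full page, so no
  // new-space page ever qualifies.
  return kAllocatableBytes + static_cast<intptr_t>(sizeof(void*));
}

int main() {
  std::printf("threshold = %jd bytes\n",
              static_cast<intmax_t>(NewSpacePageEvacuationThreshold(true)));
}

With these assumed numbers, a new-space page qualifies for fast evacuation once roughly 357 KB of it is live.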
@@ -2619,7 +2620,7 @@ bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
   const bool reduce_memory = heap()->ShouldReduceMemory();
   const Address age_mark = heap()->new_space()->age_mark();
   return !reduce_memory && !p->NeverEvacuate() &&
-         (live_bytes > Evacuator::PageEvacuationThreshold()) &&
+         (live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
          !p->Contains(age_mark) && heap()->CanExpandOldGeneration(live_bytes);
 }
@@ -242,7 +242,8 @@ SlotCallbackResult Scavenger::EvacuateObjectDefault(Map* map,
                                                     HeapObjectReference** slot,
                                                     HeapObject* object,
                                                     int object_size) {
-  SLOW_DCHECK(object_size <= Page::kAllocatableMemory);
+  SLOW_DCHECK(static_cast<size_t>(object_size) <=
+              MemoryChunkLayout::AllocatableMemoryInDataPage());
   SLOW_DCHECK(object->SizeFromMap(map) == object_size);
   CopyAndForwardResult result;
This diff is collapsed.
@@ -45,6 +45,7 @@ class LinearAllocationArea;
 class LocalArrayBufferTracker;
 class MemoryAllocator;
 class MemoryChunk;
+class MemoryChunkLayout;
 class Page;
 class PagedSpace;
 class SemiSpace;
@@ -121,9 +122,6 @@ class Space;
 #define DCHECK_CODEOBJECT_SIZE(size, code_space) \
   DCHECK((0 < size) && (size <= code_space->AreaSize()))
 
-#define DCHECK_PAGE_OFFSET(offset) \
-  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
-
 enum FreeListCategoryType {
   kTiniest,
   kTiny,
@@ -239,6 +237,19 @@ class FreeListCategory {
   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
 };
 
+class MemoryChunkLayout {
+ public:
+  static size_t CodePageGuardStartOffset();
+  static size_t CodePageGuardSize();
+  static intptr_t ObjectStartOffsetInCodePage();
+  static intptr_t ObjectEndOffsetInCodePage();
+  static size_t AllocatableMemoryInCodePage();
+  static intptr_t ObjectStartOffsetInDataPage();
+  V8_EXPORT_PRIVATE static size_t AllocatableMemoryInDataPage();
+  static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
+  static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
+};
+
 // MemoryChunk represents a memory region owned by a specific space.
 // It is divided into the header and the body. Chunk start is always
 // 1MB aligned. Start of the body is aligned so it can accommodate
@@ -349,7 +360,7 @@ class MemoryChunk {
   static const intptr_t kMarkBitmapOffset = kFlagsOffset + kPointerSize;
   static const intptr_t kReservationOffset = kMarkBitmapOffset + kPointerSize;
 
-  static const size_t kMinHeaderSize =
+  static const size_t kHeaderSize =
       kSizeOffset         // NOLINT
       + kSizetSize        // size_t size
       + kUIntptrSize      // uintptr_t flags_
@@ -382,17 +393,9 @@ class MemoryChunk {
       + kIntptrSize    // std::atomic<intptr_t> young_generation_live_byte_count_
       + kPointerSize;  // Bitmap* young_generation_bitmap_
 
-  static const size_t kHeaderSize = kMinHeaderSize;
-
-  // TODO(hpayer): Fix kObjectStartOffset and kAllocatableMemory for code pages.
-  static const int kObjectStartOffset =
-      kHeaderSize + (kPointerSize - kHeaderSize % kPointerSize);
-
   // Page size in bytes. This must be a multiple of the OS page size.
   static const int kPageSize = 1 << kPageSizeBits;
 
-  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
-
   // Maximum number of nested code memory modification scopes.
   // TODO(6792,mstarzinger): Drop to 3 or lower once WebAssembly is off heap.
   static const int kMaxWriteUnprotectCounter = 4;
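
The deleted constants above encoded the data-page layout directly; MemoryChunkLayout now computes the same quantities. Below is a self-contained model of those relations, in which the 512 KB page size mirrors kPageSize but the header size is an assumed stand-in for MemoryChunk::kHeaderSize:

#include <cstddef>
#include <cstdio>

constexpr size_t kPointerSize = sizeof(void*);
constexpr size_t kPageSize = size_t{1} << 19;  // 512 KB
constexpr size_t kAssumedHeaderSize = 1210;    // assumption, not V8's value

// Old Page::kObjectStartOffset: header size rounded up to pointer alignment.
constexpr size_t ObjectStartOffsetInDataPage() {
  return kAssumedHeaderSize + (kPointerSize - kAssumedHeaderSize % kPointerSize);
}

// Old Page::kAllocatableMemory: whatever the header leaves of the page.
constexpr size_t AllocatableMemoryInDataPage() {
  return kPageSize - ObjectStartOffsetInDataPage();
}

int main() {
  std::printf("object start offset: %zu bytes\n", ObjectStartOffsetInDataPage());
  std::printf("allocatable memory:  %zu bytes\n", AllocatableMemoryInDataPage());
}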
@@ -748,10 +751,6 @@ class MemoryChunk {
 static_assert(sizeof(std::atomic<intptr_t>) == kPointerSize,
               "sizeof(std::atomic<intptr_t>) == kPointerSize");
 
-static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
-              "kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");
-
 // -----------------------------------------------------------------------------
 // A page is a memory chunk of a size 512K. Large object pages may be larger.
 //
@@ -782,7 +781,7 @@ class Page : public MemoryChunk {
   // Returns the page containing the address provided. The address can
   // potentially point righter after the page. To be also safe for tagged values
   // we subtract a hole word. The valid address ranges from
-  // [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
+  // [page_addr + area_start_ .. page_addr + kPageSize + kPointerSize].
   static Page* FromAllocationAreaAddress(Address address) {
     return Page::FromAddress(address - kPointerSize);
   }
@@ -797,10 +796,6 @@ class Page : public MemoryChunk {
     return (addr & kPageAlignmentMask) == 0;
   }
 
-  static bool IsAtObjectStart(Address addr) {
-    return (addr & kPageAlignmentMask) == kObjectStartOffset;
-  }
-
   static Page* ConvertNewToOld(Page* old_page);
 
   inline void MarkNeverAllocateForTesting();
@@ -822,8 +817,10 @@ class Page : public MemoryChunk {
   // Returns the address for a given offset to the this page.
   Address OffsetToAddress(size_t offset) {
-    DCHECK_PAGE_OFFSET(offset);
-    return address() + offset;
+    Address address_in_page = address() + offset;
+    DCHECK_GE(address_in_page, area_start_);
+    DCHECK_LT(address_in_page, area_end_);
+    return address_in_page;
   }
 
   // WaitUntilSweepingCompleted only works when concurrent sweeping is in
@@ -1269,24 +1266,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
     kPooledAndQueue,
   };
 
-  static size_t CodePageGuardStartOffset();
-
-  static size_t CodePageGuardSize();
-
-  static size_t CodePageAreaStartOffset();
-
-  static size_t CodePageAreaEndOffset();
-
-  static size_t CodePageAreaSize() {
-    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
-  }
-
-  static size_t PageAreaSize(AllocationSpace space) {
-    DCHECK_NE(LO_SPACE, space);
-    return (space == CODE_SPACE) ? CodePageAreaSize()
-                                 : Page::kAllocatableMemory;
-  }
-
   static intptr_t GetCommitPageSize();
 
   // Computes the memory area of discardable memory within a given memory area
@@ -1325,11 +1304,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
     return capacity_ < size ? 0 : capacity_ - size;
   }
 
-  // Returns maximum available bytes that the old space can have.
-  size_t MaxAvailable() {
-    return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
-  }
-
   // Returns an indication of whether a pointer is in a space that has
   // been allocated by this MemoryAllocator.
   V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
@@ -1910,7 +1884,10 @@ class V8_EXPORT_PRIVATE FreeList {
   // The size range of blocks, in bytes.
   static const size_t kMinBlockSize = 3 * kPointerSize;
-  static const size_t kMaxBlockSize = Page::kAllocatableMemory;
+
+  // This is a conservative upper bound. The actual maximum block size takes
+  // padding and alignment of data and code pages into account.
+  static const size_t kMaxBlockSize = Page::kPageSize;
 
   static const size_t kTiniestListMax = 0xa * kPointerSize;
   static const size_t kTinyListMax = 0x1f * kPointerSize;
@@ -2606,7 +2583,8 @@ class NewSpace : public SpaceWithLinearArea {
   // Return the allocated bytes in the active semispace.
   size_t Size() override {
     DCHECK_GE(top(), to_space_.page_low());
-    return to_space_.pages_used() * Page::kAllocatableMemory +
+    return to_space_.pages_used() *
+               MemoryChunkLayout::AllocatableMemoryInDataPage() +
            static_cast<size_t>(top() - to_space_.page_low());
   }
@@ -2616,7 +2594,7 @@ class NewSpace : public SpaceWithLinearArea {
   size_t Capacity() {
     SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
     return (to_space_.current_capacity() / Page::kPageSize) *
-           Page::kAllocatableMemory;
+           MemoryChunkLayout::AllocatableMemoryInDataPage();
   }
 
   // Return the current size of a semispace, allocatable and non-allocatable
@@ -2671,7 +2649,7 @@ class NewSpace : public SpaceWithLinearArea {
     }
     while (current_page != last_page) {
       DCHECK_NE(current_page, age_mark_page);
-      allocated += Page::kAllocatableMemory;
+      allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
       current_page = current_page->next_page();
     }
     DCHECK_GE(top(), current_page->area_start());
@@ -2876,6 +2854,11 @@ class OldSpace : public PagedSpace {
   // Creates an old space object. The constructor does not allocate pages
   // from OS.
   explicit OldSpace(Heap* heap) : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE) {}
+
+  static bool IsAtPageStart(Address addr) {
+    return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
+           MemoryChunkLayout::ObjectStartOffsetInDataPage();
+  }
 };
 
 // -----------------------------------------------------------------------------
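
The new OldSpace::IsAtPageStart relies on the page alignment mask to recover an address's offset within its page. A small self-contained illustration, with an assumed object start offset standing in for MemoryChunkLayout::ObjectStartOffsetInDataPage():

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kPageSize = uintptr_t{1} << 19;    // 512 KB, as in kPageSize
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;
constexpr uintptr_t kAssumedObjectStartOffset = 1216;  // assumption

// Mirrors OldSpace::IsAtPageStart: the address is at the start of the page's
// allocatable area exactly when its offset within the page equals the object
// start offset.
constexpr bool IsAtPageStart(uintptr_t addr) {
  return (addr & kPageAlignmentMask) == kAssumedObjectStartOffset;
}

int main() {
  uintptr_t page_base = 3 * kPageSize;  // some page-aligned address
  std::printf("%d\n", IsAtPageStart(page_base + kAssumedObjectStartOffset));  // 1
  std::printf("%d\n", IsAtPageStart(page_base + 64));                         // 0
}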
@@ -2888,7 +2871,6 @@ class CodeSpace : public PagedSpace {
   explicit CodeSpace(Heap* heap) : PagedSpace(heap, CODE_SPACE, EXECUTABLE) {}
 };
 
-
 // For contiguous spaces, top should be in the space (or at the end) and limit
 // should be the end of the space.
 #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
@@ -2956,9 +2938,7 @@ class ReadOnlySpace : public PagedSpace {
 // -----------------------------------------------------------------------------
 // Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
-// managed by the large object space. A large object is allocated from OS
-// heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
-// A large object always starts at Page::kObjectStartOffset to a page.
+// managed by the large object space.
 // Large objects do not move during garbage collections.
 class LargeObjectSpace : public Space {
@@ -2973,11 +2953,6 @@ class LargeObjectSpace : public Space {
   // Releases internal resources, frees objects in this space.
   void TearDown();
 
-  static size_t ObjectSizeFor(size_t chunk_size) {
-    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
-    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
-  }
-
   V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
                                                      Executability executable);
@@ -47,7 +47,7 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltins() {
     DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
     uint32_t builtin_size =
         deserializer()->ExtractCodeObjectSize(Builtins::kDeserializeLazy);
-    DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+    DCHECK_LE(builtin_size, MemoryChunkLayout::AllocatableMemoryInCodePage());
     result.push_back({builtin_size, kNullAddress, kNullAddress});
   }
@@ -61,7 +61,7 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltins() {
     }
 
     uint32_t builtin_size = deserializer()->ExtractCodeObjectSize(i);
-    DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+    DCHECK_LE(builtin_size, MemoryChunkLayout::AllocatableMemoryInCodePage());
     result.push_back({builtin_size, kNullAddress, kNullAddress});
   }
@@ -126,7 +126,7 @@ void BuiltinDeserializerAllocator::ReserveAndInitializeBuiltinsTableForBuiltin(
   const uint32_t builtin_size =
       deserializer()->ExtractCodeObjectSize(builtin_id);
-  DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
+  DCHECK_LE(builtin_size, MemoryChunkLayout::AllocatableMemoryInCodePage());
 
   Handle<HeapObject> o =
       isolate()->factory()->NewCodeForDeserialization(builtin_size);
@@ -26,7 +26,8 @@ void DefaultSerializerAllocator::UseCustomChunkSize(uint32_t chunk_size) {
 
 static uint32_t PageSizeOfSpace(int space) {
   return static_cast<uint32_t>(
-      MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space)));
+      MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
+          static_cast<AllocationSpace>(space)));
 }
 
 uint32_t DefaultSerializerAllocator::TargetChunkSize(int space) {
@@ -39,10 +39,12 @@ std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
   Handle<FixedArray> array;
   int allocated = 0;
   do {
-    if (allocated + kArraySize * 2 > MemoryChunk::kAllocatableMemory) {
+    if (allocated + kArraySize * 2 >
+        static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage())) {
       int size =
           kArraySize * 2 -
-          ((allocated + kArraySize * 2) - MemoryChunk::kAllocatableMemory) -
+          ((allocated + kArraySize * 2) -
+           static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage())) -
           remainder;
       int last_array_len = heap::FixedArrayLenFromSize(size);
       array = isolate->factory()->NewFixedArray(last_array_len, TENURED);
@@ -59,7 +61,8 @@ std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
                Page::FromAddress(array->address())->area_start());
     }
     handles.push_back(array);
-  } while (allocated < MemoryChunk::kAllocatableMemory);
+  } while (allocated <
+           static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()));
   return handles;
 }
@@ -59,8 +59,10 @@ HEAP_TEST(CompactionFullAbortedPage) {
     {
       HandleScope scope2(isolate);
       CHECK(heap->old_space()->Expand());
-      auto compaction_page_handles =
-          heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED);
+      auto compaction_page_handles = heap::CreatePadding(
+          heap,
+          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
+          TENURED);
       Page* to_be_aborted_page =
           Page::FromAddress(compaction_page_handles.front()->address());
       to_be_aborted_page->SetFlag(
@@ -93,7 +95,9 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
   FLAG_manual_evacuation_candidates_selection = true;
 
   const int objects_per_page = 10;
-  const int object_size = Page::kAllocatableMemory / objects_per_page;
+  const int object_size =
+      static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
+      objects_per_page;
 
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
@@ -109,7 +113,9 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
       // properly adjusted).
       CHECK(heap->old_space()->Expand());
       auto compaction_page_handles = heap::CreatePadding(
-          heap, Page::kAllocatableMemory, TENURED, object_size);
+          heap,
+          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
+          TENURED, object_size);
       Page* to_be_aborted_page =
           Page::FromAddress(compaction_page_handles.front()->address());
       to_be_aborted_page->SetFlag(
@@ -168,7 +174,9 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
   FLAG_manual_evacuation_candidates_selection = true;
 
   const int objects_per_page = 10;
-  const int object_size = Page::kAllocatableMemory / objects_per_page;
+  const int object_size =
+      static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
+      objects_per_page;
 
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
@@ -187,8 +195,11 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
       // properly adjusted).
       CHECK(heap->old_space()->Expand());
       std::vector<Handle<FixedArray>> compaction_page_handles =
-          heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED,
-                              object_size);
+          heap::CreatePadding(
+              heap,
+              static_cast<int>(
+                  MemoryChunkLayout::AllocatableMemoryInDataPage()),
+              TENURED, object_size);
       to_be_aborted_page =
           Page::FromAddress(compaction_page_handles.front()->address());
       to_be_aborted_page->SetFlag(
@@ -257,7 +268,9 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
   FLAG_manual_evacuation_candidates_selection = true;
 
   const int objects_per_page = 10;
-  const int object_size = Page::kAllocatableMemory / objects_per_page;
+  const int object_size =
+      static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
+      objects_per_page;
 
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
@@ -275,7 +288,9 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
       // properly adjusted).
       CHECK(heap->old_space()->Expand());
       auto compaction_page_handles = heap::CreatePadding(
-          heap, Page::kAllocatableMemory, TENURED, object_size);
+          heap,
+          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
+          TENURED, object_size);
       // Sanity check that we have enough space for linking up arrays.
       CHECK_GE(compaction_page_handles.front()->length(), 2);
       to_be_aborted_page =
@@ -4597,7 +4597,8 @@ TEST(Regress388880) {
   // Allocate padding objects in old pointer space so, that object allocated
   // afterwards would end at the end of the page.
   heap::SimulateFullSpace(heap->old_space());
-  size_t padding_size = desired_offset - Page::kObjectStartOffset;
+  size_t padding_size =
+      desired_offset - MemoryChunkLayout::ObjectStartOffsetInDataPage();
   heap::CreatePadding(heap, static_cast<int>(padding_size), TENURED);
 
   Handle<JSObject> o = factory->NewJSObjectFromMap(map1, TENURED);
@@ -6132,7 +6133,8 @@ size_t NearHeapLimitCallback(void* raw_state, size_t current_heap_limit,
 
 size_t MemoryAllocatorSizeFromHeapCapacity(size_t capacity) {
   // Size to capacity factor.
-  double factor = Page::kPageSize * 1.0 / Page::kAllocatableMemory;
+  double factor =
+      Page::kPageSize * 1.0 / MemoryChunkLayout::AllocatableMemoryInDataPage();
   // Some tables (e.g. deoptimization table) are allocated directly with the
   // memory allocator. Allow some slack to account for them.
   size_t slack = 5 * MB;
@@ -76,8 +76,9 @@ UNINITIALIZED_TEST(PagePromotion_NewToOld) {
     // To perform a sanity check on live bytes we need to mark the heap.
     heap::SimulateIncrementalMarking(heap, true);
     // Sanity check that the page meets the requirements for promotion.
-    const int threshold_bytes =
-        FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
+    const int threshold_bytes = static_cast<int>(
+        FLAG_page_promotion_threshold *
+        MemoryChunkLayout::AllocatableMemoryInDataPage() / 100);
     CHECK_GE(heap->incremental_marking()->marking_state()->live_bytes(
                  to_be_promoted_page),
              threshold_bytes);
@@ -98,19 +98,19 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
   v8::PageAllocator* page_allocator =
       memory_allocator->page_allocator(executable);
 
-  size_t header_size = (executable == EXECUTABLE)
-                           ? MemoryAllocator::CodePageGuardStartOffset()
-                           : MemoryChunk::kObjectStartOffset;
+  size_t allocatable_memory_area_offset =
+      MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(space->identity());
   size_t guard_size =
-      (executable == EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
+      (executable == EXECUTABLE) ? MemoryChunkLayout::CodePageGuardSize() : 0;
 
   MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
       reserve_area_size, commit_area_size, executable, space);
   size_t reserved_size =
       ((executable == EXECUTABLE))
-          ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
-                    page_allocator->CommitPageSize())
-          : RoundUp(header_size + reserve_area_size,
+          ? allocatable_memory_area_offset +
+                RoundUp(reserve_area_size, page_allocator->CommitPageSize()) +
+                guard_size
+          : RoundUp(allocatable_memory_area_offset + reserve_area_size,
                     page_allocator->CommitPageSize());
   CHECK(memory_chunk->size() == reserved_size);
   CHECK(memory_chunk->area_start() <
@@ -319,13 +319,9 @@ TEST(LargeObjectSpace) {
   CHECK(lo->Contains(ho));
 
   while (true) {
-    size_t available = lo->Available();
     { AllocationResult allocation = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
       if (allocation.IsRetry()) break;
     }
-    // The available value is conservative such that it may report
-    // zero prior to heap exhaustion.
-    CHECK(lo->Available() < available || available == 0);
   }
 
   CHECK(!lo->IsEmpty());
@@ -670,9 +666,10 @@ TEST(ShrinkPageToHighWaterMarkFreeSpaceEnd) {
       HeapObject::FromAddress(array->address() + array->Size());
   CHECK(filler->IsFreeSpace());
   size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
-  size_t should_have_shrunk =
-      RoundDown(static_cast<size_t>(Page::kAllocatableMemory - array->Size()),
-                CommitPageSize());
+  size_t should_have_shrunk = RoundDown(
+      static_cast<size_t>(MemoryChunkLayout::AllocatableMemoryInDataPage() -
+                          array->Size()),
+      CommitPageSize());
   CHECK_EQ(should_have_shrunk, shrunk);
 }
@@ -19322,6 +19322,8 @@ TEST(GetHeapSpaceStatistics) {
     CHECK_GT(space_statistics.physical_space_size(), 0u);
     total_physical_size += space_statistics.physical_space_size();
   }
+
+  total_available_size += CcTest::heap()->memory_allocator()->Available();
   CHECK_EQ(total_size, heap_statistics.total_heap_size());
   CHECK_EQ(total_used_size, heap_statistics.used_heap_size());
   CHECK_EQ(total_available_size, heap_statistics.total_available_size());
@@ -48,10 +48,10 @@ bool SequentialUnmapperTest::old_flag_;
 // See v8:5945.
 TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
-  Page* page =
-      allocator()->AllocatePage(MemoryAllocator::PageAreaSize(OLD_SPACE),
-                                static_cast<PagedSpace*>(heap()->old_space()),
-                                Executability::NOT_EXECUTABLE);
+  Page* page = allocator()->AllocatePage(
+      MemoryChunkLayout::AllocatableMemoryInDataPage(),
+      static_cast<PagedSpace*>(heap()->old_space()),
+      Executability::NOT_EXECUTABLE);
   EXPECT_NE(nullptr, page);
   const int page_size = getpagesize();
   void* start_address = reinterpret_cast<void*>(page->address());
@@ -66,10 +66,10 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
 // See v8:5945.
 TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
-  Page* page =
-      allocator()->AllocatePage(MemoryAllocator::PageAreaSize(OLD_SPACE),
-                                static_cast<PagedSpace*>(heap()->old_space()),
-                                Executability::NOT_EXECUTABLE);
+  Page* page = allocator()->AllocatePage(
+      MemoryChunkLayout::AllocatableMemoryInDataPage(),
+      static_cast<PagedSpace*>(heap()->old_space()),
+      Executability::NOT_EXECUTABLE);
   EXPECT_NE(nullptr, page);
   const int page_size = getpagesize();
   void* start_address = reinterpret_cast<void*>(page->address());