Commit 1d837093 authored by Hannes Payer, committed by Commit Bot

[heap] Clean-up MemoryChunk allocation area constants.

Change-Id: I8ba59546ab93c7af98bc5ece2f0160628844dd92
Reviewed-on: https://chromium-review.googlesource.com/c/1280584
Reviewed-by: Yang Guo <yangguo@chromium.org>
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56908}
parent ec969ea3
......@@ -148,9 +148,8 @@ Heap::Heap()
}
size_t Heap::MaxReserved() {
const double kFactor = Page::kPageSize * 1.0 / Page::kAllocatableMemory;
return static_cast<size_t>(
(2 * max_semi_space_size_ + max_old_generation_size_) * kFactor);
return static_cast<size_t>(2 * max_semi_space_size_ +
max_old_generation_size_);
}
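For context, a minimal standalone sketch of the arithmetic simplified above, using assumed sizes rather than values from this commit: the removed kFactor scaled the reserved total by page size over allocatable bytes per page, while the new version returns the plain sum.
#include <cstdio>
// Standalone illustration of the old vs. new MaxReserved arithmetic. The page,
// semi-space, and old-generation sizes are assumptions chosen only to make the
// numbers concrete; they are not taken from V8.
int main() {
  const double kPageSize = 512.0 * 1024;           // assumed 512KB page
  const double kAllocatableMemory = 508.0 * 1024;  // assumed usable bytes per page
  const double semi_space = 8.0 * 1024 * 1024;     // assumed semi-space size
  const double old_generation = 256.0 * 1024 * 1024;
  const double kFactor = kPageSize / kAllocatableMemory;  // old per-page overhead scaling
  std::printf("old: %.0f bytes\n", (2 * semi_space + old_generation) * kFactor);
  std::printf("new: %.0f bytes\n", 2 * semi_space + old_generation);
}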
size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
......@@ -240,6 +239,8 @@ size_t Heap::Available() {
for (SpaceIterator it(this); it.has_next();) {
total += it.next()->Available();
}
total += memory_allocator()->Available();
return total;
}
......@@ -1514,7 +1515,7 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
AllocationResult allocation;
int size = chunk.size;
DCHECK_LE(static_cast<size_t>(size),
MemoryAllocator::PageAreaSize(
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
static_cast<AllocationSpace>(space)));
if (space == NEW_SPACE) {
allocation = new_space()->AllocateRawUnaligned(size);
......
......@@ -2366,10 +2366,11 @@ class Evacuator : public Malloced {
// NewSpacePages with more live bytes than this threshold qualify for fast
// evacuation.
static int PageEvacuationThreshold() {
static intptr_t NewSpacePageEvacuationThreshold() {
if (FLAG_page_promotion)
return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
return Page::kAllocatableMemory + kPointerSize;
return FLAG_page_promotion_threshold *
MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
return MemoryChunkLayout::AllocatableMemoryInDataPage() + kPointerSize;
}
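As a rough, standalone illustration of the threshold computed above (the allocatable-page size and threshold percentage below are assumed example inputs, not V8 defaults):
#include <cstdint>
#include <cstdio>
// Standalone sketch of the promotion-threshold logic; inputs are assumptions.
int64_t NewSpacePageEvacuationThreshold(bool page_promotion,
                                        int64_t allocatable_bytes_per_page,
                                        int threshold_percent) {
  if (page_promotion)
    return threshold_percent * allocatable_bytes_per_page / 100;
  // With page promotion disabled, no page can reach this many live bytes.
  return allocatable_bytes_per_page + static_cast<int64_t>(sizeof(void*));
}
int main() {
  const int64_t kAssumedAllocatable = 508 * 1024;  // assumed usable bytes per page
  std::printf("threshold: %lld bytes\n",
              static_cast<long long>(NewSpacePageEvacuationThreshold(
                  true, kAssumedAllocatable, 70)));
}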
Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
......@@ -2619,7 +2620,7 @@ bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
const bool reduce_memory = heap()->ShouldReduceMemory();
const Address age_mark = heap()->new_space()->age_mark();
return !reduce_memory && !p->NeverEvacuate() &&
(live_bytes > Evacuator::PageEvacuationThreshold()) &&
(live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
!p->Contains(age_mark) && heap()->CanExpandOldGeneration(live_bytes);
}
......
......@@ -242,7 +242,8 @@ SlotCallbackResult Scavenger::EvacuateObjectDefault(Map* map,
HeapObjectReference** slot,
HeapObject* object,
int object_size) {
SLOW_DCHECK(object_size <= Page::kAllocatableMemory);
SLOW_DCHECK(static_cast<size_t>(object_size) <=
MemoryChunkLayout::AllocatableMemoryInDataPage());
SLOW_DCHECK(object->SizeFromMap(map) == object_size);
CopyAndForwardResult result;
......
......@@ -45,6 +45,7 @@ class LinearAllocationArea;
class LocalArrayBufferTracker;
class MemoryAllocator;
class MemoryChunk;
class MemoryChunkLayout;
class Page;
class PagedSpace;
class SemiSpace;
......@@ -121,9 +122,6 @@ class Space;
#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
DCHECK((0 < size) && (size <= code_space->AreaSize()))
#define DCHECK_PAGE_OFFSET(offset) \
DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
enum FreeListCategoryType {
kTiniest,
kTiny,
......@@ -239,6 +237,19 @@ class FreeListCategory {
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
};
class MemoryChunkLayout {
public:
static size_t CodePageGuardStartOffset();
static size_t CodePageGuardSize();
static intptr_t ObjectStartOffsetInCodePage();
static intptr_t ObjectEndOffsetInCodePage();
static size_t AllocatableMemoryInCodePage();
static intptr_t ObjectStartOffsetInDataPage();
V8_EXPORT_PRIVATE static size_t AllocatableMemoryInDataPage();
static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
};
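The helpers above replace the compile-time constants removed further down (kObjectStartOffset and kAllocatableMemory). A minimal standalone sketch of that data-page arithmetic, with an assumed header size and page size rather than V8's actual values:
#include <cstddef>
#include <cstdio>
// Standalone sketch of the data-page layout arithmetic; kAssumedHeaderSize and
// kAssumedPageSize are placeholders for illustration only.
constexpr size_t kPointerSize = sizeof(void*);
constexpr size_t kAssumedHeaderSize = 280;            // assumption
constexpr size_t kAssumedPageSize = size_t{1} << 19;  // assumed 512KB page
constexpr size_t ObjectStartOffsetInDataPage() {
  // Mirrors kHeaderSize + (kPointerSize - kHeaderSize % kPointerSize): round
  // the header size up to the next pointer-size boundary.
  return kAssumedHeaderSize + (kPointerSize - kAssumedHeaderSize % kPointerSize);
}
constexpr size_t AllocatableMemoryInDataPage() {
  return kAssumedPageSize - ObjectStartOffsetInDataPage();
}
int main() {
  std::printf("object start offset: %zu\n", ObjectStartOffsetInDataPage());
  std::printf("allocatable memory:  %zu\n", AllocatableMemoryInDataPage());
}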
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
......@@ -349,7 +360,7 @@ class MemoryChunk {
static const intptr_t kMarkBitmapOffset = kFlagsOffset + kPointerSize;
static const intptr_t kReservationOffset = kMarkBitmapOffset + kPointerSize;
static const size_t kMinHeaderSize =
static const size_t kHeaderSize =
kSizeOffset // NOLINT
+ kSizetSize // size_t size
+ kUIntptrSize // uintptr_t flags_
......@@ -382,17 +393,9 @@ class MemoryChunk {
+ kIntptrSize // std::atomic<intptr_t> young_generation_live_byte_count_
+ kPointerSize; // Bitmap* young_generation_bitmap_
static const size_t kHeaderSize = kMinHeaderSize;
// TODO(hpayer): Fix kObjectStartOffset and kAllocatableMemory for code pages.
static const int kObjectStartOffset =
kHeaderSize + (kPointerSize - kHeaderSize % kPointerSize);
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
// Maximum number of nested code memory modification scopes.
// TODO(6792,mstarzinger): Drop to 3 or lower once WebAssembly is off heap.
static const int kMaxWriteUnprotectCounter = 4;
......@@ -748,10 +751,6 @@ class MemoryChunk {
static_assert(sizeof(std::atomic<intptr_t>) == kPointerSize,
"sizeof(std::atomic<intptr_t>) == kPointerSize");
static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
"kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");
// -----------------------------------------------------------------------------
// A page is a memory chunk of size 512K. Large object pages may be larger.
//
......@@ -782,7 +781,7 @@ class Page : public MemoryChunk {
// Returns the page containing the address provided. The address can
potentially point right after the page. To also be safe for tagged values
// we subtract a hole word. The valid address ranges from
// [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
// [page_addr + area_start_ .. page_addr + kPageSize + kPointerSize].
static Page* FromAllocationAreaAddress(Address address) {
return Page::FromAddress(address - kPointerSize);
}
......@@ -797,10 +796,6 @@ class Page : public MemoryChunk {
return (addr & kPageAlignmentMask) == 0;
}
static bool IsAtObjectStart(Address addr) {
return (addr & kPageAlignmentMask) == kObjectStartOffset;
}
static Page* ConvertNewToOld(Page* old_page);
inline void MarkNeverAllocateForTesting();
......@@ -822,8 +817,10 @@ class Page : public MemoryChunk {
// Returns the address for a given offset in this page.
Address OffsetToAddress(size_t offset) {
DCHECK_PAGE_OFFSET(offset);
return address() + offset;
Address address_in_page = address() + offset;
DCHECK_GE(address_in_page, area_start_);
DCHECK_LT(address_in_page, area_end_);
return address_in_page;
}
// WaitUntilSweepingCompleted only works when concurrent sweeping is in
......@@ -1269,24 +1266,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
kPooledAndQueue,
};
static size_t CodePageGuardStartOffset();
static size_t CodePageGuardSize();
static size_t CodePageAreaStartOffset();
static size_t CodePageAreaEndOffset();
static size_t CodePageAreaSize() {
return CodePageAreaEndOffset() - CodePageAreaStartOffset();
}
static size_t PageAreaSize(AllocationSpace space) {
DCHECK_NE(LO_SPACE, space);
return (space == CODE_SPACE) ? CodePageAreaSize()
: Page::kAllocatableMemory;
}
static intptr_t GetCommitPageSize();
// Computes the memory area of discardable memory within a given memory area
......@@ -1325,11 +1304,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
return capacity_ < size ? 0 : capacity_ - size;
}
// Returns maximum available bytes that the old space can have.
size_t MaxAvailable() {
return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
}
// Returns an indication of whether a pointer is in a space that has
// been allocated by this MemoryAllocator.
V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
......@@ -1910,7 +1884,10 @@ class V8_EXPORT_PRIVATE FreeList {
// The size range of blocks, in bytes.
static const size_t kMinBlockSize = 3 * kPointerSize;
static const size_t kMaxBlockSize = Page::kAllocatableMemory;
// This is a conservative upper bound. The actual maximum block size takes
// padding and alignment of data and code pages into account.
static const size_t kMaxBlockSize = Page::kPageSize;
static const size_t kTiniestListMax = 0xa * kPointerSize;
static const size_t kTinyListMax = 0x1f * kPointerSize;
......@@ -2606,7 +2583,8 @@ class NewSpace : public SpaceWithLinearArea {
// Return the allocated bytes in the active semispace.
size_t Size() override {
DCHECK_GE(top(), to_space_.page_low());
return to_space_.pages_used() * Page::kAllocatableMemory +
return to_space_.pages_used() *
MemoryChunkLayout::AllocatableMemoryInDataPage() +
static_cast<size_t>(top() - to_space_.page_low());
}
......@@ -2616,7 +2594,7 @@ class NewSpace : public SpaceWithLinearArea {
size_t Capacity() {
SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
return (to_space_.current_capacity() / Page::kPageSize) *
Page::kAllocatableMemory;
MemoryChunkLayout::AllocatableMemoryInDataPage();
}
// Return the current size of a semispace, allocatable and non-allocatable
......@@ -2671,7 +2649,7 @@ class NewSpace : public SpaceWithLinearArea {
}
while (current_page != last_page) {
DCHECK_NE(current_page, age_mark_page);
allocated += Page::kAllocatableMemory;
allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
current_page = current_page->next_page();
}
DCHECK_GE(top(), current_page->area_start());
......@@ -2876,6 +2854,11 @@ class OldSpace : public PagedSpace {
// Creates an old space object. The constructor does not allocate pages
// from OS.
explicit OldSpace(Heap* heap) : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE) {}
static bool IsAtPageStart(Address addr) {
return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
MemoryChunkLayout::ObjectStartOffsetInDataPage();
}
};
// -----------------------------------------------------------------------------
......@@ -2888,7 +2871,6 @@ class CodeSpace : public PagedSpace {
explicit CodeSpace(Heap* heap) : PagedSpace(heap, CODE_SPACE, EXECUTABLE) {}
};
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
......@@ -2956,9 +2938,7 @@ class ReadOnlySpace : public PagedSpace {
// -----------------------------------------------------------------------------
// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
// managed by the large object space. A large object is allocated from OS
// heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at Page::kObjectStartOffset to a page.
// managed by the large object space.
// Large objects do not move during garbage collections.
class LargeObjectSpace : public Space {
......@@ -2973,11 +2953,6 @@ class LargeObjectSpace : public Space {
// Releases internal resources, frees objects in this space.
void TearDown();
static size_t ObjectSizeFor(size_t chunk_size) {
if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
}
V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
Executability executable);
......
......@@ -47,7 +47,7 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltins() {
DCHECK(!Builtins::IsLazy(Builtins::kDeserializeLazy));
uint32_t builtin_size =
deserializer()->ExtractCodeObjectSize(Builtins::kDeserializeLazy);
DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
DCHECK_LE(builtin_size, MemoryChunkLayout::AllocatableMemoryInCodePage());
result.push_back({builtin_size, kNullAddress, kNullAddress});
}
......@@ -61,7 +61,7 @@ BuiltinDeserializerAllocator::CreateReservationsForEagerBuiltins() {
}
uint32_t builtin_size = deserializer()->ExtractCodeObjectSize(i);
DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
DCHECK_LE(builtin_size, MemoryChunkLayout::AllocatableMemoryInCodePage());
result.push_back({builtin_size, kNullAddress, kNullAddress});
}
......@@ -126,7 +126,7 @@ void BuiltinDeserializerAllocator::ReserveAndInitializeBuiltinsTableForBuiltin(
const uint32_t builtin_size =
deserializer()->ExtractCodeObjectSize(builtin_id);
DCHECK_LE(builtin_size, MemoryAllocator::PageAreaSize(CODE_SPACE));
DCHECK_LE(builtin_size, MemoryChunkLayout::AllocatableMemoryInCodePage());
Handle<HeapObject> o =
isolate()->factory()->NewCodeForDeserialization(builtin_size);
......
......@@ -26,7 +26,8 @@ void DefaultSerializerAllocator::UseCustomChunkSize(uint32_t chunk_size) {
static uint32_t PageSizeOfSpace(int space) {
return static_cast<uint32_t>(
MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space)));
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
static_cast<AllocationSpace>(space)));
}
uint32_t DefaultSerializerAllocator::TargetChunkSize(int space) {
......
......@@ -39,10 +39,12 @@ std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
Handle<FixedArray> array;
int allocated = 0;
do {
if (allocated + kArraySize * 2 > MemoryChunk::kAllocatableMemory) {
if (allocated + kArraySize * 2 >
static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage())) {
int size =
kArraySize * 2 -
((allocated + kArraySize * 2) - MemoryChunk::kAllocatableMemory) -
((allocated + kArraySize * 2) -
static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage())) -
remainder;
int last_array_len = heap::FixedArrayLenFromSize(size);
array = isolate->factory()->NewFixedArray(last_array_len, TENURED);
......@@ -59,7 +61,8 @@ std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
Page::FromAddress(array->address())->area_start());
}
handles.push_back(array);
} while (allocated < MemoryChunk::kAllocatableMemory);
} while (allocated <
static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()));
return handles;
}
......
......@@ -59,8 +59,10 @@ HEAP_TEST(CompactionFullAbortedPage) {
{
HandleScope scope2(isolate);
CHECK(heap->old_space()->Expand());
auto compaction_page_handles =
heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED);
auto compaction_page_handles = heap::CreatePadding(
heap,
static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
TENURED);
Page* to_be_aborted_page =
Page::FromAddress(compaction_page_handles.front()->address());
to_be_aborted_page->SetFlag(
......@@ -93,7 +95,9 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
FLAG_manual_evacuation_candidates_selection = true;
const int objects_per_page = 10;
const int object_size = Page::kAllocatableMemory / objects_per_page;
const int object_size =
static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
objects_per_page;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
......@@ -109,7 +113,9 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
// properly adjusted).
CHECK(heap->old_space()->Expand());
auto compaction_page_handles = heap::CreatePadding(
heap, Page::kAllocatableMemory, TENURED, object_size);
heap,
static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
TENURED, object_size);
Page* to_be_aborted_page =
Page::FromAddress(compaction_page_handles.front()->address());
to_be_aborted_page->SetFlag(
......@@ -168,7 +174,9 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
FLAG_manual_evacuation_candidates_selection = true;
const int objects_per_page = 10;
const int object_size = Page::kAllocatableMemory / objects_per_page;
const int object_size =
static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
objects_per_page;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
......@@ -187,8 +195,11 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
// properly adjusted).
CHECK(heap->old_space()->Expand());
std::vector<Handle<FixedArray>> compaction_page_handles =
heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED,
object_size);
heap::CreatePadding(
heap,
static_cast<int>(
MemoryChunkLayout::AllocatableMemoryInDataPage()),
TENURED, object_size);
to_be_aborted_page =
Page::FromAddress(compaction_page_handles.front()->address());
to_be_aborted_page->SetFlag(
......@@ -257,7 +268,9 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
FLAG_manual_evacuation_candidates_selection = true;
const int objects_per_page = 10;
const int object_size = Page::kAllocatableMemory / objects_per_page;
const int object_size =
static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
objects_per_page;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
......@@ -275,7 +288,9 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
// properly adjusted).
CHECK(heap->old_space()->Expand());
auto compaction_page_handles = heap::CreatePadding(
heap, Page::kAllocatableMemory, TENURED, object_size);
heap,
static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
TENURED, object_size);
// Sanity check that we have enough space for linking up arrays.
CHECK_GE(compaction_page_handles.front()->length(), 2);
to_be_aborted_page =
......
......@@ -4597,7 +4597,8 @@ TEST(Regress388880) {
// Allocate padding objects in old pointer space so, that object allocated
// afterwards would end at the end of the page.
heap::SimulateFullSpace(heap->old_space());
size_t padding_size = desired_offset - Page::kObjectStartOffset;
size_t padding_size =
desired_offset - MemoryChunkLayout::ObjectStartOffsetInDataPage();
heap::CreatePadding(heap, static_cast<int>(padding_size), TENURED);
Handle<JSObject> o = factory->NewJSObjectFromMap(map1, TENURED);
......@@ -6132,7 +6133,8 @@ size_t NearHeapLimitCallback(void* raw_state, size_t current_heap_limit,
size_t MemoryAllocatorSizeFromHeapCapacity(size_t capacity) {
// Size to capacity factor.
double factor = Page::kPageSize * 1.0 / Page::kAllocatableMemory;
double factor =
Page::kPageSize * 1.0 / MemoryChunkLayout::AllocatableMemoryInDataPage();
// Some tables (e.g. deoptimization table) are allocated directly with the
// memory allocator. Allow some slack to account for them.
size_t slack = 5 * MB;
......
......@@ -76,8 +76,9 @@ UNINITIALIZED_TEST(PagePromotion_NewToOld) {
// To perform a sanity check on live bytes we need to mark the heap.
heap::SimulateIncrementalMarking(heap, true);
// Sanity check that the page meets the requirements for promotion.
const int threshold_bytes =
FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
const int threshold_bytes = static_cast<int>(
FLAG_page_promotion_threshold *
MemoryChunkLayout::AllocatableMemoryInDataPage() / 100);
CHECK_GE(heap->incremental_marking()->marking_state()->live_bytes(
to_be_promoted_page),
threshold_bytes);
......
......@@ -98,19 +98,19 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
v8::PageAllocator* page_allocator =
memory_allocator->page_allocator(executable);
size_t header_size = (executable == EXECUTABLE)
? MemoryAllocator::CodePageGuardStartOffset()
: MemoryChunk::kObjectStartOffset;
size_t allocatable_memory_area_offset =
MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(space->identity());
size_t guard_size =
(executable == EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
(executable == EXECUTABLE) ? MemoryChunkLayout::CodePageGuardSize() : 0;
MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
reserve_area_size, commit_area_size, executable, space);
size_t reserved_size =
((executable == EXECUTABLE))
? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
page_allocator->CommitPageSize())
: RoundUp(header_size + reserve_area_size,
? allocatable_memory_area_offset +
RoundUp(reserve_area_size, page_allocator->CommitPageSize()) +
guard_size
: RoundUp(allocatable_memory_area_offset + reserve_area_size,
page_allocator->CommitPageSize());
CHECK(memory_chunk->size() == reserved_size);
CHECK(memory_chunk->area_start() <
......@@ -319,13 +319,9 @@ TEST(LargeObjectSpace) {
CHECK(lo->Contains(ho));
while (true) {
size_t available = lo->Available();
{
AllocationResult allocation = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
if (allocation.IsRetry()) break;
}
// The available value is conservative such that it may report
// zero prior to heap exhaustion.
CHECK(lo->Available() < available || available == 0);
}
CHECK(!lo->IsEmpty());
......@@ -670,9 +666,10 @@ TEST(ShrinkPageToHighWaterMarkFreeSpaceEnd) {
HeapObject::FromAddress(array->address() + array->Size());
CHECK(filler->IsFreeSpace());
size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
size_t should_have_shrunk =
RoundDown(static_cast<size_t>(Page::kAllocatableMemory - array->Size()),
CommitPageSize());
size_t should_have_shrunk = RoundDown(
static_cast<size_t>(MemoryChunkLayout::AllocatableMemoryInDataPage() -
array->Size()),
CommitPageSize());
CHECK_EQ(should_have_shrunk, shrunk);
}
......
......@@ -19322,6 +19322,8 @@ TEST(GetHeapSpaceStatistics) {
CHECK_GT(space_statistics.physical_space_size(), 0u);
total_physical_size += space_statistics.physical_space_size();
}
total_available_size += CcTest::heap()->memory_allocator()->Available();
CHECK_EQ(total_size, heap_statistics.total_heap_size());
CHECK_EQ(total_used_size, heap_statistics.used_heap_size());
CHECK_EQ(total_available_size, heap_statistics.total_available_size());
......
......@@ -48,10 +48,10 @@ bool SequentialUnmapperTest::old_flag_;
// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
Page* page =
allocator()->AllocatePage(MemoryAllocator::PageAreaSize(OLD_SPACE),
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
Page* page = allocator()->AllocatePage(
MemoryChunkLayout::AllocatableMemoryInDataPage(),
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
EXPECT_NE(nullptr, page);
const int page_size = getpagesize();
void* start_address = reinterpret_cast<void*>(page->address());
......@@ -66,10 +66,10 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
Page* page =
allocator()->AllocatePage(MemoryAllocator::PageAreaSize(OLD_SPACE),
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
Page* page = allocator()->AllocatePage(
MemoryChunkLayout::AllocatableMemoryInDataPage(),
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
EXPECT_NE(nullptr, page);
const int page_size = getpagesize();
void* start_address = reinterpret_cast<void*>(page->address());
......