Commit 260df721 authored by mlippautz, committed by Commit bot

[heap] Unify LargePage, NewSpacePage, and Page allocation

BUG=

Review URL: https://codereview.chromium.org/1864953003

Cr-Commit-Position: refs/heads/master@{#35284}
parent 974721c6
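
The hunks below fold the bespoke MemoryAllocator::AllocateLargePage into the templated MemoryAllocator::AllocatePage already used for regular and new-space pages. For orientation, here is a minimal sketch of what that unified entry point plausibly looks like, inferred from the explicit instantiations and call sites in this diff; the body, the AllocatePagePooled helper, and the kPooled handling are assumptions, not verbatim V8 source:

// Sketch only -- inferred from the diff, not copied from V8. Assumes an
// AllocationMode enum { kRegular, kPooled } on MemoryAllocator (both names
// appear in the instantiations below) and a hypothetical AllocatePagePooled()
// helper that serves pooled (new-space) pages from recycled chunks.
template <typename PageType, MemoryAllocator::AllocationMode mode,
          typename SpaceType>
PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
                                        Executability executable) {
  MemoryChunk* chunk = nullptr;
  if (mode == kPooled) {
    // Pooled allocation may reuse a previously freed chunk.
    chunk = AllocatePagePooled(owner);
  }
  if (chunk == nullptr) {
    // Signature matches the AllocateChunk call removed from
    // AllocateLargePage in the hunk below.
    chunk = AllocateChunk(size, size, executable, owner);
  }
  if (chunk == nullptr) return nullptr;
  // Page-type-specific setup (e.g. LargePage's code-page size check)
  // now lives in each type's Initialize(), as the first hunk shows.
  return PageType::Initialize(isolate_->heap(), chunk, executable, owner);
}
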
@@ -668,8 +668,12 @@ MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
   return AllocateRaw(size_in_bytes, alignment);
 }
 
-LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
+LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
+                                 Executability executable, Space* owner) {
+  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
+    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
+    FATAL("Code page is too large.");
+  }
   heap->incremental_marking()->SetOldSpacePageFlags(chunk);
   return static_cast<LargePage*>(chunk);
 }
@@ -684,20 +684,6 @@ void Page::ResetFreeListStatistics() {
   available_in_free_list_ = 0;
 }
 
-LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
-                                              Space* owner,
-                                              Executability executable) {
-  MemoryChunk* chunk =
-      AllocateChunk(object_size, object_size, executable, owner);
-  if (chunk == NULL) return NULL;
-  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
-    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
-    FATAL("Code page is too large.");
-  }
-  return LargePage::Initialize(isolate_->heap(), chunk);
-}
-
 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
   DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
@@ -780,6 +766,10 @@ template Page* MemoryAllocator::AllocatePage<Page, MemoryAllocator::kRegular,
                                              PagedSpace>(intptr_t, PagedSpace*,
                                                          Executability);
 
+template LargePage*
+MemoryAllocator::AllocatePage<LargePage, MemoryAllocator::kRegular, Space>(
+    intptr_t, Space*, Executability);
+
 template NewSpacePage* MemoryAllocator::AllocatePage<
     NewSpacePage, MemoryAllocator::kPooled, SemiSpace>(intptr_t, SemiSpace*,
                                                        Executability);
@@ -2886,8 +2876,9 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
     return AllocationResult::Retry(identity());
   }
 
-  LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
-      object_size, this, executable);
+  LargePage* page =
+      heap()->isolate()->memory_allocator()->AllocatePage<LargePage>(
+          object_size, this, executable);
   if (page == NULL) return AllocationResult::Retry(identity());
   DCHECK(page->area_size() >= object_size);
@@ -882,9 +882,6 @@ class Page : public MemoryChunk {
   inline void ClearGCFields();
 
-  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
-                                 Executability executable, PagedSpace* owner);
-
   void InitializeAsAnchor(PagedSpace* owner);
 
   // WaitUntilSweepingCompleted only works when concurrent sweeping is in
@@ -937,6 +934,9 @@ class Page : public MemoryChunk {
   inline void ClearEvacuationCandidate();
 
  private:
+  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
+                                 Executability executable, PagedSpace* owner);
+
   inline void InitializeFreeListCategories();
 
   friend class MemoryAllocator;
@@ -960,7 +960,8 @@ class LargePage : public MemoryChunk {
   static const int kMaxCodePageSize = 512 * MB;
 
  private:
-  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
+  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
+                                      Executability executable, Space* owner);
 
   friend class MemoryAllocator;
 };
@@ -1268,9 +1269,6 @@ class MemoryAllocator {
   PageType* AllocatePage(intptr_t size, SpaceType* owner,
                          Executability executable);
 
-  LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
-                               Executability executable);
-
   // PreFree logically frees the object, i.e., it takes care of the size
   // bookkeeping and calls the allocation callback.
   void PreFreeMemory(MemoryChunk* chunk);
@@ -2289,10 +2287,6 @@ enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
 
 class NewSpacePage : public MemoryChunk {
  public:
-  static inline NewSpacePage* Initialize(Heap* heap, MemoryChunk* chunk,
-                                         Executability executable,
-                                         SemiSpace* owner);
-
   static bool IsAtStart(Address addr) {
     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
            kObjectStartOffset;
@@ -2340,6 +2334,10 @@ class NewSpacePage : public MemoryChunk {
   bool is_anchor() { return !this->InNewSpace(); }
 
  private:
+  static inline NewSpacePage* Initialize(Heap* heap, MemoryChunk* chunk,
+                                         Executability executable,
+                                         SemiSpace* owner);
+
   // GC related flags copied from from-space to to-space when
   // flipping semispaces.
   static const intptr_t kCopyOnFlipFlagsMask =
@@ -2355,6 +2353,7 @@ class NewSpacePage : public MemoryChunk {
   // Only uses the prev/next links, and sets flags to not be in new-space.
   void InitializeAsAnchor(SemiSpace* owner);
 
+  friend class MemoryAllocator;
   friend class SemiSpace;
   friend class SemiSpaceIterator;
 };
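
Taken together, the explicit instantiations and the LargeObjectSpace::AllocateRaw change above route every page flavor through the one templated entry point. A hedged usage sketch, one call per instantiation; the allocator/owner variables and size arguments are illustrative assumptions, and a defaulted kRegular mode is inferred from the single-argument AllocatePage<LargePage> call site in the diff:

// Hypothetical call sites, not from the diff. SpaceType is deduced from the
// owner argument; EXECUTABLE/NOT_EXECUTABLE are the Executability values.
Page* page =
    allocator->AllocatePage<Page>(page_size, paged_space, EXECUTABLE);
LargePage* large_page =
    allocator->AllocatePage<LargePage>(object_size, lo_space, NOT_EXECUTABLE);
NewSpacePage* new_space_page =
    allocator->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
        page_size, semi_space, NOT_EXECUTABLE);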