Commit c4ca06f3 authored by Michael Lippautz, committed by Commit Bot

[heap] Cleanup page initialization

Remove dead code on the way.

Bug: v8:6474
Change-Id: I7edb4277bc53ee92edf9523b943492782ec6efac
Reviewed-on: https://chromium-review.googlesource.com/538652
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#45976}
parent 015edc60
@@ -24,6 +24,7 @@
#include "src/objects-inl.h"
#include "src/objects/scope-info.h"
#include "src/objects/script-inl.h"
#include "src/profiler/heap-profiler.h"
#include "src/string-hasher.h"
namespace v8 {
@@ -7,10 +7,7 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/msan.h"
#include "src/profiler/heap-profiler.h"
#include "src/v8memory.h"
namespace v8 {
namespace internal {
@@ -93,36 +90,6 @@ HeapObject* HeapObjectIterator::FromCurrentPage() {
return nullptr;
}
// -----------------------------------------------------------------------------
// MemoryAllocator
#ifdef ENABLE_HEAP_PROTECTION
void MemoryAllocator::Protect(Address start, size_t size) {
base::OS::Protect(start, size);
}
void MemoryAllocator::Unprotect(Address start, size_t size,
Executability executable) {
base::OS::Unprotect(start, size, executable);
}
void MemoryAllocator::ProtectChunkFromPage(Page* page) {
int id = GetChunkId(page);
base::OS::Protect(chunks_[id].address(), chunks_[id].size());
}
void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
int id = GetChunkId(page);
base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
chunks_[id].owner()->executable() == EXECUTABLE);
}
#endif
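Note: the block above is the dead code the commit message mentions; the ENABLE_HEAP_PROTECTION guard is presumably set by no current build configuration, so these MemoryAllocator hooks never compile. For context, such guards boil down to OS-level page protection. A minimal POSIX sketch of the idea (illustrative, not V8 code; assumes start and size are page-aligned):

#include <stddef.h>
#include <sys/mman.h>

// Revoke all access so that any stray read or write faults immediately.
int Protect(void* start, size_t size) {
  return mprotect(start, size, PROT_NONE);
}

// Restore read/write access; executable regions additionally need PROT_EXEC.
int Unprotect(void* start, size_t size, int executable) {
  return mprotect(start, size,
                  PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0));
}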
// -----------------------------------------------------------------------------
// SemiSpace
@@ -169,61 +136,6 @@ bool NewSpace::FromSpaceContainsSlow(Address a) {
bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
SemiSpace* owner) {
DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
bool in_to_space = (owner->id() != kFromSpace);
chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
: MemoryChunk::IN_FROM_SPACE);
DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
: MemoryChunk::IN_TO_SPACE));
Page* page = static_cast<Page*>(chunk);
heap->incremental_marking()->SetNewSpacePageFlags(page);
page->AllocateLocalTracker();
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
MarkingState::External(page).ClearLiveness();
}
return page;
}
// --------------------------------------------------------------------------
// PagedSpace
template <Page::InitializationMode mode>
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
DCHECK(page->area_size() <= kAllocatableMemory);
DCHECK(chunk->owner() == owner);
owner->IncreaseCapacity(page->area_size());
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
// Make sure that categories are initialized before freeing the area.
page->InitializeFreeListCategories();
// In the case we do not free the memory, we effectively account for the whole
// page as allocated memory that cannot be used for further allocations.
if (mode == kFreeMemory) {
owner->Free(page->area_start(), page->area_size());
}
return page;
}
Page* Page::ConvertNewToOld(Page* old_page) {
DCHECK(!old_page->is_anchor());
DCHECK(old_page->InNewSpace());
OldSpace* old_space = old_page->heap()->old_space();
old_page->set_owner(old_space);
old_page->SetFlags(0, static_cast<uintptr_t>(~0));
old_space->AccountCommitted(old_page->size());
Page* new_page = Page::Initialize<kDoNotFreeMemory>(
old_page->heap(), old_page, NOT_EXECUTABLE, old_space);
new_page->InsertAfter(old_space->anchor()->prev_page());
return new_page;
}
void Page::InitializeFreeListCategories() {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
@@ -575,27 +487,6 @@ MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
return AllocateRaw(size_in_bytes, alignment);
}
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable, Space* owner) {
if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
FATAL("Code page is too large.");
}
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
// Initialize the owner field for each contained page (except the first, which
// is initialized by MemoryChunk::Initialize).
for (Address addr = chunk->address() + Page::kPageSize + Page::kOwnerOffset;
addr < chunk->area_end(); addr += Page::kPageSize) {
// Clear out kPageHeaderTag.
Memory::Address_at(addr) = 0;
}
return static_cast<LargePage*>(chunk);
}
size_t LargeObjectSpace::Available() {
return ObjectSizeFor(heap()->memory_allocator()->Available());
}
@@ -574,6 +574,78 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
return chunk;
}
template <Page::InitializationMode mode>
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
DCHECK(page->area_size() <= kAllocatableMemory);
DCHECK(chunk->owner() == owner);
owner->IncreaseCapacity(page->area_size());
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
// Make sure that categories are initialized before freeing the area.
page->InitializeFreeListCategories();
// In the case we do not free the memory, we effectively account for the whole
// page as allocated memory that cannot be used for further allocations.
if (mode == kFreeMemory) {
owner->Free(page->area_start(), page->area_size());
}
return page;
}
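Note: the InitializationMode template parameter decides whether the fresh page's payload is immediately handed to the owner's free list (kFreeMemory, the default) or left fully accounted as allocated (kDoNotFreeMemory, used by Page::ConvertNewToOld below for promoted pages that already hold live objects). An illustrative pair of call sites; a sketch only, since Page::Initialize is private and reached via friends such as MemoryAllocator:

// Inside v8::internal, with heap, chunk, and owner in scope (sketch only).
// Fresh paged-space page: the payload goes straight onto the free list.
Page* fresh =
    Page::Initialize<Page::kFreeMemory>(heap, chunk, NOT_EXECUTABLE, owner);
// Promotion path: objects already live in the area, so nothing is freed and
// the full area stays accounted as allocated.
Page* promoted =
    Page::Initialize<Page::kDoNotFreeMemory>(heap, chunk, NOT_EXECUTABLE, owner);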
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
SemiSpace* owner) {
DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
bool in_to_space = (owner->id() != kFromSpace);
chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
: MemoryChunk::IN_FROM_SPACE);
DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
: MemoryChunk::IN_TO_SPACE));
Page* page = static_cast<Page*>(chunk);
heap->incremental_marking()->SetNewSpacePageFlags(page);
page->AllocateLocalTracker();
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
MarkingState::External(page).ClearLiveness();
}
return page;
}
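Note: the semispace variant only tags the page and wires up marking state; the DCHECK pair pins the invariant that exactly one of the two semispace flags is set. Expressed directly (an illustrative assertion, assuming MemoryChunk::IsFlagSet as used elsewhere in this file):

// Invariant after initialization: exactly one semispace flag is set.
DCHECK_NE(page->IsFlagSet(MemoryChunk::IN_TO_SPACE),
          page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));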
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable, Space* owner) {
if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
FATAL("Code page is too large.");
}
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
// Initialize the owner field for each contained page (except the first, which
// is initialized by MemoryChunk::Initialize).
for (Address addr = chunk->address() + Page::kPageSize + Page::kOwnerOffset;
addr < chunk->area_end(); addr += Page::kPageSize) {
// Clear out kPageHeaderTag.
Memory::Address_at(addr) = 0;
}
return static_cast<LargePage*>(chunk);
}
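Note: a large page spans several kPageSize-aligned regions, but only the first gets a header from MemoryChunk::Initialize; the loop zeroes the owner word of each following region so that header probes through interior addresses read null rather than garbage, and the STATIC_ASSERT documents why oversized code pages are fatal (typed-slot offsets must fit in TypedSlotSet::kMaxOffset). A standalone sketch of the loop's address arithmetic, with illustrative stand-in constants rather than V8's actual values:

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kPageSize = 512 * 1024;            // assumed page size
constexpr uintptr_t kOwnerOffset = 3 * sizeof(void*);  // assumed header offset

int main() {
  uintptr_t chunk_base = 0x40000000;                // hypothetical chunk start
  uintptr_t area_end = chunk_base + 3 * kPageSize;  // chunk covering 3 pages
  // Mirrors the loop in LargePage::Initialize: skip the first page, then
  // visit the owner slot of each following page-sized region (two here).
  for (uintptr_t addr = chunk_base + kPageSize + kOwnerOffset;
       addr < area_end; addr += kPageSize) {
    std::printf("zero owner word at 0x%llx\n",
                static_cast<unsigned long long>(addr));
  }
  return 0;
}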
Page* Page::ConvertNewToOld(Page* old_page) {
DCHECK(!old_page->is_anchor());
DCHECK(old_page->InNewSpace());
OldSpace* old_space = old_page->heap()->old_space();
old_page->set_owner(old_space);
old_page->SetFlags(0, static_cast<uintptr_t>(~0));
old_space->AccountCommitted(old_page->size());
Page* new_page = Page::Initialize<kDoNotFreeMemory>(
old_page->heap(), old_page, NOT_EXECUTABLE, old_space);
new_page->InsertAfter(old_space->anchor()->prev_page());
return new_page;
}
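Note: ConvertNewToOld wipes every page flag with SetFlags(0, ~0) before re-initializing the chunk as an old-space page in kDoNotFreeMemory mode. A sketch of the set-within-mask pattern that such a SetFlags conventionally follows (an assumption, not copied from the V8 sources):

#include <cstdint>

// Install flags in the bit positions selected by mask; keep all other bits.
inline uintptr_t SetWithinMask(uintptr_t current, uintptr_t flags,
                               uintptr_t mask) {
  return (current & ~mask) | (flags & mask);
}
// With flags == 0 and mask == ~uintptr_t{0}, the result is 0 for any input:
// every flag cleared, which is what SetFlags(0, ~0) achieves above.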
// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
@@ -743,8 +743,6 @@ class Page : public MemoryChunk {
static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
static inline Page* ConvertNewToOld(Page* old_page);
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[. This only works if the object
// is in fact in a page.
@@ -775,6 +773,8 @@ class Page : public MemoryChunk {
kObjectStartOffset;
}
static Page* ConvertNewToOld(Page* old_page);
inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
// Create a Page object that is only used as anchor for the doubly-linked
@@ -856,10 +856,10 @@ class Page : public MemoryChunk {
enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
template <InitializationMode mode = kFreeMemory>
static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable, PagedSpace* owner);
static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable, SemiSpace* owner);
static Page* Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable, PagedSpace* owner);
static Page* Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable, SemiSpace* owner);
inline void InitializeFreeListCategories();
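Note: the declaration changes in this hunk and the next mirror the move of the definitions from the -inl.h header into spaces.cc: once a function is defined out of line in a single translation unit, the inline specifier is unnecessary (an inline function would instead need its definition visible in every translation unit that calls it). The pattern in miniature, with a hypothetical type:

// header (sketch): plain static declaration, definition lives in one .cc.
struct Widget {
  static Widget* Initialize(void* chunk);
};

// widget.cc: single out-of-line definition; no inline required, and callers
// no longer need the body visible at every use site.
Widget* Widget::Initialize(void* chunk) {
  return static_cast<Widget*>(chunk);
}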
@@ -891,8 +891,8 @@ class LargePage : public MemoryChunk {
static const int kMaxCodePageSize = 512 * MB;
private:
static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable, Space* owner);
static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable, Space* owner);
friend class MemoryAllocator;
};