Commit 1518b1e3 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Refactor page initialization.

This fixes layering between page and its owner, so that the page does
not update the owner state.

Bug: chromium:694255
Change-Id: Ic4f594340bed42d4f2c13d0a30f451317cbc9f50
Reviewed-on: https://chromium-review.googlesource.com/620732
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47437}
parent 38b1807a
......@@ -3616,6 +3616,11 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
if (ShouldMovePage(page, live_bytes_on_page)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
DCHECK_EQ(heap()->old_space(), page->owner());
// The move added page->allocated_bytes to the old space, but we are
// going to sweep the page and add page->live_byte_count.
heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
page);
} else {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
......
......@@ -582,42 +582,31 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
return chunk;
}
template <Page::InitializationMode mode>
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
DCHECK(page->area_size() <= kAllocatableMemory);
DCHECK(chunk->owner() == owner);
owner->IncreaseCapacity(page->area_size());
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
Page* page = static_cast<Page*>(chunk);
DCHECK(page->area_size() <= Page::kAllocatableMemory);
// Make sure that categories are initialized before freeing the area.
page->InitializeFreeListCategories();
// In the case we do not free the memory, we effectively account for the whole
// page as allocated memory that cannot be used for further allocations.
if (mode == kFreeMemory) {
owner->IncreaseAllocatedBytes(page->area_size(), page);
owner->Free(page->area_start(), page->area_size());
}
page->ResetAllocatedBytes();
heap()->incremental_marking()->SetOldSpacePageFlags(page);
page->InitializationMemoryFence();
return page;
}
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
SemiSpace* owner) {
Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
bool in_to_space = (owner->id() != kFromSpace);
bool in_to_space = (id() != kFromSpace);
chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
: MemoryChunk::IN_FROM_SPACE);
DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
: MemoryChunk::IN_TO_SPACE));
Page* page = static_cast<Page*>(chunk);
heap->incremental_marking()->SetNewSpacePageFlags(page);
heap()->incremental_marking()->SetNewSpacePageFlags(page);
page->AllocateLocalTracker();
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
heap->minor_mark_compact_collector()
heap()
->minor_mark_compact_collector()
->non_atomic_marking_state()
->ClearLiveness(page);
}
......@@ -653,10 +642,8 @@ Page* Page::ConvertNewToOld(Page* old_page) {
OldSpace* old_space = old_page->heap()->old_space();
old_page->set_owner(old_space);
old_page->SetFlags(0, static_cast<uintptr_t>(~0));
old_space->AccountCommitted(old_page->size());
Page* new_page = Page::Initialize<kDoNotFreeMemory>(
old_page->heap(), old_page, NOT_EXECUTABLE, old_space);
new_page->InsertAfter(old_space->anchor()->prev_page());
Page* new_page = old_space->InitializePage(old_page, NOT_EXECUTABLE);
old_space->AddPage(new_page);
return new_page;
}
......@@ -1071,7 +1058,7 @@ Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
chunk = AllocateChunk(size, size, executable, owner);
}
if (chunk == nullptr) return nullptr;
return Page::Initialize(isolate_->heap(), chunk, executable, owner);
return owner->InitializePage(chunk, executable);
}
template Page*
......@@ -1592,18 +1579,14 @@ bool PagedSpace::Expand() {
if (!heap()->CanExpandOldGeneration(size)) return false;
Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
if (p == nullptr) return false;
AccountCommitted(p->size());
Page* page =
heap()->memory_allocator()->AllocatePage(size, this, executable());
if (page == nullptr) return false;
// Pages created during bootstrapping may contain immortal immovable objects.
if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
AddPage(page);
Free(page->area_start(), page->area_size());
DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
p->InsertAfter(anchor_.prev_page());
return true;
}
......
......@@ -622,6 +622,10 @@ class MemoryChunk {
void InsertAfter(MemoryChunk* other);
void Unlink();
// Emits a memory barrier. For TSAN builds the other thread needs to perform
// MemoryChunk::synchronized_heap() to simulate the barrier.
void InitializationMemoryFence();
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
......@@ -633,10 +637,6 @@ class MemoryChunk {
base::VirtualMemory* reserved_memory() { return &reservation_; }
// Emits a memory barrier. For TSAN builds the other thread needs to perform
// MemoryChunk::synchronized_heap() to simulate the barrier.
void InitializationMemoryFence();
size_t size_;
uintptr_t flags_;
......@@ -813,6 +813,8 @@ class Page : public MemoryChunk {
return &categories_[type];
}
inline void InitializeFreeListCategories();
bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
size_t wasted_memory() { return wasted_memory_; }
......@@ -842,14 +844,6 @@ class Page : public MemoryChunk {
private:
enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
template <InitializationMode mode = kFreeMemory>
static Page* Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable, PagedSpace* owner);
static Page* Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable, SemiSpace* owner);
inline void InitializeFreeListCategories();
void InitializeAsAnchor(Space* owner);
friend class MemoryAllocator;
......@@ -2107,9 +2101,6 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
accounting_stats_.IncreaseCapacity(bytes);
}
// Releases an unused page and shrinks the space.
void ReleasePage(Page* page);
void AccountAddedPage(Page* page);
void AccountRemovedPage(Page* page);
void RefineAllocatedBytesAfterSweeping(Page* page);
......@@ -2117,6 +2108,12 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// The dummy page that anchors the linked list of pages.
Page* anchor() { return &anchor_; }
Page* InitializePage(MemoryChunk* chunk, Executability executable);
void ReleasePage(Page* page);
void AddPage(Page* page);
// Remove a page if it has at least |size_in_bytes| bytes available that can
// be used for allocation.
Page* RemovePageSafe(int size_in_bytes);
#ifdef VERIFY_HEAP
// Verify integrity of this space.
......@@ -2180,11 +2177,6 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
std::unique_ptr<ObjectIterator> GetObjectIterator() override;
// Remove a page if it has at least |size_in_bytes| bytes available that can
// be used for allocation.
Page* RemovePageSafe(int size_in_bytes);
void AddPage(Page* page);
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
......@@ -2333,6 +2325,7 @@ class SemiSpace : public Space {
void RemovePage(Page* page);
void PrependPage(Page* page);
Page* InitializePage(MemoryChunk* chunk, Executability executable);
// Age mark accessors.
Address age_mark() { return age_mark_; }
......
......@@ -52,8 +52,6 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
allocator()->AllocatePage(MemoryAllocator::PageAreaSize(OLD_SPACE),
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
heap()->old_space()->UnlinkFreeListCategories(page);
heap()->old_space()->AccountRemovedPage(page);
EXPECT_NE(nullptr, page);
const int page_size = getpagesize();
void* start_address = static_cast<void*>(page->address());
......@@ -72,8 +70,6 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
allocator()->AllocatePage(MemoryAllocator::PageAreaSize(OLD_SPACE),
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
heap()->old_space()->UnlinkFreeListCategories(page);
heap()->old_space()->AccountRemovedPage(page);
EXPECT_NE(nullptr, page);
const int page_size = getpagesize();
void* start_address = static_cast<void*>(page->address());
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment