Commit 91c12223 authored by Hannes Payer, committed by Commit Bot

[heap] Remove anchor page from Space.

Replaces the anchor-page-based circular doubly linked list
with a doubly linked list whose ends point to nullptr.

Fixes a memory leak when rewinding pages.

The large pages list will move to the new list implementation
in a follow-up CL.

Change-Id: I2933a5e222d4ca768f4b555c47ed0d7a7027aa73
Reviewed-on: https://chromium-review.googlesource.com/1060973
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53288}
parent 131866fb
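
For orientation before the diff, here is a minimal sketch (illustrative only, not code from this CL) contrasting the two linking schemes the commit message describes: the old scheme threads a space's pages through a dummy anchor page, making the list circular, while the new scheme keeps plain front/back pointers and lets the ends point to nullptr.

// Hypothetical sketch; Node stands in for MemoryChunk, and neither struct
// is one of the CL's actual classes.
struct Node {
  Node* prev = nullptr;
  Node* next = nullptr;
};

// Old scheme: a dummy anchor node makes the list circular. Emptiness is
// "anchor.next == &anchor", and every traversal must stop at the anchor.
struct AnchoredList {
  Node anchor;
  AnchoredList() { anchor.prev = anchor.next = &anchor; }
  bool Empty() const { return anchor.next == &anchor; }
};

// New scheme: no sentinel object exists; the ends point to nullptr, so a
// traversal is simply "for (Node* n = front; n != nullptr; n = n->next)".
struct NullTerminatedList {
  Node* front = nullptr;
  Node* back = nullptr;
  bool Empty() const { return front == nullptr && back == nullptr; }
};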
@@ -2814,6 +2814,7 @@ v8_component("v8_libbase") {
     "src/base/ieee754.h",
     "src/base/iterator.h",
     "src/base/lazy-instance.h",
+    "src/base/list.h",
     "src/base/logging.cc",
     "src/base/logging.h",
     "src/base/macros.h",
......
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASE_LIST_H_
#define V8_BASE_LIST_H_

#include <atomic>

#include "src/base/logging.h"

// TODO(hpayer): Remove as soon as LargePage is ported to use List.
namespace v8 {
namespace internal {
class LargePage;
}
}  // namespace v8

namespace v8 {
namespace base {

template <class T>
class List {
 public:
  List() : front_(nullptr), back_(nullptr) {}

  void PushBack(T* element) {
    DCHECK(!element->list_node().next());
    DCHECK(!element->list_node().prev());
    if (back_) {
      DCHECK(front_);
      InsertAfter(element, back_);
    } else {
      AddFirstElement(element);
    }
  }

  void PushFront(T* element) {
    DCHECK(!element->list_node().next());
    DCHECK(!element->list_node().prev());
    if (front_) {
      DCHECK(back_);
      InsertBefore(element, front_);
    } else {
      AddFirstElement(element);
    }
  }

  void Remove(T* element) {
    DCHECK(Contains(element));
    if (back_ == element) {
      back_ = element->list_node().prev();
    }
    if (front_ == element) {
      front_ = element->list_node().next();
    }
    T* next = element->list_node().next();
    T* prev = element->list_node().prev();
    if (next) next->list_node().set_prev(prev);
    if (prev) prev->list_node().set_next(next);
    element->list_node().set_prev(nullptr);
    element->list_node().set_next(nullptr);
  }

  bool Contains(T* element) {
    T* it = front_;
    while (it) {
      if (it == element) return true;
      it = it->list_node().next();
    }
    return false;
  }

  bool Empty() { return !front_ && !back_; }

  T* front() { return front_; }
  T* back() { return back_; }

 private:
  void AddFirstElement(T* element) {
    DCHECK(!back_);
    DCHECK(!front_);
    DCHECK(!element->list_node().next());
    DCHECK(!element->list_node().prev());
    element->list_node().set_prev(nullptr);
    element->list_node().set_next(nullptr);
    front_ = element;
    back_ = element;
  }

  void InsertAfter(T* element, T* other) {
    T* other_next = other->list_node().next();
    element->list_node().set_next(other_next);
    element->list_node().set_prev(other);
    other->list_node().set_next(element);
    if (other_next)
      other_next->list_node().set_prev(element);
    else
      back_ = element;
  }

  void InsertBefore(T* element, T* other) {
    T* other_prev = other->list_node().prev();
    element->list_node().set_next(other);
    element->list_node().set_prev(other_prev);
    other->list_node().set_prev(element);
    if (other_prev) {
      other_prev->list_node().set_next(element);
    } else {
      front_ = element;
    }
  }

  T* front_;
  T* back_;
};

template <class T>
class ListNode {
 public:
  ListNode() { Initialize(); }

  T* next() { return next_; }
  T* prev() { return prev_; }

  void Initialize() {
    next_ = nullptr;
    prev_ = nullptr;
  }

 private:
  void set_next(T* next) { next_ = next; }
  void set_prev(T* prev) { prev_ = prev; }

  T* next_;
  T* prev_;

  friend class List<T>;
  // TODO(hpayer): Remove as soon as LargePage is ported to use List.
  friend class v8::internal::LargePage;
};

}  // namespace base
}  // namespace v8

#endif  // V8_BASE_LIST_H_
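
A minimal usage sketch of the new container (mirroring the unit test added at the end of this CL): the element type embeds a ListNode<T> and exposes it through list_node(), which is the only hook List<T> relies on. The Chunk class and Example function below are illustrative, not part of the CL.

#include "src/base/list.h"

// Illustrative intrusive element: the node lives inside the element itself,
// so insertion and removal never allocate.
class Chunk {
 public:
  v8::base::ListNode<Chunk>& list_node() { return list_node_; }

 private:
  v8::base::ListNode<Chunk> list_node_;
};

void Example() {
  v8::base::List<Chunk> list;
  Chunk a, b;
  list.PushBack(&a);  // list: a
  list.PushBack(&b);  // list: a, b
  // Remove() resets the element's prev/next to nullptr, so the element can
  // later be pushed onto another list (page moving relies on this).
  list.Remove(&a);    // list: b
}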
@@ -489,13 +489,12 @@ AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
 void Heap::UpdateAllocationSite(Map* map, HeapObject* object,
                                 PretenuringFeedbackMap* pretenuring_feedback) {
   DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
-  DCHECK(InFromSpace(object) ||
-         (InToSpace(object) &&
-          Page::FromAddress(object->address())
-              ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) ||
-         (!InNewSpace(object) &&
-          Page::FromAddress(object->address())
-              ->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)));
+  DCHECK(
+      InFromSpace(object) ||
+      (InToSpace(object) && Page::FromAddress(object->address())
+                                ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) ||
+      (!InNewSpace(object) && Page::FromAddress(object->address())
+                                  ->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)));
   if (!FLAG_allocation_site_pretenuring ||
       !AllocationSite::CanTrack(map->instance_type()))
     return;
......
@@ -1978,7 +1978,7 @@ void Heap::EvacuateYoungGeneration() {
   PageRange range(new_space()->bottom(), new_space()->top());
   for (auto it = range.begin(); it != range.end();) {
     Page* p = (*++it)->prev_page();
-    p->Unlink();
+    new_space()->from_space().RemovePage(p);
     Page::ConvertNewToOld(p);
     if (incremental_marking()->IsMarking())
       mark_compact_collector()->RecordLiveSlotsOnPage(p);
@@ -4530,7 +4530,7 @@ HeapObject* Heap::EnsureImmovableCode(HeapObject* heap_object,
   DCHECK_GE(object_size, 0);
   if (!Heap::IsImmovable(heap_object)) {
     if (isolate()->serializer_enabled() ||
-        code_space_->FirstPage()->Contains(heap_object->address())) {
+        code_space_->first_page()->Contains(heap_object->address())) {
       MemoryChunk::FromAddress(heap_object->address())->MarkNeverEvacuate();
     } else {
       // Discard the first code allocation, which was on a page where it could
......
@@ -1365,7 +1365,7 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
       page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
       break;
     case NEW_TO_OLD: {
-      page->Unlink();
+      page->heap()->new_space()->from_space().RemovePage(page);
       Page* new_page = Page::ConvertNewToOld(page);
       DCHECK(!new_page->InNewSpace());
       new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
@@ -3160,7 +3160,7 @@ void MarkCompactCollector::PostProcessEvacuationCandidates() {
     } else {
       DCHECK(p->IsEvacuationCandidate());
       DCHECK(p->SweepingDone());
-      p->Unlink();
+      p->owner()->memory_chunk_list().Remove(p);
     }
   }
   DCHECK_EQ(aborted_pages_verified, aborted_pages);
@@ -3221,6 +3221,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
                    static_cast<void*>(p));
       }
       ArrayBufferTracker::FreeAll(p);
+      space->memory_chunk_list().Remove(p);
       space->ReleasePage(p);
       continue;
     }
......
@@ -45,7 +45,7 @@ HeapObject* SemiSpaceIterator::Next() {
     if (Page::IsAlignedToPageSize(current_)) {
       Page* page = Page::FromAllocationAreaAddress(current_);
       page = page->next_page();
-      DCHECK(!page->is_anchor());
+      DCHECK(page);
       current_ = page->area_start();
       if (current_ == limit_) return nullptr;
     }
......
@@ -34,7 +34,7 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
     : cur_addr_(kNullAddress),
       cur_end_(kNullAddress),
       space_(space),
-      page_range_(space->anchor()->next_page(), space->anchor()),
+      page_range_(space->first_page(), nullptr),
       current_page_(page_range_.begin()) {}

 HeapObjectIterator::HeapObjectIterator(Page* page)
@@ -531,14 +531,6 @@ Address MemoryAllocator::AllocateAlignedMemory(
   return base;
 }

-void Page::InitializeAsAnchor(Space* space) {
-  set_owner(space);
-  set_next_chunk(this);
-  set_prev_chunk(this);
-  SetFlags(0, static_cast<uintptr_t>(~0));
-  SetFlag(ANCHOR);
-}
-
 Heap* MemoryChunk::synchronized_heap() {
   return reinterpret_cast<Heap*>(
       base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
@@ -632,8 +624,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->allocated_bytes_ = chunk->area_size();
   chunk->wasted_memory_ = 0;
   chunk->young_generation_bitmap_ = nullptr;
-  chunk->set_next_chunk(nullptr);
-  chunk->set_prev_chunk(nullptr);
   chunk->local_tracker_ = nullptr;

   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
@@ -681,6 +671,7 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
   heap()->incremental_marking()->SetOldSpacePageFlags(page);
   page->AllocateFreeListCategories();
   page->InitializeFreeListCategories();
+  page->list_node().Initialize();
   page->InitializationMemoryFence();
   return page;
 }
@@ -695,6 +686,7 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
   Page* page = static_cast<Page*>(chunk);
   heap()->incremental_marking()->SetNewSpacePageFlags(page);
   page->AllocateLocalTracker();
+  page->list_node().Initialize();
 #ifdef ENABLE_MINOR_MC
   if (FLAG_minor_mc) {
     page->AllocateYoungGenerationBitmap();
@@ -753,7 +745,7 @@ void Page::ReleaseFreeListCategories() {
 }

 Page* Page::ConvertNewToOld(Page* old_page) {
-  DCHECK(!old_page->is_anchor());
+  DCHECK(old_page);
   DCHECK(old_page->InNewSpace());
   OldSpace* old_space = old_page->heap()->old_space();
   old_page->set_owner(old_space);
@@ -773,25 +765,6 @@ bool MemoryChunk::IsPagedSpace() const {
   return owner()->identity() != LO_SPACE;
 }

-void MemoryChunk::InsertAfter(MemoryChunk* other) {
-  MemoryChunk* other_next = other->next_chunk();
-  set_next_chunk(other_next);
-  set_prev_chunk(other);
-  other_next->set_prev_chunk(this);
-  other->set_next_chunk(this);
-}
-
-void MemoryChunk::Unlink() {
-  MemoryChunk* next_element = next_chunk();
-  MemoryChunk* prev_element = prev_chunk();
-  next_element->set_prev_chunk(prev_element);
-  prev_element->set_next_chunk(next_element);
-  set_prev_chunk(nullptr);
-  set_next_chunk(nullptr);
-}
-
 MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
                                             size_t commit_area_size,
                                             Executability executable,
@@ -1464,7 +1437,7 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                        Executability executable)
-    : SpaceWithLinearArea(heap, space), executable_(executable), anchor_(this) {
+    : SpaceWithLinearArea(heap, space), executable_(executable) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();
 }
@@ -1477,12 +1450,11 @@ bool PagedSpace::HasBeenSetUp() { return true; }
 void PagedSpace::TearDown() {
-  for (auto it = begin(); it != end();) {
-    Page* page = *(it++);  // Will be erased.
-    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
+  while (!memory_chunk_list_.Empty()) {
+    MemoryChunk* chunk = memory_chunk_list_.front();
+    memory_chunk_list_.Remove(chunk);
+    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
   }
-  anchor_.set_next_page(&anchor_);
-  anchor_.set_prev_page(&anchor_);
   accounting_stats_.Clear();
 }
@@ -1526,7 +1498,6 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
   DCHECK(identity() == other->identity());
   // Unmerged fields:
   //   area_size_
-  //   anchor_

   other->FreeLinearAllocationArea();
@@ -1610,7 +1581,7 @@ Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
 size_t PagedSpace::AddPage(Page* page) {
   CHECK(page->SweepingDone());
   page->set_owner(this);
-  page->InsertAfter(anchor()->prev_page());
+  memory_chunk_list_.PushBack(page);
   AccountCommitted(page->size());
   IncreaseCapacity(page->area_size());
   IncreaseAllocatedBytes(page->allocated_bytes(), page);
@@ -1619,7 +1590,7 @@ size_t PagedSpace::AddPage(Page* page) {
 void PagedSpace::RemovePage(Page* page) {
   CHECK(page->SweepingDone());
-  page->Unlink();
+  memory_chunk_list_.Remove(page);
   UnlinkFreeListCategories(page);
   DecreaseAllocatedBytes(page->allocated_bytes(), page);
   DecreaseCapacity(page->area_size());
@@ -1815,11 +1786,6 @@ void PagedSpace::ReleasePage(Page* page) {
     allocation_info_.Reset(kNullAddress, kNullAddress);
   }

-  // If page is still in a list, unlink it from that list.
-  if (page->next_chunk() != nullptr) {
-    DCHECK_NOT_NULL(page->prev_chunk());
-    page->Unlink();
-  }
   AccountUncommitted(page->size());
   accounting_stats_.DecreaseCapacity(page->area_size());
   heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
@@ -2101,23 +2067,29 @@ bool SemiSpace::EnsureCurrentCapacity() {
   if (is_committed()) {
     const int expected_pages =
         static_cast<int>(current_capacity_ / Page::kPageSize);
+    MemoryChunk* current_page = first_page();
     int actual_pages = 0;
-    Page* current_page = anchor()->next_page();
-    while (current_page != anchor()) {
+
+    // First iterate through the pages list until expected pages if so many
+    // pages exist.
+    while (current_page != nullptr && actual_pages < expected_pages) {
       actual_pages++;
-      current_page = current_page->next_page();
-      if (actual_pages > expected_pages) {
-        Page* to_remove = current_page->prev_page();
-        // Make sure we don't overtake the actual top pointer.
-        CHECK_NE(to_remove, current_page_);
-        to_remove->Unlink();
-        // Clear new space flags to avoid this page being treated as a new
-        // space page that is potentially being swept.
-        to_remove->SetFlags(0, Page::kIsInNewSpaceMask);
-        heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
-            to_remove);
-      }
+      current_page = current_page->list_node().next();
     }
+
+    // Free all overallocated pages which are behind current_page.
+    while (current_page) {
+      MemoryChunk* next_current = current_page->list_node().next();
+      memory_chunk_list_.Remove(current_page);
+      // Clear new space flags to avoid this page being treated as a new
+      // space page that is potentially being swept.
+      current_page->SetFlags(0, Page::kIsInNewSpaceMask);
+      heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
+          current_page);
+      current_page = next_current;
+    }
+
+    // Add more pages if we have less than expected_pages.
     IncrementalMarking::NonAtomicMarkingState* marking_state =
         heap()->incremental_marking()->non_atomic_marking_state();
     while (actual_pages < expected_pages) {
@@ -2127,9 +2099,9 @@ bool SemiSpace::EnsureCurrentCapacity() {
           Page::kAllocatableMemory, this, NOT_EXECUTABLE);
       if (current_page == nullptr) return false;
       DCHECK_NOT_NULL(current_page);
-      current_page->InsertAfter(anchor());
+      memory_chunk_list_.PushBack(current_page);
       marking_state->ClearLiveness(current_page);
-      current_page->SetFlags(anchor()->prev_page()->GetFlags(),
+      current_page->SetFlags(first_page()->GetFlags(),
                              static_cast<uintptr_t>(Page::kCopyAllFlags));
       heap()->CreateFillerObjectAt(current_page->area_start(),
                                    static_cast<int>(current_page->area_size()),
@@ -2398,8 +2370,6 @@ void NewSpace::Verify() {
     } else {
       // At end of page, switch to next page.
       Page* page = Page::FromAllocationAreaAddress(current)->next_page();
-      // Next page should be valid.
-      CHECK(!page->is_anchor());
       current = page->area_start();
     }
   }
@@ -2435,18 +2405,16 @@ void SemiSpace::TearDown() {
 bool SemiSpace::Commit() {
   DCHECK(!is_committed());
-  Page* current = anchor();
   const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
   for (int pages_added = 0; pages_added < num_pages; pages_added++) {
     Page* new_page =
         heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
             Page::kAllocatableMemory, this, NOT_EXECUTABLE);
     if (new_page == nullptr) {
-      RewindPages(current, pages_added);
+      RewindPages(pages_added);
       return false;
     }
-    new_page->InsertAfter(current);
-    current = new_page;
+    memory_chunk_list_.PushBack(new_page);
   }
   Reset();
   AccountCommitted(current_capacity_);
@@ -2460,12 +2428,12 @@ bool SemiSpace::Commit() {
 bool SemiSpace::Uncommit() {
   DCHECK(is_committed());
-  for (auto it = begin(); it != end();) {
-    Page* p = *(it++);
-    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(p);
+  while (!memory_chunk_list_.Empty()) {
+    MemoryChunk* chunk = memory_chunk_list_.front();
+    memory_chunk_list_.Remove(chunk);
+    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
   }
-  anchor()->set_next_page(anchor());
-  anchor()->set_prev_page(anchor());
+  current_page_ = nullptr;
   AccountUncommitted(current_capacity_);
   committed_ = false;
   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
@@ -2492,8 +2460,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
   const size_t delta = new_capacity - current_capacity_;
   DCHECK(IsAligned(delta, AllocatePageSize()));
   const int delta_pages = static_cast<int>(delta / Page::kPageSize);
-  Page* last_page = anchor()->prev_page();
-  DCHECK_NE(last_page, anchor());
+  DCHECK(last_page());
   IncrementalMarking::NonAtomicMarkingState* marking_state =
       heap()->incremental_marking()->non_atomic_marking_state();
   for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
@@ -2501,29 +2468,26 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
         heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
             Page::kAllocatableMemory, this, NOT_EXECUTABLE);
     if (new_page == nullptr) {
-      RewindPages(last_page, pages_added);
+      RewindPages(pages_added);
       return false;
     }
-    new_page->InsertAfter(last_page);
+    memory_chunk_list_.PushBack(new_page);
     marking_state->ClearLiveness(new_page);
     // Duplicate the flags that was set on the old page.
-    new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
-    last_page = new_page;
+    new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
   }
   AccountCommitted(delta);
   current_capacity_ = new_capacity;
   return true;
 }

-void SemiSpace::RewindPages(Page* start, int num_pages) {
-  Page* new_last_page = nullptr;
-  Page* last_page = start;
+void SemiSpace::RewindPages(int num_pages) {
+  DCHECK_GT(num_pages, 0);
+  DCHECK(last_page());
   while (num_pages > 0) {
-    DCHECK_NE(last_page, anchor());
-    new_last_page = last_page->prev_page();
-    last_page->prev_page()->set_next_page(last_page->next_page());
-    last_page->next_page()->set_prev_page(last_page->prev_page());
-    last_page = new_last_page;
+    MemoryChunk* last = last_page();
+    memory_chunk_list_.Remove(last);
+    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
     num_pages--;
   }
 }
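
The rewritten RewindPages above is where the memory leak mentioned in the commit message was fixed: the old version only spliced pages out of the circular list, while the new one also hands each removed page back to the memory allocator. A reduced sketch of the corrected pattern, with hypothetical List and Allocator types standing in for memory_chunk_list_ and V8's memory allocator:

// Hypothetical reduction of the fix: every page trimmed during a rewind
// must be unlinked AND released, otherwise its backing memory leaks.
template <typename List, typename Allocator>
void RewindPages(List& list, Allocator& allocator, int num_pages) {
  while (num_pages-- > 0) {
    auto* last = list.back();  // trim from the tail, as SemiSpace does
    list.Remove(last);         // unlinking alone was all the old code did
    allocator.Free(last);      // the step whose absence caused the leak
  }
}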
@@ -2534,19 +2498,9 @@ bool SemiSpace::ShrinkTo(size_t new_capacity) {
   DCHECK_LT(new_capacity, current_capacity_);
   if (is_committed()) {
     const size_t delta = current_capacity_ - new_capacity;
-    DCHECK(IsAligned(delta, AllocatePageSize()));
+    DCHECK(IsAligned(delta, Page::kPageSize));
     int delta_pages = static_cast<int>(delta / Page::kPageSize);
-    Page* new_last_page;
-    Page* last_page;
-    while (delta_pages > 0) {
-      last_page = anchor()->prev_page();
-      new_last_page = last_page->prev_page();
-      new_last_page->set_next_page(anchor());
-      anchor()->set_prev_page(new_last_page);
-      heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
-          last_page);
-      delta_pages--;
-    }
+    RewindPages(delta_pages);
     AccountUncommitted(delta);
     heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
   }
@@ -2555,10 +2509,6 @@ bool SemiSpace::ShrinkTo(size_t new_capacity) {
 }

 void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
-  anchor_.set_owner(this);
-  anchor_.prev_page()->set_next_page(&anchor_);
-  anchor_.next_page()->set_prev_page(&anchor_);
   for (Page* page : *this) {
     page->set_owner(this);
     page->SetFlags(flags, mask);
@@ -2579,30 +2529,33 @@ void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
 void SemiSpace::Reset() {
-  DCHECK_NE(anchor_.next_page(), &anchor_);
-  current_page_ = anchor_.next_page();
+  DCHECK(first_page());
+  DCHECK(last_page());
+  current_page_ = first_page();
   pages_used_ = 0;
 }

 void SemiSpace::RemovePage(Page* page) {
   if (current_page_ == page) {
-    current_page_ = page->prev_page();
+    if (page->prev_page()) {
+      current_page_ = page->prev_page();
+    }
   }
-  page->Unlink();
+  memory_chunk_list_.Remove(page);
 }

 void SemiSpace::PrependPage(Page* page) {
   page->SetFlags(current_page()->GetFlags(),
                  static_cast<uintptr_t>(Page::kCopyAllFlags));
   page->set_owner(this);
-  page->InsertAfter(anchor());
+  memory_chunk_list_.PushFront(page);
   pages_used_++;
 }
 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
   // We won't be swapping semispaces without data in them.
-  DCHECK_NE(from->anchor_.next_page(), &from->anchor_);
-  DCHECK_NE(to->anchor_.next_page(), &to->anchor_);
+  DCHECK(from->first_page());
+  DCHECK(to->first_page());

   intptr_t saved_to_space_flags = to->current_page()->GetFlags();
@@ -2612,7 +2565,7 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
   std::swap(from->minimum_capacity_, to->minimum_capacity_);
   std::swap(from->age_mark_, to->age_mark_);
   std::swap(from->committed_, to->committed_);
-  std::swap(from->anchor_, to->anchor_);
+  std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
   std::swap(from->current_page_, to->current_page_);

   to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
@@ -2640,9 +2593,7 @@ void SemiSpace::Print() {}
 #ifdef VERIFY_HEAP
 void SemiSpace::Verify() {
   bool is_from_space = (id_ == kFromSpace);
-  Page* page = anchor_.next_page();
-  CHECK(anchor_.owner() == this);
-  while (page != &anchor_) {
+  for (Page* page : *this) {
     CHECK_EQ(page->owner(), this);
     CHECK(page->InNewSpace());
     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
@@ -2660,8 +2611,8 @@ void SemiSpace::Verify() {
             !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
       }
     }
-    CHECK_EQ(page->prev_page()->next_page(), page);
-    page = page->next_page();
+    CHECK_IMPLIES(page->list_node().prev(),
+                  page->list_node().prev()->list_node().next() == page);
   }
 }
 #endif
@@ -2681,8 +2632,8 @@ void SemiSpace::AssertValidRange(Address start, Address end) {
   } else {
     while (page != end_page) {
       page = page->next_page();
-      DCHECK_NE(page, space->anchor());
     }
+    DCHECK(page);
   }
 }
 #endif
......
@@ -15,6 +15,7 @@
 #include "src/allocation.h"
 #include "src/base/atomic-utils.h"
 #include "src/base/iterator.h"
+#include "src/base/list.h"
 #include "src/base/platform/mutex.h"
 #include "src/cancelable-task.h"
 #include "src/flags.h"
@@ -303,12 +304,9 @@ class MemoryChunk {
   //   triggering on the same page.
   COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,

-  // |ANCHOR|: Flag is set if page is an anchor.
-  ANCHOR = 1u << 17,
-
   // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
   // to iterate the page.
-  SWEEP_TO_ITERATE = 1u << 18
+  SWEEP_TO_ITERATE = 1u << 17
 };

 using Flags = uintptr_t;
@@ -362,17 +360,16 @@ class MemoryChunk {
       + kIntptrSize  // intptr_t live_byte_count_
       + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // SlotSet* array
       + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet* array
       + kPointerSize  // InvalidatedSlots* invalidated_slots_
       + kPointerSize  // SkipList* skip_list_
       + kPointerSize  // AtomicValue high_water_mark_
       + kPointerSize  // base::Mutex* mutex_
       + kPointerSize  // base::AtomicWord concurrent_sweeping_
       + kPointerSize  // base::Mutex* page_protection_change_mutex_
       + kPointerSize  // unitptr_t write_unprotect_counter_
       + kSizetSize    // size_t allocated_bytes_
       + kSizetSize    // size_t wasted_memory_
-      + kPointerSize  // AtomicValue next_chunk_
-      + kPointerSize  // AtomicValue prev_chunk_
+      + kPointerSize * 2  // base::ListNode
       + kPointerSize * kNumberOfCategories
       //  FreeListCategory categories_[kNumberOfCategories]
       + kPointerSize  // LocalArrayBufferTracker* local_tracker_
@@ -606,23 +603,12 @@ class MemoryChunk {
   bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }

-  MemoryChunk* next_chunk() { return next_chunk_.Value(); }
-  MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
-  void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
-  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
-
   Space* owner() const { return owner_.Value(); }
   void set_owner(Space* space) { owner_.SetValue(space); }

   bool IsPagedSpace() const;

-  void InsertAfter(MemoryChunk* other);
-  void Unlink();
-
   // Emits a memory barrier. For TSAN builds the other thread needs to perform
   // MemoryChunk::synchronized_heap() to simulate the barrier.
   void InitializationMemoryFence();
@@ -630,6 +616,8 @@ class MemoryChunk {
   void SetReadAndExecutable();
   void SetReadAndWritable();

+  base::ListNode<MemoryChunk>& list_node() { return list_node_; }
+
  protected:
   static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                  Address area_start, Address area_end,
@@ -702,10 +690,7 @@ class MemoryChunk {
   // Freed memory that was not added to the free list.
   size_t wasted_memory_;

-  // next_chunk_ holds a pointer of type MemoryChunk
-  base::AtomicValue<MemoryChunk*> next_chunk_;
-  // prev_chunk_ holds a pointer of type MemoryChunk
-  base::AtomicValue<MemoryChunk*> prev_chunk_;
+  base::ListNode<MemoryChunk> list_node_;

   FreeListCategory* categories_[kNumberOfCategories];
@@ -779,18 +764,12 @@ class Page : public MemoryChunk {
   static Page* ConvertNewToOld(Page* old_page);

-  // Create a Page object that is only used as anchor for the doubly-linked
-  // list of real pages.
-  explicit Page(Space* owner) { InitializeAsAnchor(owner); }
-
   inline void MarkNeverAllocateForTesting();
   inline void MarkEvacuationCandidate();
   inline void ClearEvacuationCandidate();

-  Page* next_page() { return static_cast<Page*>(next_chunk()); }
-  Page* prev_page() { return static_cast<Page*>(prev_chunk()); }
-  void set_next_page(Page* page) { set_next_chunk(page); }
-  void set_prev_page(Page* page) { set_prev_chunk(page); }
+  Page* next_page() { return static_cast<Page*>(list_node_.next()); }
+  Page* prev_page() { return static_cast<Page*>(list_node_.prev()); }

   template <typename Callback>
   inline void ForAllFreeListCategories(Callback callback) {
@@ -830,8 +809,6 @@ class Page : public MemoryChunk {
     return categories_[type];
   }

-  bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
-
   size_t wasted_memory() { return wasted_memory_; }
   void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
   size_t allocated_bytes() { return allocated_bytes_; }
@@ -863,8 +840,6 @@ class Page : public MemoryChunk {
  private:
   enum InitializationMode { kFreeMemory, kDoNotFreeMemory };

-  void InitializeAsAnchor(Space* owner);
-
   friend class MemoryAllocator;
 };
@@ -880,10 +855,10 @@ class LargePage : public MemoryChunk {
   HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }

   inline LargePage* next_page() {
-    return static_cast<LargePage*>(next_chunk());
+    return static_cast<LargePage*>(list_node_.next());
   }

-  inline void set_next_page(LargePage* page) { set_next_chunk(page); }
+  inline void set_next_page(LargePage* page) { list_node_.set_next(page); }

   // Uncommit memory that is not in use anymore by the object. If the object
   // cannot be shrunk 0 is returned.
@@ -994,6 +969,11 @@ class Space : public Malloced {
   V8_EXPORT_PRIVATE void* GetRandomMmapAddr();

+  MemoryChunk* first_page() { return memory_chunk_list_.front(); }
+  MemoryChunk* last_page() { return memory_chunk_list_.back(); }
+
+  base::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
+
 #ifdef DEBUG
   virtual void Print() = 0;
 #endif
@@ -1006,6 +986,9 @@ class Space : public Malloced {
   std::vector<AllocationObserver*> allocation_observers_;

+  // The List manages the pages that belong to the given space.
+  base::List<MemoryChunk> memory_chunk_list_;
+
  private:
   bool allocation_observers_paused_;
   Heap* heap_;
@@ -2196,11 +2179,10 @@ class V8_EXPORT_PRIVATE PagedSpace
   void RefineAllocatedBytesAfterSweeping(Page* page);

-  // The dummy page that anchors the linked list of pages.
-  Page* anchor() { return &anchor_; }
-
   Page* InitializePage(MemoryChunk* chunk, Executability executable);
+
   void ReleasePage(Page* page);

   // Adds the page to this space and returns the number of bytes added to the
   // free list of the space.
   size_t AddPage(Page* page);
@@ -2234,9 +2216,6 @@ class V8_EXPORT_PRIVATE PagedSpace
   static void ResetCodeStatistics(Isolate* isolate);
 #endif

-  Page* FirstPage() { return anchor_.next_page(); }
-  Page* LastPage() { return anchor_.prev_page(); }
-
   bool CanExpand(size_t size);

   // Returns the number of total pages in this space.
@@ -2262,8 +2241,10 @@ class V8_EXPORT_PRIVATE PagedSpace
   inline void UnlinkFreeListCategories(Page* page);
   inline size_t RelinkFreeListCategories(Page* page);

-  iterator begin() { return iterator(anchor_.next_page()); }
-  iterator end() { return iterator(&anchor_); }
+  Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
+
+  iterator begin() { return iterator(first_page()); }
+  iterator end() { return iterator(nullptr); }

   // Shrink immortal immovable pages of the space to be exactly the size needed
   // using the high water mark.
@@ -2294,7 +2275,7 @@ class V8_EXPORT_PRIVATE PagedSpace
   // smaller, initial pages.
   virtual bool snapshotable() { return true; }

-  bool HasPages() { return anchor_.next_page() != &anchor_; }
+  bool HasPages() { return first_page() != nullptr; }

   // Cleans up the space, frees all pages in this space except those belonging
   // to the initial chunk, uncommits addresses in the initial chunk.
@@ -2346,9 +2327,6 @@ class V8_EXPORT_PRIVATE PagedSpace
   // Accounting information for this space.
   AllocationStats accounting_stats_;

-  // The dummy page that anchors the double linked list of pages.
-  Page anchor_;
-
   // The space's free list.
   FreeList free_list_;
@@ -2384,7 +2362,6 @@ class SemiSpace : public Space {
         age_mark_(kNullAddress),
         committed_(false),
         id_(semispace),
-        anchor_(this),
         current_page_(nullptr),
         pages_used_(0) {}
@@ -2413,16 +2390,15 @@ class SemiSpace : public Space {
   // Returns the start address of the first page of the space.
   Address space_start() {
-    DCHECK_NE(anchor_.next_page(), anchor());
-    return anchor_.next_page()->area_start();
+    DCHECK_NE(memory_chunk_list_.front(), nullptr);
+    return memory_chunk_list_.front()->area_start();
   }

-  Page* first_page() { return anchor_.next_page(); }
   Page* current_page() { return current_page_; }
   int pages_used() { return pages_used_; }

   // Returns one past the end address of the space.
-  Address space_end() { return anchor_.prev_page()->area_end(); }
+  Address space_end() { return memory_chunk_list_.back()->area_end(); }

   // Returns the start address of the current page of the space.
   Address page_low() { return current_page_->area_start(); }
@@ -2436,7 +2412,7 @@ class SemiSpace : public Space {
     // that we need to account for the next page already for this check as we
     // could potentially fill the whole page after advancing.
     const bool reached_max_pages = (pages_used_ + 1) == max_pages();
-    if (next_page == anchor() || reached_max_pages) {
+    if (next_page == nullptr || reached_max_pages) {
       return false;
     }
     current_page_ = next_page;
@@ -2449,6 +2425,7 @@ class SemiSpace : public Space {
   void RemovePage(Page* page);
   void PrependPage(Page* page);
+
   Page* InitializePage(MemoryChunk* chunk, Executability executable);

   // Age mark accessors.
@@ -2482,8 +2459,10 @@ class SemiSpace : public Space {
     UNREACHABLE();
   }

-  iterator begin() { return iterator(anchor_.next_page()); }
-  iterator end() { return iterator(anchor()); }
+  Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
+
+  iterator begin() { return iterator(first_page()); }
+  iterator end() { return iterator(nullptr); }

   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
@@ -2503,9 +2482,8 @@ class SemiSpace : public Space {
 #endif

  private:
-  void RewindPages(Page* start, int num_pages);
+  void RewindPages(int num_pages);

-  inline Page* anchor() { return &anchor_; }
   inline int max_pages() {
     return static_cast<int>(current_capacity_ / Page::kPageSize);
   }
@@ -2529,8 +2507,8 @@ class SemiSpace : public Space {
   bool committed_;
   SemiSpaceId id_;

-  Page anchor_;
   Page* current_page_;
+
   int pages_used_;

   friend class NewSpace;
......
@@ -286,7 +286,7 @@ HeapObject* Deserializer<AllocatorT>::GetBackReferencedObject(int space) {
     case RO_SPACE:
       if (isolate()->heap()->deserialization_complete()) {
         PagedSpace* read_only_space = isolate()->heap()->read_only_space();
-        Page* page = read_only_space->FirstPage();
+        Page* page = read_only_space->first_page();
         for (uint32_t i = 0; i < back_reference.chunk_index(); ++i) {
           page = page->next_page();
         }
......
@@ -5817,7 +5817,7 @@ HEAP_TEST(Regress5831) {
   // Generate the code.
   Handle<Code> code = GenerateDummyImmovableCode(isolate);
   CHECK_GE(i::kMaxRegularHeapObjectSize, code->Size());
-  CHECK(!heap->code_space()->FirstPage()->Contains(code->address()));
+  CHECK(!heap->code_space()->first_page()->Contains(code->address()));

   // Ensure it's not in large object space.
   MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
......
@@ -209,15 +209,17 @@ TEST(MemoryAllocator) {
   {
     int total_pages = 0;
     OldSpace faked_space(heap);
+    CHECK(!faked_space.first_page());
+    CHECK(!faked_space.last_page());
     Page* first_page = memory_allocator->AllocatePage(
         faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
         NOT_EXECUTABLE);

-    first_page->InsertAfter(faked_space.anchor()->prev_page());
-    CHECK(first_page->next_page() == faked_space.anchor());
+    faked_space.memory_chunk_list().PushBack(first_page);
+    CHECK(first_page->next_page() == nullptr);
     total_pages++;

-    for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
+    for (Page* p = first_page; p != nullptr; p = p->next_page()) {
       CHECK(p->owner() == &faked_space);
     }
@@ -226,9 +228,9 @@ TEST(MemoryAllocator) {
         faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
         NOT_EXECUTABLE);
     total_pages++;
-    other->InsertAfter(first_page);
+    faked_space.memory_chunk_list().PushBack(other);
     int page_count = 0;
-    for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
+    for (Page* p = first_page; p != nullptr; p = p->next_page()) {
       CHECK(p->owner() == &faked_space);
       page_count++;
     }
......
@@ -163,7 +163,7 @@ TEST(Regress2060a) {
   Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();

   // Start second old-space page so that values land on evacuation candidate.
-  Page* first_page = heap->old_space()->anchor()->next_page();
+  Page* first_page = heap->old_space()->first_page();
   heap::SimulateFullSpace(heap->old_space());

   // Fill up weak map with values on an evacuation candidate.
@@ -202,7 +202,7 @@ TEST(Regress2060b) {
       factory->NewFunctionForTest(factory->function_string());

   // Start second old-space page so that keys land on evacuation candidate.
-  Page* first_page = heap->old_space()->anchor()->next_page();
+  Page* first_page = heap->old_space()->first_page();
   heap::SimulateFullSpace(heap->old_space());

   // Fill up weak map with keys on an evacuation candidate.
......
@@ -174,7 +174,7 @@ TEST(WeakSet_Regress2060a) {
   Handle<JSWeakSet> weakset = AllocateJSWeakSet(isolate);

   // Start second old-space page so that values land on evacuation candidate.
-  Page* first_page = heap->old_space()->anchor()->next_page();
+  Page* first_page = heap->old_space()->first_page();
   heap::SimulateFullSpace(heap->old_space());

   // Fill up weak set with values on an evacuation candidate.
@@ -213,7 +213,7 @@ TEST(WeakSet_Regress2060b) {
       factory->NewFunctionForTest(factory->function_string());

   // Start second old-space page so that keys land on evacuation candidate.
-  Page* first_page = heap->old_space()->anchor()->next_page();
+  Page* first_page = heap->old_space()->first_page();
   heap::SimulateFullSpace(heap->old_space());

   // Fill up weak set with keys on an evacuation candidate.
......
@@ -63,6 +63,7 @@ v8_source_set("unittests_sources") {
     "base/functional-unittest.cc",
     "base/ieee754-unittest.cc",
     "base/iterator-unittest.cc",
+    "base/list-unittest.cc",
    "base/logging-unittest.cc",
    "base/macros-unittest.cc",
    "base/ostreams-unittest.cc",
......
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/list.h"

#include "testing/gtest-support.h"

namespace v8 {
namespace base {

class TestChunk {
 public:
  base::ListNode<TestChunk>& list_node() { return list_node_; }
  base::ListNode<TestChunk> list_node_;
};

TEST(List, InsertAtTailAndRemove) {
  List<TestChunk> list;
  EXPECT_TRUE(list.Empty());
  TestChunk t1;
  list.PushBack(&t1);
  EXPECT_FALSE(list.Empty());
  EXPECT_TRUE(list.Contains(&t1));
  list.Remove(&t1);
  EXPECT_TRUE(list.Empty());
}

TEST(List, InsertAtHeadAndRemove) {
  List<TestChunk> list;
  EXPECT_TRUE(list.Empty());
  TestChunk t1;
  list.PushFront(&t1);
  EXPECT_FALSE(list.Empty());
  list.Remove(&t1);
  EXPECT_TRUE(list.Empty());
}

TEST(List, InsertMultipleAtTailAndRemoveFromTail) {
  List<TestChunk> list;
  EXPECT_TRUE(list.Empty());
  const int kSize = 10;
  TestChunk chunks[kSize];
  for (int i = 0; i < kSize; i++) {
    list.PushBack(&chunks[i]);
    EXPECT_EQ(list.back(), &chunks[i]);
  }
  for (int i = kSize - 1; i > 0; i--) {
    list.Remove(&chunks[i]);
    EXPECT_EQ(list.back(), &chunks[i - 1]);
  }
  list.Remove(&chunks[0]);
  EXPECT_TRUE(list.Empty());
}

TEST(List, InsertMultipleAtHeadAndRemoveFromHead) {
  List<TestChunk> list;
  EXPECT_TRUE(list.Empty());
  const int kSize = 10;
  TestChunk chunks[kSize];
  for (int i = 0; i < kSize; i++) {
    list.PushFront(&chunks[i]);
    EXPECT_EQ(list.front(), &chunks[i]);
  }
  for (int i = kSize - 1; i > 0; i--) {
    list.Remove(&chunks[i]);
    EXPECT_EQ(list.front(), &chunks[i - 1]);
  }
  list.Remove(&chunks[0]);
  EXPECT_TRUE(list.Empty());
}

TEST(List, InsertMultipleAtTailAndRemoveFromMiddle) {
  List<TestChunk> list;
  EXPECT_TRUE(list.Empty());
  const int kSize = 10;
  TestChunk chunks[kSize];
  for (int i = 0; i < kSize; i++) {
    list.PushBack(&chunks[i]);
    EXPECT_EQ(list.back(), &chunks[i]);
  }
  int i, j;
  for (i = kSize / 2 - 1, j = kSize / 2; i >= 0; i--, j++) {
    list.Remove(&chunks[i]);
    list.Remove(&chunks[j]);
  }
  EXPECT_TRUE(list.Empty());
}

}  // namespace base
}  // namespace v8