Commit 4e8736da authored by mlippautz, committed by Commit bot

[heap] Merge NewSpacePage into Page

BUG=chromium:581412
LOG=N

Review URL: https://codereview.chromium.org/1900423002

Cr-Commit-Position: refs/heads/master@{#35768}
parent fac7361c
@@ -395,7 +395,7 @@ bool Heap::OldGenerationAllocationLimitReached() {
 bool Heap::ShouldBePromoted(Address old_address, int object_size) {
-  NewSpacePage* page = NewSpacePage::FromAddress(old_address);
+  Page* page = Page::FromAddress(old_address);
   Address age_mark = new_space_.age_mark();
   return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
          (!page->ContainsLimit(age_mark) || old_address < age_mark);
@@ -476,7 +476,7 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
   Address object_address = object->address();
   Address memento_address = object_address + object->Size();
   Address last_memento_word_address = memento_address + kPointerSize;
-  if (!NewSpacePage::OnSamePage(object_address, last_memento_word_address)) {
+  if (!Page::OnSamePage(object_address, last_memento_word_address)) {
     return nullptr;
   }
   HeapObject* candidate = HeapObject::FromAddress(memento_address);
@@ -504,7 +504,7 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
   top = NewSpaceTop();
   DCHECK(memento_address == top ||
          memento_address + HeapObject::kHeaderSize <= top ||
-         !NewSpacePage::OnSamePage(memento_address, top - 1));
+         !Page::OnSamePage(memento_address, top - 1));
   if ((memento_address != top) && memento_candidate->IsValid()) {
     return memento_candidate;
   }
......
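The FindAllocationMemento hunks above depend on Page::OnSamePage, which simply compares the page base addresses obtained by masking off the in-page offset bits. A minimal standalone sketch of that masking follows; the page size here is an assumed stand-in, not V8's actual constant:

    #include <cassert>
    #include <cstdint>

    using Address = std::uintptr_t;
    constexpr Address kPageSize = Address{1} << 19;        // assumed page size
    constexpr Address kPageAlignmentMask = kPageSize - 1;  // in-page offset bits

    // Page::FromAddress: clear the offset bits to get the page base.
    constexpr Address PageStart(Address addr) {
      return addr & ~kPageAlignmentMask;
    }

    // Page::OnSamePage: two addresses share a page iff they share a base.
    constexpr bool OnSamePage(Address a, Address b) {
      return PageStart(a) == PageStart(b);
    }

    int main() {
      const Address base = 7 * kPageSize;
      assert(OnSamePage(base + 8, base + kPageSize - 8));
      assert(!OnSamePage(base + 8, base + kPageSize + 8));
      // FindAllocationMemento bails out with nullptr unless the object and
      // the memento word after it fit on a single page, as checked above.
      return 0;
    }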
@@ -945,7 +945,7 @@ void Heap::EnsureFillerObjectAtTop() {
   // may be uninitialized memory behind top. We fill the remainder of the page
   // with a filler.
   Address to_top = new_space_.top();
-  NewSpacePage* page = NewSpacePage::FromAddress(to_top - kPointerSize);
+  Page* page = Page::FromAddress(to_top - kPointerSize);
   if (page->Contains(to_top)) {
     int remaining_in_page = static_cast<int>(page->area_end() - to_top);
     CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
@@ -1552,7 +1552,8 @@ void PromotionQueue::Initialize() {
   front_ = rear_ =
       reinterpret_cast<struct Entry*>(heap_->new_space()->ToSpaceEnd());
   limit_ = reinterpret_cast<struct Entry*>(
-      Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
+      Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_))
+          ->area_start());
   emergency_stack_ = NULL;
 }
@@ -1560,7 +1561,7 @@ void PromotionQueue::Initialize() {
 void PromotionQueue::RelocateQueueHead() {
   DCHECK(emergency_stack_ == NULL);
-  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+  Page* p = Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
   struct Entry* head_start = rear_;
   struct Entry* head_end =
       Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));
@@ -1909,13 +1910,14 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
     // queue of unprocessed copied objects. Process them until the
     // queue is empty.
     while (new_space_front != new_space_.top()) {
-      if (!NewSpacePage::IsAtEnd(new_space_front)) {
+      if (!Page::IsAlignedToPageSize(new_space_front)) {
        HeapObject* object = HeapObject::FromAddress(new_space_front);
        new_space_front +=
            StaticScavengeVisitor::IterateBody(object->map(), object);
      } else {
-        new_space_front =
-            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
+        new_space_front = Page::FromAllocationAreaAddress(new_space_front)
+                              ->next_page()
+                              ->area_start();
      }
    }
@@ -4629,7 +4631,7 @@ void Heap::ZapFromSpace() {
   NewSpacePageIterator it(new_space_.FromSpaceStart(),
                           new_space_.FromSpaceEnd());
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     for (Address cursor = page->area_start(), limit = page->area_end();
          cursor < limit; cursor += kPointerSize) {
       Memory::Address_at(cursor) = kFromSpaceZapValue;
......
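Several hunks above rename Page::FromAllocationTop to Page::FromAllocationAreaAddress. The helper subtracts one word before masking because an allocation top (or limit) may sit exactly at the page's end address, which numerically belongs to the next page. A sketch of that edge case, with illustrative constants rather than V8's:

    #include <cassert>
    #include <cstdint>

    using Address = std::uintptr_t;
    constexpr Address kPageSize = Address{1} << 19;  // assumed page size
    constexpr Address kPointerSize = 8;              // assumed word size
    constexpr Address kPageAlignmentMask = kPageSize - 1;

    constexpr Address PageStart(Address addr) {
      return addr & ~kPageAlignmentMask;
    }

    // Shape of Page::FromAllocationAreaAddress: back up one word, then mask.
    constexpr Address PageOfAllocationArea(Address top_or_limit) {
      return PageStart(top_or_limit - kPointerSize);
    }

    int main() {
      const Address page = 3 * kPageSize;
      const Address full_top = page + kPageSize;  // top of a completely full page
      assert(PageStart(full_top) != page);             // naive mask: next page
      assert(PageOfAllocationArea(full_top) == page);  // subtract first: correct
      return 0;
    }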
@@ -322,7 +322,7 @@ class PromotionQueue {
   }

   Page* GetHeadPage() {
-    return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+    return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
   }

   void SetNewLimit(Address limit) {
@@ -330,7 +330,7 @@ class PromotionQueue {
     if (emergency_stack_) return;

     // If the limit is not on the same page, we can ignore it.
-    if (Page::FromAllocationTop(limit) != GetHeadPage()) return;
+    if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;

     limit_ = reinterpret_cast<struct Entry*>(limit);
......
@@ -348,7 +348,7 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
     NewSpace* space) {
   NewSpacePageIterator it(space);
   while (it.has_next()) {
-    NewSpacePage* p = it.next();
+    Page* p = it.next();
     SetNewSpacePageFlags(p, false);
   }
 }
@@ -361,7 +361,7 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
   DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

   LargePage* lop = heap_->lo_space()->first_page();
-  while (lop->is_valid()) {
+  while (LargePage::IsValid(lop)) {
     SetOldSpacePageFlags(lop, false, false);
     lop = lop->next_page();
   }
@@ -380,7 +380,7 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
 void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
   NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
   while (it.has_next()) {
-    NewSpacePage* p = it.next();
+    Page* p = it.next();
     SetNewSpacePageFlags(p, true);
   }
 }
@@ -393,7 +393,7 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
   ActivateIncrementalWriteBarrier(heap_->new_space());

   LargePage* lop = heap_->lo_space()->first_page();
-  while (lop->is_valid()) {
+  while (LargePage::IsValid(lop)) {
     SetOldSpacePageFlags(lop, true, is_compacting_);
     lop = lop->next_page();
   }
......
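The two loops above replace lop->is_valid() with LargePage::IsValid(lop). The old member function answered "am I null?" by being called through a possibly-null pointer, which is formally undefined behavior; the new static helper receives the pointer as an ordinary argument. A sketch of the pattern with illustrative types:

    #include <cassert>

    struct Chunk {
      Chunk* next = nullptr;

      // The null test now happens before any use of the pointer.
      static bool IsValid(Chunk* chunk) { return chunk != nullptr; }
    };

    int main() {
      Chunk a, b;
      a.next = &b;
      int visited = 0;
      // Mirrors the loop shape above: while (LargePage::IsValid(lop)) ...
      for (Chunk* c = &a; Chunk::IsValid(c); c = c->next) visited++;
      assert(visited == 2);
      return 0;
    }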
@@ -185,7 +185,7 @@ class IncrementalMarking {
     SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
   }

-  inline void SetNewSpacePageFlags(MemoryChunk* chunk) {
+  inline void SetNewSpacePageFlags(Page* chunk) {
     SetNewSpacePageFlags(chunk, IsMarking());
   }
......
@@ -134,10 +134,9 @@ static void VerifyMarking(NewSpace* space) {
   NewSpacePageIterator it(space->bottom(), end);
   // The bottom position is at the start of its page. Allows us to use
   // page->area_start() as start of range on all pages.
-  CHECK_EQ(space->bottom(),
-           NewSpacePage::FromAddress(space->bottom())->area_start());
+  CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     Address limit = it.has_next() ? page->area_end() : end;
     CHECK(limit == end || !page->Contains(end));
     VerifyMarking(space->heap(), page->area_start(), limit);
@@ -209,7 +208,7 @@ static void VerifyEvacuation(NewSpace* space) {
   VerifyEvacuationVisitor visitor;
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     Address current = page->area_start();
     Address limit = it.has_next() ? page->area_end() : space->top();
     CHECK(limit == space->top() || !page->Contains(space->top()));
@@ -375,7 +374,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
   NewSpacePageIterator it(space->bottom(), space->top());

   while (it.has_next()) {
-    NewSpacePage* p = it.next();
+    Page* p = it.next();
     CHECK(p->markbits()->IsClean());
     CHECK_EQ(0, p->LiveBytes());
   }
@@ -1803,9 +1802,9 @@ class MarkCompactCollector::EvacuateNewSpacePageVisitor final
  public:
   EvacuateNewSpacePageVisitor() : promoted_size_(0) {}

-  static void MoveToOldSpace(NewSpacePage* page, PagedSpace* owner) {
+  static void MoveToOldSpace(Page* page, PagedSpace* owner) {
     page->heap()->new_space()->ReplaceWithEmptyPage(page);
-    Page* new_page = Page::Convert(page, owner);
+    Page* new_page = Page::ConvertNewToOld(page, owner);
     new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
   }
@@ -1884,7 +1883,7 @@ void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
   NewSpace* space = heap()->new_space();
   NewSpacePageIterator it(space->bottom(), space->top());
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     DiscoverGreyObjectsOnPage(page);
     if (marking_deque()->IsFull()) return;
   }
@@ -3050,9 +3049,8 @@ class MarkCompactCollector::Evacuator : public Malloced {
   // evacuation.
   static int PageEvacuationThreshold() {
     if (FLAG_page_promotion)
-      return FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory /
-             100;
-    return NewSpacePage::kAllocatableMemory + kPointerSize;
+      return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
+    return Page::kAllocatableMemory + kPointerSize;
   }

   explicit Evacuator(MarkCompactCollector* collector)
@@ -3067,7 +3065,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
         duration_(0.0),
         bytes_compacted_(0) {}

-  inline bool EvacuatePage(MemoryChunk* chunk);
+  inline bool EvacuatePage(Page* chunk);

   // Merge back locally cached info sequentially. Note that this method needs
   // to be called from the main thread.
@@ -3101,7 +3099,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
   }

   template <IterationMode mode, class Visitor>
-  inline bool EvacuateSinglePage(MemoryChunk* p, Visitor* visitor);
+  inline bool EvacuateSinglePage(Page* p, Visitor* visitor);

   MarkCompactCollector* collector_;
@@ -3120,7 +3118,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
 };

 template <MarkCompactCollector::IterationMode mode, class Visitor>
-bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
+bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p,
                                                          Visitor* visitor) {
   bool success = false;
   DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
@@ -3154,28 +3152,27 @@ bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
   return success;
 }

-bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
+bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
   bool result = false;
-  DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
-            NewSpacePage::kSweepingDone);
-  switch (ComputeEvacuationMode(chunk)) {
+  DCHECK(page->SweepingDone());
+  switch (ComputeEvacuationMode(page)) {
     case kObjectsNewToOld:
-      result = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
+      result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_);
       DCHECK(result);
       USE(result);
       break;
     case kPageNewToOld:
-      result = EvacuateSinglePage<kKeepMarking>(chunk, &new_space_page_visitor);
+      result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor);
       DCHECK(result);
       USE(result);
       break;
     case kObjectsOldToOld:
-      result = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
+      result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_);
       if (!result) {
         // Aborted compaction page. We can record slots here to have them
         // processed in parallel later on.
-        EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
-        result = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
+        EvacuateRecordOnlyVisitor record_visitor(page->owner()->identity());
+        result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor);
         DCHECK(result);
         USE(result);
         // We need to return failure here to indicate that we want this page
@@ -3244,7 +3241,7 @@ class EvacuationJobTraits {
   static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
                                     MemoryChunk* chunk, PerPageData) {
-    return evacuator->EvacuatePage(chunk);
+    return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
   }

   static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
@@ -3288,8 +3285,9 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
     live_bytes += page->LiveBytes();
     job.AddPage(page, &abandoned_pages);
   }
+
   const Address age_mark = heap()->new_space()->age_mark();
-  for (NewSpacePage* page : newspace_evacuation_candidates_) {
+  for (Page* page : newspace_evacuation_candidates_) {
     live_bytes += page->LiveBytes();
     if (!page->NeverEvacuate() &&
         (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
@@ -3674,7 +3672,7 @@ void UpdateToSpacePointersInParallel(Heap* heap) {
   Address space_end = heap->new_space()->top();
   NewSpacePageIterator it(space_start, space_end);
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     Address start =
         page->Contains(space_start) ? space_start : page->area_start();
     Address end = page->Contains(space_end) ? space_end : page->area_end();
......
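PageEvacuationThreshold, shown above, decides when an entire new-space page is promoted wholesale instead of evacuating its objects one by one: with --page_promotion enabled it returns a percentage of the page's allocatable bytes, and with it disabled it returns one word more than a page can ever hold live, so the live-bytes comparison can never succeed. A sketch with assumed constants:

    #include <cassert>

    constexpr int kAllocatableMemory = 500 * 1024;  // illustrative page budget
    constexpr int kPointerSize = 8;

    int PageEvacuationThreshold(bool page_promotion, int threshold_percent) {
      if (page_promotion)
        return threshold_percent * kAllocatableMemory / 100;
      return kAllocatableMemory + kPointerSize;  // unreachable threshold
    }

    int main() {
      // Enabled: e.g. 70% of the page must be live to trigger promotion.
      assert(PageEvacuationThreshold(true, 70) == 70 * kAllocatableMemory / 100);
      // Disabled: even a fully live page stays below the threshold.
      assert(kAllocatableMemory < PageEvacuationThreshold(false, 70));
      return 0;
    }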
@@ -862,7 +862,7 @@ class MarkCompactCollector {
   bool have_code_to_deoptimize_;

   List<Page*> evacuation_candidates_;
-  List<NewSpacePage*> newspace_evacuation_candidates_;
+  List<Page*> newspace_evacuation_candidates_;

   // True if we are collecting slots to perform evacuation from evacuation
   // candidates.
......
@@ -56,8 +56,8 @@ Page* PageIterator::next() {
 HeapObject* SemiSpaceIterator::Next() {
   while (current_ != limit_) {
-    if (NewSpacePage::IsAtEnd(current_)) {
-      NewSpacePage* page = NewSpacePage::FromLimit(current_);
+    if (Page::IsAlignedToPageSize(current_)) {
+      Page* page = Page::FromAllocationAreaAddress(current_);
       page = page->next_page();
       DCHECK(!page->is_anchor());
       current_ = page->area_start();
@@ -80,9 +80,9 @@ HeapObject* SemiSpaceIterator::next_object() { return Next(); }
 // NewSpacePageIterator

 NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
-    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
-      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
-      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}
+    : prev_page_(Page::FromAddress(space->ToSpaceStart())->prev_page()),
+      next_page_(Page::FromAddress(space->ToSpaceStart())),
+      last_page_(Page::FromAllocationAreaAddress(space->ToSpaceEnd())) {}

 NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
     : prev_page_(space->anchor()),
@@ -90,17 +90,16 @@ NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
       last_page_(prev_page_->prev_page()) {}

 NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
-    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
-      next_page_(NewSpacePage::FromAddress(start)),
-      last_page_(NewSpacePage::FromLimit(limit)) {
+    : prev_page_(Page::FromAddress(start)->prev_page()),
+      next_page_(Page::FromAddress(start)),
+      last_page_(Page::FromAllocationAreaAddress(limit)) {
   SemiSpace::AssertValidRange(start, limit);
 }

 bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }

-NewSpacePage* NewSpacePageIterator::next() {
+Page* NewSpacePageIterator::next() {
   DCHECK(has_next());
   prev_page_ = next_page_;
   next_page_ = next_page_->next_page();
@@ -244,20 +243,18 @@ bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
 bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }

 size_t NewSpace::AllocatedSinceLastGC() {
-  const intptr_t age_mark_offset =
-      NewSpacePage::OffsetInPage(to_space_.age_mark());
-  const intptr_t top_offset =
-      NewSpacePage::OffsetInPage(allocation_info_.top());
+  const intptr_t age_mark_offset = Page::OffsetInPage(to_space_.age_mark());
+  const intptr_t top_offset = Page::OffsetInPage(allocation_info_.top());
   const intptr_t age_mark_delta =
-      age_mark_offset >= NewSpacePage::kObjectStartOffset
-          ? age_mark_offset - NewSpacePage::kObjectStartOffset
-          : NewSpacePage::kAllocatableMemory;
-  const intptr_t top_delta = top_offset >= NewSpacePage::kObjectStartOffset
-                                 ? top_offset - NewSpacePage::kObjectStartOffset
-                                 : NewSpacePage::kAllocatableMemory;
+      age_mark_offset >= Page::kObjectStartOffset
+          ? age_mark_offset - Page::kObjectStartOffset
+          : Page::kAllocatableMemory;
+  const intptr_t top_delta = top_offset >= Page::kObjectStartOffset
+                                 ? top_offset - Page::kObjectStartOffset
+                                 : Page::kAllocatableMemory;
   DCHECK((allocated_since_last_gc_ > 0) ||
-         (NewSpacePage::FromLimit(allocation_info_.top()) ==
-          NewSpacePage::FromLimit(to_space_.age_mark())));
+         (Page::FromAllocationAreaAddress(allocation_info_.top()) ==
+          Page::FromAllocationAreaAddress(to_space_.age_mark())));
   return static_cast<size_t>(allocated_since_last_gc_ + top_delta -
                              age_mark_delta);
 }
@@ -270,16 +267,15 @@ AllocationSpace AllocationResult::RetrySpace() {
   return static_cast<AllocationSpace>(Smi::cast(object_)->value());
 }

-NewSpacePage* NewSpacePage::Initialize(Heap* heap, MemoryChunk* chunk,
-                                       Executability executable,
-                                       SemiSpace* owner) {
+Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
+                       SemiSpace* owner) {
   DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
   bool in_to_space = (owner->id() != kFromSpace);
   chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                              : MemoryChunk::IN_FROM_SPACE);
   DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                        : MemoryChunk::IN_TO_SPACE));
-  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
+  Page* page = static_cast<Page*>(chunk);
   heap->incremental_marking()->SetNewSpacePageFlags(page);
   return page;
 }
@@ -309,7 +305,8 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
   return page;
 }

-Page* Page::Convert(NewSpacePage* old_page, PagedSpace* new_owner) {
+Page* Page::ConvertNewToOld(Page* old_page, PagedSpace* new_owner) {
+  DCHECK(old_page->InNewSpace());
   old_page->set_owner(new_owner);
   old_page->SetFlags(0, ~0);
   new_owner->AccountCommitted(old_page->size());
@@ -359,14 +356,14 @@ void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
 bool PagedSpace::Contains(Address addr) {
   Page* p = Page::FromAddress(addr);
-  if (!p->is_valid()) return false;
+  if (!Page::IsValid(p)) return false;
   return p->owner() == this;
 }

 bool PagedSpace::Contains(Object* o) {
   if (!o->IsHeapObject()) return false;
   Page* p = Page::FromAddress(HeapObject::cast(o)->address());
-  if (!p->is_valid()) return false;
+  if (!Page::IsValid(p)) return false;
   return p->owner() == this;
 }
@@ -472,16 +469,6 @@ MemoryChunk* MemoryChunkIterator::next() {
   return nullptr;
 }

-void Page::set_next_page(Page* page) {
-  DCHECK(page->owner() == owner());
-  set_next_chunk(page);
-}
-
-void Page::set_prev_page(Page* page) {
-  DCHECK(page->owner() == owner());
-  set_prev_chunk(page);
-}
-
 Page* FreeListCategory::page() {
   return Page::FromAddress(reinterpret_cast<Address>(this));
 }
......
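The InitializeAsAnchor consolidation in the next file (and the ANCHOR flag added in spaces.h further below) keeps each space's page list circular: the anchor is a sentinel whose next/prev pointers initially refer to itself, so insertion and iteration never need null checks. A sketch of the pattern with illustrative types:

    #include <cassert>

    struct PageNode {
      PageNode* next;
      PageNode* prev;

      PageNode() : next(this), prev(this) {}  // InitializeAsAnchor: self-linked

      void InsertAfter(PageNode* pos) {  // splice this node in after pos
        next = pos->next;
        prev = pos;
        pos->next->prev = this;
        pos->next = this;
      }
    };

    int main() {
      PageNode anchor;  // empty list: anchor.next == &anchor
      PageNode p1, p2;
      p1.InsertAfter(&anchor);
      p2.InsertAfter(&p1);
      int count = 0;
      // Iteration stops when it wraps back around to the sentinel.
      for (PageNode* p = anchor.next; p != &anchor; p = p->next) count++;
      assert(count == 2);
      return 0;
    }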
@@ -425,20 +425,12 @@ Address MemoryAllocator::AllocateAlignedMemory(
   return base;
 }

-void Page::InitializeAsAnchor(PagedSpace* owner) {
-  set_owner(owner);
-  set_prev_page(this);
-  set_next_page(this);
-}
-
-void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
-  set_owner(semi_space);
+void Page::InitializeAsAnchor(Space* space) {
+  set_owner(space);
   set_next_chunk(this);
   set_prev_chunk(this);
-  // Flags marks this invalid page as not being in new-space.
-  // All real new-space pages will be in new-space.
   SetFlags(0, ~0);
+  SetFlag(ANCHOR);
 }

 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
@@ -772,12 +764,11 @@ template void MemoryAllocator::Free<MemoryAllocator::kRegular>(
 template void MemoryAllocator::Free<MemoryAllocator::kPooled>(
     MemoryChunk* chunk);

-template <typename PageType, MemoryAllocator::AllocationMode mode,
-          typename SpaceType>
-PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
-                                        Executability executable) {
+template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
+Page* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
+                                    Executability executable) {
   MemoryChunk* chunk = nullptr;
-  if (mode == kPooled) {
+  if (alloc_mode == kPooled) {
     DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
     DCHECK_EQ(executable, NOT_EXECUTABLE);
     chunk = AllocatePagePooled(owner);
@@ -786,21 +777,27 @@ PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
     chunk = AllocateChunk(size, size, executable, owner);
   }
   if (chunk == nullptr) return nullptr;
-  return PageType::Initialize(isolate_->heap(), chunk, executable, owner);
-}
-
-template Page* MemoryAllocator::AllocatePage<Page, MemoryAllocator::kRegular,
-                                             PagedSpace>(intptr_t, PagedSpace*,
-                                                         Executability);
-template LargePage*
-MemoryAllocator::AllocatePage<LargePage, MemoryAllocator::kRegular, Space>(
-    intptr_t, Space*, Executability);
-template NewSpacePage* MemoryAllocator::AllocatePage<
-    NewSpacePage, MemoryAllocator::kPooled, SemiSpace>(intptr_t, SemiSpace*,
-                                                       Executability);
+  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
+}
+
+template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
+    intptr_t size, PagedSpace* owner, Executability executable);
+template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
+    intptr_t size, SemiSpace* owner, Executability executable);
+template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
+    intptr_t size, SemiSpace* owner, Executability executable);
+
+LargePage* MemoryAllocator::AllocateLargePage(intptr_t size,
+                                              LargeObjectSpace* owner,
+                                              Executability executable) {
+  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
+  if (chunk == nullptr) return nullptr;
+  return LargePage::Initialize(isolate_->heap(), chunk, executable, owner);
+}

 template <typename SpaceType>
 MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
   if (chunk_pool_.is_empty()) return nullptr;
@@ -1041,13 +1038,11 @@ void Space::AllocationStep(Address soon_object, int size) {
 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                        Executability executable)
-    : Space(heap, space, executable), free_list_(this) {
+    : Space(heap, space, executable), anchor_(this), free_list_(this) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();

   allocation_info_.Reset(nullptr, nullptr);
-
-  anchor_.InitializeAsAnchor(this);
 }
@@ -1180,8 +1175,7 @@ bool PagedSpace::Expand() {
   if (!heap()->CanExpandOldGeneration(size)) return false;

-  Page* p =
-      heap()->memory_allocator()->AllocatePage<Page>(size, this, executable());
+  Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
   if (p == nullptr) return false;

   AccountCommitted(static_cast<intptr_t>(p->size()));
@@ -1240,7 +1234,7 @@ void PagedSpace::ReleasePage(Page* page) {
   free_list_.EvictFreeListItems(page);
   DCHECK(!free_list_.ContainsPageFreeListItems(page));

-  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
+  if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
     allocation_info_.Reset(nullptr, nullptr);
   }
@@ -1269,7 +1263,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
   while (page_iterator.has_next()) {
     Page* page = page_iterator.next();
     CHECK(page->owner() == this);
-    if (page == Page::FromAllocationTop(allocation_info_.top())) {
+    if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
       allocation_pointer_found_in_space = true;
     }
     CHECK(page->SweepingDone());
@@ -1488,14 +1482,14 @@ void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
 bool NewSpace::AddFreshPage() {
   Address top = allocation_info_.top();
-  DCHECK(!NewSpacePage::IsAtStart(top));
+  DCHECK(!Page::IsAtObjectStart(top));
   if (!to_space_.AdvancePage()) {
     // No more pages left to advance.
     return false;
   }

   // Clear remainder of current page.
-  Address limit = NewSpacePage::FromLimit(top)->area_end();
+  Address limit = Page::FromAllocationAreaAddress(top)->area_end();
   if (heap()->gc_state() == Heap::SCAVENGE) {
     heap()->promotion_queue()->SetNewLimit(limit);
   }
@@ -1503,7 +1497,7 @@ bool NewSpace::AddFreshPage() {
   int remaining_in_page = static_cast<int>(limit - top);
   heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
   pages_used_++;
-  allocated_since_last_gc_ += NewSpacePage::kAllocatableMemory;
+  allocated_since_last_gc_ += Page::kAllocatableMemory;
   UpdateAllocationInfo();

   return true;
@@ -1622,9 +1616,9 @@ void NewSpace::Verify() {
   CHECK_EQ(current, to_space_.space_start());

   while (current != top()) {
-    if (!NewSpacePage::IsAtEnd(current)) {
+    if (!Page::IsAlignedToPageSize(current)) {
       // The allocation pointer should not be in the middle of an object.
-      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
+      CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
             current < top());

       HeapObject* object = HeapObject::FromAddress(current);
@@ -1650,7 +1644,7 @@ void NewSpace::Verify() {
       current += size;
     } else {
       // At end of page, switch to next page.
-      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
+      Page* page = Page::FromAllocationAreaAddress(current)->next_page();
       // Next page should be valid.
       CHECK(!page->is_anchor());
       current = page->area_start();
@@ -1686,14 +1680,12 @@ void SemiSpace::TearDown() {
 bool SemiSpace::Commit() {
   DCHECK(!is_committed());
-  NewSpacePage* current = anchor();
+  Page* current = anchor();
   const int num_pages = current_capacity_ / Page::kPageSize;
   for (int pages_added = 0; pages_added < num_pages; pages_added++) {
-    NewSpacePage* new_page =
-        heap()
-            ->memory_allocator()
-            ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
-                NewSpacePage::kAllocatableMemory, this, executable());
+    Page* new_page =
+        heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+            Page::kAllocatableMemory, this, executable());
     if (new_page == nullptr) {
       RewindPages(current, pages_added);
       return false;
@@ -1740,20 +1732,18 @@ bool SemiSpace::GrowTo(int new_capacity) {
   if (!is_committed()) {
     if (!Commit()) return false;
   }
-  DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
+  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
   DCHECK_LE(new_capacity, maximum_capacity_);
   DCHECK_GT(new_capacity, current_capacity_);
   const int delta = new_capacity - current_capacity_;
   DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
-  const int delta_pages = delta / NewSpacePage::kPageSize;
-  NewSpacePage* last_page = anchor()->prev_page();
+  const int delta_pages = delta / Page::kPageSize;
+  Page* last_page = anchor()->prev_page();
   DCHECK_NE(last_page, anchor());
   for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
-    NewSpacePage* new_page =
-        heap()
-            ->memory_allocator()
-            ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
-                NewSpacePage::kAllocatableMemory, this, executable());
+    Page* new_page =
+        heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+            Page::kAllocatableMemory, this, executable());
     if (new_page == nullptr) {
       RewindPages(last_page, pages_added);
       return false;
@@ -1761,8 +1751,7 @@ bool SemiSpace::GrowTo(int new_capacity) {
     new_page->InsertAfter(last_page);
     Bitmap::Clear(new_page);
     // Duplicate the flags that was set on the old page.
-    new_page->SetFlags(last_page->GetFlags(),
-                       NewSpacePage::kCopyOnFlipFlagsMask);
+    new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
     last_page = new_page;
   }
   AccountCommitted(static_cast<intptr_t>(delta));
@@ -1770,9 +1759,9 @@ bool SemiSpace::GrowTo(int new_capacity) {
   return true;
 }

-void SemiSpace::RewindPages(NewSpacePage* start, int num_pages) {
-  NewSpacePage* new_last_page = nullptr;
-  NewSpacePage* last_page = start;
+void SemiSpace::RewindPages(Page* start, int num_pages) {
+  Page* new_last_page = nullptr;
+  Page* last_page = start;
   while (num_pages > 0) {
     DCHECK_NE(last_page, anchor());
     new_last_page = last_page->prev_page();
@@ -1784,15 +1773,15 @@ void SemiSpace::RewindPages(NewSpacePage* start, int num_pages) {
 }

 bool SemiSpace::ShrinkTo(int new_capacity) {
-  DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
+  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
   DCHECK_GE(new_capacity, minimum_capacity_);
   DCHECK_LT(new_capacity, current_capacity_);
   if (is_committed()) {
     const int delta = current_capacity_ - new_capacity;
     DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
-    int delta_pages = delta / NewSpacePage::kPageSize;
-    NewSpacePage* new_last_page;
-    NewSpacePage* last_page;
+    int delta_pages = delta / Page::kPageSize;
+    Page* new_last_page;
+    Page* last_page;
     while (delta_pages > 0) {
       last_page = anchor()->prev_page();
       new_last_page = last_page->prev_page();
@@ -1809,13 +1798,12 @@ bool SemiSpace::ShrinkTo(int new_capacity) {
 void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
   anchor_.set_owner(this);
-  // Fixup back-pointers to anchor. Address of anchor changes when we swap.
   anchor_.prev_page()->set_next_page(&anchor_);
   anchor_.next_page()->set_prev_page(&anchor_);

   NewSpacePageIterator it(this);
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     page->set_owner(this);
     page->SetFlags(flags, mask);
     if (id_ == kToSpace) {
@@ -1838,12 +1826,11 @@ void SemiSpace::Reset() {
   current_page_ = anchor_.next_page();
 }

-void SemiSpace::ReplaceWithEmptyPage(NewSpacePage* old_page) {
-  NewSpacePage* new_page =
-      heap()->memory_allocator()->AllocatePage<NewSpacePage>(
-          NewSpacePage::kAllocatableMemory, this, executable());
+void SemiSpace::ReplaceWithEmptyPage(Page* old_page) {
+  Page* new_page = heap()->memory_allocator()->AllocatePage(
+      Page::kAllocatableMemory, this, executable());
   Bitmap::Clear(new_page);
-  new_page->SetFlags(old_page->GetFlags(), NewSpacePage::kCopyAllFlags);
+  new_page->SetFlags(old_page->GetFlags(), Page::kCopyAllFlags);
   new_page->set_next_page(old_page->next_page());
   new_page->set_prev_page(old_page->prev_page());
   old_page->next_page()->set_prev_page(new_page);
@@ -1868,13 +1855,13 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
   std::swap(from->anchor_, to->anchor_);
   std::swap(from->current_page_, to->current_page_);

-  to->FixPagesFlags(saved_to_space_flags, NewSpacePage::kCopyOnFlipFlagsMask);
+  to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
   from->FixPagesFlags(0, 0);
 }

 void SemiSpace::set_age_mark(Address mark) {
-  DCHECK_EQ(NewSpacePage::FromLimit(mark)->semi_space(), this);
+  DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
   age_mark_ = mark;
   // Mark all pages up to the one containing mark.
   NewSpacePageIterator it(space_start(), mark);
@@ -1891,10 +1878,10 @@ void SemiSpace::Print() {}
 #ifdef VERIFY_HEAP
 void SemiSpace::Verify() {
   bool is_from_space = (id_ == kFromSpace);
-  NewSpacePage* page = anchor_.next_page();
-  CHECK(anchor_.semi_space() == this);
+  Page* page = anchor_.next_page();
+  CHECK(anchor_.owner() == this);
   while (page != &anchor_) {
-    CHECK_EQ(page->semi_space(), this);
+    CHECK_EQ(page->owner(), this);
     CHECK(page->InNewSpace());
     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
                                         : MemoryChunk::IN_TO_SPACE));
@@ -1922,10 +1909,10 @@ void SemiSpace::Verify() {
 #ifdef DEBUG
 void SemiSpace::AssertValidRange(Address start, Address end) {
   // Addresses belong to same semi-space
-  NewSpacePage* page = NewSpacePage::FromLimit(start);
-  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
-  SemiSpace* space = page->semi_space();
-  CHECK_EQ(space, end_page->semi_space());
+  Page* page = Page::FromAllocationAreaAddress(start);
+  Page* end_page = Page::FromAllocationAreaAddress(end);
+  SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
+  CHECK_EQ(space, end_page->owner());
   // Start address is before end address, either on same page,
   // or end address is on a later page in the linked list of
   // semi-space pages.
@@ -2599,7 +2586,7 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
 void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
   if (allocation_info_.top() >= allocation_info_.limit()) return;

-  if (!Page::FromAllocationTop(allocation_info_.top())->CanAllocate()) {
+  if (!Page::FromAllocationAreaAddress(allocation_info_.top())->CanAllocate()) {
     // Create filler object to keep page iterable if it was iterable.
     int remaining =
         static_cast<int>(allocation_info_.limit() - allocation_info_.top());
@@ -2908,7 +2895,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
     return AllocationResult::Retry(identity());
   }

-  LargePage* page = heap()->memory_allocator()->AllocatePage<LargePage>(
+  LargePage* page = heap()->memory_allocator()->AllocateLargePage(
       object_size, this, executable);
   if (page == NULL) return AllocationResult::Retry(identity());

   DCHECK(page->area_size() >= object_size);
@@ -2977,7 +2964,7 @@ LargePage* LargeObjectSpace::FindPage(Address a) {
   if (e != NULL) {
     DCHECK(e->value != NULL);
     LargePage* page = reinterpret_cast<LargePage*>(e->value);
-    DCHECK(page->is_valid());
+    DCHECK(LargePage::IsValid(page));
     if (page->Contains(a)) {
       return page;
     }
......
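The semispace code above leans on MemoryChunk::SetFlags(flags, mask), whose masked-update semantics are what make kCopyOnFlipFlagsMask and kCopyAllFlags work: only the bits selected by the mask are overwritten from the source word, and everything else keeps its current value. A sketch with made-up flag positions:

    #include <cassert>
    #include <cstdint>

    using Flags = std::uintptr_t;
    constexpr Flags kInToSpace = Flags{1} << 0;  // illustrative flag bits
    constexpr Flags kInFromSpace = Flags{1} << 1;
    constexpr Flags kNeverEvacuate = Flags{1} << 2;
    constexpr Flags kCopyOnFlipFlagsMask = kInToSpace | kInFromSpace;

    // Masked update: take mask-selected bits from `flags`, keep the rest.
    constexpr Flags SetFlags(Flags current, Flags flags, Flags mask) {
      return (current & ~mask) | (flags & mask);
    }

    int main() {
      Flags page = kInFromSpace | kNeverEvacuate;
      // Flip semispaces: rewrite only the to/from-space bits, leaving
      // unrelated state (kNeverEvacuate) untouched.
      page = SetFlags(page, kInToSpace, kCopyOnFlipFlagsMask);
      assert(page == (kInToSpace | kNeverEvacuate));
      return 0;
    }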
...@@ -27,7 +27,6 @@ class FreeList; ...@@ -27,7 +27,6 @@ class FreeList;
class Isolate; class Isolate;
class MemoryAllocator; class MemoryAllocator;
class MemoryChunk; class MemoryChunk;
class NewSpacePage;
class Page; class Page;
class PagedSpace; class PagedSpace;
class SemiSpace; class SemiSpace;
...@@ -445,6 +444,9 @@ class MemoryChunk { ...@@ -445,6 +444,9 @@ class MemoryChunk {
// has been aborted and needs special handling by the sweeper. // has been aborted and needs special handling by the sweeper.
COMPACTION_WAS_ABORTED, COMPACTION_WAS_ABORTED,
// |ANCHOR|: Flag is set if page is an anchor.
ANCHOR,
// Last flag, keep at bottom. // Last flag, keep at bottom.
NUM_MEMORY_CHUNK_FLAGS NUM_MEMORY_CHUNK_FLAGS
}; };
...@@ -556,7 +558,7 @@ class MemoryChunk { ...@@ -556,7 +558,7 @@ class MemoryChunk {
if (mark == nullptr) return; if (mark == nullptr) return;
// Need to subtract one from the mark because when a chunk is full the // Need to subtract one from the mark because when a chunk is full the
// top points to the next address after the chunk, which effectively belongs // top points to the next address after the chunk, which effectively belongs
// to another chunk. See the comment to Page::FromAllocationTop. // to another chunk. See the comment to Page::FromTopOrLimit.
MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1); MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address()); intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
intptr_t old_mark = 0; intptr_t old_mark = 0;
...@@ -566,9 +568,9 @@ class MemoryChunk { ...@@ -566,9 +568,9 @@ class MemoryChunk {
!chunk->high_water_mark_.TrySetValue(old_mark, new_mark)); !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
} }
Address address() { return reinterpret_cast<Address>(this); } static bool IsValid(MemoryChunk* chunk) { return chunk != nullptr; }
bool is_valid() { return address() != NULL; } Address address() { return reinterpret_cast<Address>(this); }
base::Mutex* mutex() { return mutex_; } base::Mutex* mutex() { return mutex_; }
...@@ -825,50 +827,81 @@ class MemoryChunk { ...@@ -825,50 +827,81 @@ class MemoryChunk {
// //
// The only way to get a page pointer is by calling factory methods: // The only way to get a page pointer is by calling factory methods:
// Page* p = Page::FromAddress(addr); or // Page* p = Page::FromAddress(addr); or
// Page* p = Page::FromAllocationTop(top); // Page* p = Page::FromTopOrLimit(top);
class Page : public MemoryChunk { class Page : public MemoryChunk {
public: public:
static inline Page* Convert(NewSpacePage* old_page, PagedSpace* new_owner); static const intptr_t kCopyAllFlags = ~0;
// Page flags copied from from-space to to-space when flipping semispaces.
static const intptr_t kCopyOnFlipFlagsMask =
(1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
(1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
// Maximum object size that gets allocated into regular pages. Objects larger
// than that size are allocated in large object space and are never moved in
// memory. This also applies to new space allocation, since objects are never
// migrated from new space to large object space. Takes double alignment into
// account.
// TODO(hpayer): This limit should be way smaller but we currently have
// short living objects >256K.
static const int kMaxRegularHeapObjectSize = 600 * KB;
static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner);
// Returns the page containing a given address. The address ranges // Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[ // from [page_addr .. page_addr + kPageSize[. This only works if the object
// This only works if the object is in fact in a page. See also MemoryChunk:: // is in fact in a page.
// FromAddress() and FromAnyAddress(). static Page* FromAddress(Address addr) {
INLINE(static Page* FromAddress(Address a)) { return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
} }
// Only works for addresses in pointer spaces, not code space. // Returns the page containing the address provided. The address can
inline static Page* FromAnyPointerAddress(Heap* heap, Address addr); // potentially point righter after the page. To be also safe for tagged values
// we subtract a hole word. The valid address ranges from
// [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
static Page* FromAllocationAreaAddress(Address address) {
return Page::FromAddress(address - kPointerSize);
}
// Returns the page containing an allocation top. Because an allocation // Checks if address1 and address2 are on the same new space page.
// top address can be the upper bound of the page, we need to subtract static bool OnSamePage(Address address1, Address address2) {
// it with kPointerSize first. The address ranges from return Page::FromAddress(address1) == Page::FromAddress(address2);
// [page_addr + kObjectStartOffset .. page_addr + kPageSize].
INLINE(static Page* FromAllocationTop(Address top)) {
Page* p = FromAddress(top - kPointerSize);
return p;
} }
// Returns the next page in the chain of pages owned by a space. // Checks whether an address is page aligned.
inline Page* next_page() { static bool IsAlignedToPageSize(Address addr) {
DCHECK(next_chunk()->owner() == owner()); return (OffsetFrom(addr) & kPageAlignmentMask) == 0;
return static_cast<Page*>(next_chunk());
} }
inline Page* prev_page() {
DCHECK(prev_chunk()->owner() == owner()); static bool IsAtObjectStart(Address addr) {
return static_cast<Page*>(prev_chunk()); return (reinterpret_cast<intptr_t>(addr) & kPageAlignmentMask) ==
kObjectStartOffset;
} }
inline void set_next_page(Page* page);
inline void set_prev_page(Page* page);
// Checks whether an address is page aligned. inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
static bool IsAlignedToPageSize(Address a) {
return 0 == (OffsetFrom(a) & kPageAlignmentMask); // Create a Page object that is only used as anchor for the doubly-linked
// list of real pages.
explicit Page(Space* owner) { InitializeAsAnchor(owner); }
inline void MarkNeverAllocateForTesting();
inline void MarkEvacuationCandidate();
inline void ClearEvacuationCandidate();
Page* next_page() { return static_cast<Page*>(next_chunk()); }
Page* prev_page() { return static_cast<Page*>(prev_chunk()); }
void set_next_page(Page* page) { set_next_chunk(page); }
void set_prev_page(Page* page) { set_prev_chunk(page); }
template <typename Callback>
inline void ForAllFreeListCategories(Callback callback) {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
callback(&categories_[i]);
}
} }
// Returns the offset of a given address to this page. // Returns the offset of a given address to this page.
INLINE(int Offset(Address a)) { inline int Offset(Address a) {
int offset = static_cast<int>(a - address()); int offset = static_cast<int>(a - address());
return offset; return offset;
} }
...@@ -879,21 +912,6 @@ class Page : public MemoryChunk { ...@@ -879,21 +912,6 @@ class Page : public MemoryChunk {
return address() + offset; return address() + offset;
} }
-  // ---------------------------------------------------------------------
-  // Maximum object size that gets allocated into regular pages. Objects larger
-  // than that size are allocated in large object space and are never moved in
-  // memory. This also applies to new space allocation, since objects are never
-  // migrated from new space to large object space. Takes double alignment into
-  // account.
-  // TODO(hpayer): This limit should be way smaller but we currently have
-  // short living objects >256K.
-  static const int kMaxRegularHeapObjectSize = 600 * KB;
-
-  inline void ClearGCFields();
-
-  void InitializeAsAnchor(PagedSpace* owner);
-
   // WaitUntilSweepingCompleted only works when concurrent sweeping is in
   // progress. In particular, when we know that right before this call a
   // sweeper thread was sweeping this page.
@@ -914,48 +932,39 @@ class Page : public MemoryChunk {
            available_in_free_list());
   }
-  template <typename Callback>
-  inline void ForAllFreeListCategories(Callback callback) {
-    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
-      callback(&categories_[i]);
-    }
-  }
-
   FreeListCategory* free_list_category(FreeListCategoryType type) {
     return &categories_[type];
   }
-#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
-  type name() { return name##_.Value(); }         \
-  void set_##name(type name) { name##_.SetValue(name); } \
-  void add_##name(type name) { name##_.Increment(name); }
-
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_free_list)
-
-#undef FRAGMENTATION_STATS_ACCESSORS
+  bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
+
+  intptr_t wasted_memory() { return wasted_memory_.Value(); }
+  void add_wasted_memory(intptr_t waste) { wasted_memory_.Increment(waste); }
+  intptr_t available_in_free_list() { return available_in_free_list_.Value(); }
+  void add_available_in_free_list(intptr_t available) {
+    available_in_free_list_.Increment(available);
+  }

 #ifdef DEBUG
   void Print();
 #endif  // DEBUG
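For reference, the deleted FRAGMENTATION_STATS_ACCESSORS macro stamped out getter/setter/increment triples; the new code spells out only the accessors that are actually used (note that set_wasted_memory() disappears entirely). A sketch of what one invocation used to expand to, reconstructed from the macro definition above:

// FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory) expanded to:
intptr_t wasted_memory() { return wasted_memory_.Value(); }
void set_wasted_memory(intptr_t wasted_memory) {
  wasted_memory_.SetValue(wasted_memory);
}
void add_wasted_memory(intptr_t wasted_memory) {
  wasted_memory_.Increment(wasted_memory);
}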
-  inline void MarkNeverAllocateForTesting();
-  inline void MarkEvacuationCandidate();
-  inline void ClearEvacuationCandidate();
-
  private:
   enum InitializationMode { kFreeMemory, kDoNotFreeMemory };

   template <InitializationMode mode = kFreeMemory>
   static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                  Executability executable, PagedSpace* owner);
+  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
+                                 Executability executable, SemiSpace* owner);

   inline void InitializeFreeListCategories();
+  void InitializeAsAnchor(Space* owner);

   friend class MemoryAllocator;
 };
 class LargePage : public MemoryChunk {
  public:
   HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }

@@ -1275,13 +1284,15 @@ class MemoryAllocator {
   void TearDown();

-  // Allocates either Page or NewSpacePage from the allocator. AllocationMode
-  // is used to indicate whether pooled allocation, which only works for
-  // MemoryChunk::kPageSize, should be tried first.
-  template <typename PageType, MemoryAllocator::AllocationMode mode = kRegular,
-            typename SpaceType>
-  PageType* AllocatePage(intptr_t size, SpaceType* owner,
-                         Executability executable);
+  // Allocates a Page from the allocator. AllocationMode is used to indicate
+  // whether pooled allocation, which only works for MemoryChunk::kPageSize,
+  // should be tried first.
+  template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
+            typename SpaceType>
+  Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable);
+
+  LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner,
+                               Executability executable);
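At call sites the page type disappears from the template argument list, and large-object pages get a dedicated entry point. A hedged before/after sketch, assuming a MemoryAllocator* allocator, a PagedSpace* space, and a size in scope:

// Before this CL: the page type was an explicit template argument.
Page* page_before = allocator->AllocatePage<Page>(size, space, NOT_EXECUTABLE);
// After this CL: AllocatePage always yields a Page and only deduces the
// owning space type; large pages go through AllocateLargePage() instead.
Page* page_after = allocator->AllocatePage(size, space, NOT_EXECUTABLE);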
   // PreFree logically frees the object, i.e., it takes care of the size
   // bookkeeping and calls the allocation callback.

@@ -1592,7 +1603,8 @@ class AllocationInfo {

 #ifdef DEBUG
   bool VerifyPagedAllocation() {
-    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
+    return (Page::FromAllocationAreaAddress(top_) ==
+            Page::FromAllocationAreaAddress(limit_)) &&
            (top_ <= limit_);
   }
 #endif
@@ -2303,86 +2315,8 @@ class HistogramInfo : public NumberAndSizeInfo {
   const char* name_;
 };

 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
-class NewSpacePage : public MemoryChunk {
- public:
-  static bool IsAtStart(Address addr) {
-    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
-           kObjectStartOffset;
-  }
-
-  static bool IsAtEnd(Address addr) {
-    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
-  }
-
-  // Finds the NewSpacePage containing the given address.
-  static inline NewSpacePage* FromAddress(Address address_in_page) {
-    Address page_start =
-        reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
-                                  ~Page::kPageAlignmentMask);
-    NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
-    return page;
-  }
-
-  // Finds the page for a limit address. A limit address is either an address
-  // inside a page, or the address right after the last byte of a page.
-  static inline NewSpacePage* FromLimit(Address address_limit) {
-    return NewSpacePage::FromAddress(address_limit - 1);
-  }
-
-  // Checks if address1 and address2 are on the same new space page.
-  static inline bool OnSamePage(Address address1, Address address2) {
-    return NewSpacePage::FromAddress(address1) ==
-           NewSpacePage::FromAddress(address2);
-  }
-
-  inline NewSpacePage* next_page() {
-    return static_cast<NewSpacePage*>(next_chunk());
-  }
-
-  inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
-
-  inline NewSpacePage* prev_page() {
-    return static_cast<NewSpacePage*>(prev_chunk());
-  }
-
-  inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
-
-  SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
-
-  bool is_anchor() { return !this->InNewSpace(); }
-
- private:
-  static inline NewSpacePage* Initialize(Heap* heap, MemoryChunk* chunk,
-                                         Executability executable,
-                                         SemiSpace* owner);
-
-  // GC-related flags copied from from-space to to-space when
-  // flipping semispaces.
-  static const intptr_t kCopyOnFlipFlagsMask =
-      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
-      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-
-  static const intptr_t kCopyAllFlags = ~0;
-
-  // Create a NewSpacePage object that is only used as anchor
-  // for the doubly-linked list of real pages.
-  explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
-
-  // Initialize a fake NewSpacePage used as a sentinel at the ends
-  // of a doubly-linked list of real NewSpacePages.
-  // Only uses the prev/next links, and sets flags to not be in new-space.
-  void InitializeAsAnchor(SemiSpace* owner);
-
-  friend class MemoryAllocator;
-  friend class SemiSpace;
-  friend class SemiSpaceIterator;
-};
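The one behavioral subtlety in this merge is is_anchor(): the deleted class inferred anchor-ness from !InNewSpace(), which only worked while every real NewSpacePage lived in new space, whereas the merged Page tags anchors with an explicit flag. A runnable toy model of why the heuristic had to go (simplified flags, not V8 code):

#include <cassert>

// Previously an anchor was recognized by what it was NOT (a new-space page);
// now anchors carry an explicit flag, which stays correct when the same Page
// class also serves old space.
enum Flags { IN_NEW_SPACE = 1 << 0, ANCHOR = 1 << 1 };

struct ToyPage {
  int flags = 0;
  bool IsFlagSet(int f) const { return (flags & f) != 0; }
  // Old heuristic: only sound while every real page was in new space.
  bool is_anchor_old() const { return !IsFlagSet(IN_NEW_SPACE); }
  // New check: explicit and space-independent.
  bool is_anchor_new() const { return IsFlagSet(ANCHOR); }
};

int main() {
  ToyPage old_space_page;  // a real page that is not in new space
  ToyPage anchor;
  anchor.flags = ANCHOR;
  assert(old_space_page.is_anchor_old());   // the heuristic misfires here...
  assert(!old_space_page.is_anchor_new());  // ...the explicit flag does not
  assert(anchor.is_anchor_new());
  return 0;
}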
 // -----------------------------------------------------------------------------
 // SemiSpace in young generation
 //

@@ -2431,8 +2365,8 @@ class SemiSpace : public Space {
     return anchor_.next_page()->area_start();
   }

-  NewSpacePage* first_page() { return anchor_.next_page(); }
-  NewSpacePage* current_page() { return current_page_; }
+  Page* first_page() { return anchor_.next_page(); }
+  Page* current_page() { return current_page_; }

   // Returns one past the end address of the space.
   Address space_end() { return anchor_.prev_page()->area_end(); }

@@ -2444,7 +2378,7 @@ class SemiSpace : public Space {
   Address page_high() { return current_page_->area_end(); }
   bool AdvancePage() {
-    NewSpacePage* next_page = current_page_->next_page();
+    Page* next_page = current_page_->next_page();
     if (next_page == anchor()) return false;
     current_page_ = next_page;
     return true;
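AdvancePage also shows the anchor's role as a list sentinel: the page list is circular, and iteration stops when it wraps back to the anchor rather than hitting a null pointer. A runnable standalone sketch of the same pattern (generic ring list, not V8 code):

#include <cstdio>

// A tiny circular list with a sentinel node, mirroring how SemiSpace's
// anchor_ terminates page iteration.
struct Node {
  Node* next = this;  // a lone node points at itself, like an empty anchor
  int id = -1;
};

bool Advance(Node** current, Node* anchor) {
  Node* next = (*current)->next;
  if (next == anchor) return false;  // wrapped around: no more real nodes
  *current = next;
  return true;
}

int main() {
  Node anchor, a, b;
  a.id = 0; b.id = 1;
  anchor.next = &a; a.next = &b; b.next = &anchor;  // anchor -> a -> b -> anchor
  Node* cur = &anchor;
  while (Advance(&cur, &anchor)) printf("node %d\n", cur->id);
  return 0;
}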
@@ -2453,7 +2387,7 @@ class SemiSpace : public Space {
   // Resets the space to using the first page.
   void Reset();

-  void ReplaceWithEmptyPage(NewSpacePage* page);
+  void ReplaceWithEmptyPage(Page* page);

   // Age mark accessors.
   Address age_mark() { return age_mark_; }
@@ -2504,9 +2438,9 @@ class SemiSpace : public Space {
 #endif

  private:
-  void RewindPages(NewSpacePage* start, int num_pages);
+  void RewindPages(Page* start, int num_pages);

-  inline NewSpacePage* anchor() { return &anchor_; }
+  inline Page* anchor() { return &anchor_; }

   // Copies the flags into the masked positions on all pages in the space.
   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);

@@ -2526,8 +2460,8 @@ class SemiSpace : public Space {
   bool committed_;
   SemiSpaceId id_;

-  NewSpacePage anchor_;
-  NewSpacePage* current_page_;
+  Page anchor_;
+  Page* current_page_;

   friend class SemiSpaceIterator;
   friend class NewSpacePageIterator;
@@ -2575,15 +2509,15 @@ class NewSpacePageIterator BASE_EMBEDDED {
   inline NewSpacePageIterator(Address start, Address limit);

   inline bool has_next();
-  inline NewSpacePage* next();
+  inline Page* next();

  private:
-  NewSpacePage* prev_page_;  // Previous page returned.
+  Page* prev_page_;  // Previous page returned.
   // Next page that will be returned. Cached here so that we can use this
   // iterator for operations that deallocate pages.
-  NewSpacePage* next_page_;
+  Page* next_page_;
   // Last page returned.
-  NewSpacePage* last_page_;
+  Page* last_page_;
 };
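Only the element type changes here; the iteration protocol stays has_next()/next(). An illustrative fragment, not standalone: it assumes a NewSpace* new_space in scope and uses bottom()/top() as the iteration bounds:

// Walk the new-space pages overlapping [bottom, top) and sum their live bytes.
intptr_t live_bytes = 0;
NewSpacePageIterator it(new_space->bottom(), new_space->top());
while (it.has_next()) {
  Page* page = it.next();  // previously returned NewSpacePage*
  live_bytes += page->LiveBytes();
}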
@@ -2633,7 +2567,7 @@ class NewSpace : public Space {

   // Return the allocated bytes in the active semispace.
   intptr_t Size() override {
-    return pages_used_ * NewSpacePage::kAllocatableMemory +
+    return pages_used_ * Page::kAllocatableMemory +
            static_cast<int>(top() - to_space_.page_low());
   }

@@ -2646,7 +2580,7 @@ class NewSpace : public Space {
   intptr_t Capacity() {
     SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
     return (to_space_.current_capacity() / Page::kPageSize) *
-           NewSpacePage::kAllocatableMemory;
+           Page::kAllocatableMemory;
   }

   // Return the current size of a semispace, allocatable and non-allocatable
@@ -2675,7 +2609,7 @@ class NewSpace : public Space {

   inline size_t AllocatedSinceLastGC();

-  void ReplaceWithEmptyPage(NewSpacePage* page) {
+  void ReplaceWithEmptyPage(Page* page) {
     // This method is called after flipping the semispace.
     DCHECK(page->InFromSpace());
     from_space_.ReplaceWithEmptyPage(page);
...
@@ -6622,15 +6622,14 @@ UNINITIALIZED_TEST(PagePromotion) {
   CHECK_GT(handles.size(), 0u);
   // The first object in the handles should be on the first page.
   Handle<FixedArray> first_object = handles.front();
-  NewSpacePage* first_page =
-      NewSpacePage::FromAddress(first_object->address());
+  Page* first_page = Page::FromAddress(first_object->address());
   // The age mark should not be on the first page.
   CHECK(!first_page->ContainsLimit(heap->new_space()->age_mark()));
   // To perform a sanity check on live bytes we need to mark the heap.
   SimulateIncrementalMarking(heap, true);
   // Sanity check that the page meets the requirements for promotion.
   const int threshold_bytes =
-      FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory / 100;
+      FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
   CHECK_GE(first_page->LiveBytes(), threshold_bytes);
   // Actual checks: The page is in new space first, but is moved to old space
...
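The threshold computation in the test above is a straight percentage of a page's allocatable area. A worked example with assumed numbers (both the flag value and Page::kAllocatableMemory are configuration-dependent; these are illustrative only):

// Assume FLAG_page_promotion_threshold = 70 and kAllocatableMemory = 500 KB.
const int kAllocatableMemory = 500 * 1024;  // assumed, not the real constant
const int kThresholdPercent = 70;           // assumed flag value
const int threshold_bytes = kThresholdPercent * kAllocatableMemory / 100;
// threshold_bytes == 358400: at least 70% of the usable page must hold live
// objects for the page to qualify for promotion.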
@@ -315,12 +315,12 @@ TEST(MemoryAllocator) {
   {
     int total_pages = 0;
     OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
-    Page* first_page = memory_allocator->AllocatePage<Page>(
+    Page* first_page = memory_allocator->AllocatePage(
         faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
         NOT_EXECUTABLE);

     first_page->InsertAfter(faked_space.anchor()->prev_page());
-    CHECK(first_page->is_valid());
+    CHECK(Page::IsValid(first_page));
     CHECK(first_page->next_page() == faked_space.anchor());
     total_pages++;

@@ -329,10 +329,10 @@ TEST(MemoryAllocator) {
     }

     // Again, we should get n or n - 1 pages.
-    Page* other = memory_allocator->AllocatePage<Page>(
+    Page* other = memory_allocator->AllocatePage(
         faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
         NOT_EXECUTABLE);
-    CHECK(other->is_valid());
+    CHECK(Page::IsValid(other));
     total_pages++;
     other->InsertAfter(first_page);
     int page_count = 0;

@@ -343,7 +343,7 @@ TEST(MemoryAllocator) {
     CHECK(total_pages == page_count);

     Page* second_page = first_page->next_page();
-    CHECK(second_page->is_valid());
+    CHECK(Page::IsValid(second_page));

     // OldSpace's destructor will tear down the space and free up all pages.
   }
...