Commit 4e8736da authored by mlippautz, committed by Commit bot

[heap] Merge NewSpacePage into Page

BUG=chromium:581412
LOG=N

Review URL: https://codereview.chromium.org/1900423002

Cr-Commit-Position: refs/heads/master@{#35768}
parent fac7361c
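
The change below is largely mechanical: the separate NewSpacePage class is folded into Page, so one page type now backs both semispace (new-space) pages and paged-space pages, and a handful of helpers are renamed along the way. As a reading aid, here is a before/after sketch of the renamed entry points, compiled from the hunks in this commit (signatures abbreviated; the diffs themselves are the authoritative declarations):

// Old API (removed by this commit)        // New API
NewSpacePage::FromAddress(addr);           Page::FromAddress(addr);
NewSpacePage::FromLimit(limit);            Page::FromAllocationAreaAddress(limit);
Page::FromAllocationTop(top);              Page::FromAllocationAreaAddress(top);
NewSpacePage::IsAtEnd(addr);               Page::IsAlignedToPageSize(addr);
NewSpacePage::OnSamePage(a, b);            Page::OnSamePage(a, b);
Page::Convert(page, owner);                Page::ConvertNewToOld(page, owner);
page->is_valid();                          Page::IsValid(page);  // LargePage::IsValid(lop) for large pages
NewSpacePage::kAllocatableMemory           Page::kAllocatableMemory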
@@ -395,7 +395,7 @@ bool Heap::OldGenerationAllocationLimitReached() {
 
 bool Heap::ShouldBePromoted(Address old_address, int object_size) {
-  NewSpacePage* page = NewSpacePage::FromAddress(old_address);
+  Page* page = Page::FromAddress(old_address);
   Address age_mark = new_space_.age_mark();
   return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
          (!page->ContainsLimit(age_mark) || old_address < age_mark);
@@ -476,7 +476,7 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
   Address object_address = object->address();
   Address memento_address = object_address + object->Size();
   Address last_memento_word_address = memento_address + kPointerSize;
-  if (!NewSpacePage::OnSamePage(object_address, last_memento_word_address)) {
+  if (!Page::OnSamePage(object_address, last_memento_word_address)) {
     return nullptr;
   }
   HeapObject* candidate = HeapObject::FromAddress(memento_address);
@@ -504,7 +504,7 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
       top = NewSpaceTop();
       DCHECK(memento_address == top ||
              memento_address + HeapObject::kHeaderSize <= top ||
-             !NewSpacePage::OnSamePage(memento_address, top - 1));
+             !Page::OnSamePage(memento_address, top - 1));
       if ((memento_address != top) && memento_candidate->IsValid()) {
         return memento_candidate;
       }
...
@@ -945,7 +945,7 @@ void Heap::EnsureFillerObjectAtTop() {
   // may be uninitialized memory behind top. We fill the remainder of the page
   // with a filler.
   Address to_top = new_space_.top();
-  NewSpacePage* page = NewSpacePage::FromAddress(to_top - kPointerSize);
+  Page* page = Page::FromAddress(to_top - kPointerSize);
   if (page->Contains(to_top)) {
     int remaining_in_page = static_cast<int>(page->area_end() - to_top);
     CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
@@ -1552,7 +1552,8 @@ void PromotionQueue::Initialize() {
   front_ = rear_ =
       reinterpret_cast<struct Entry*>(heap_->new_space()->ToSpaceEnd());
   limit_ = reinterpret_cast<struct Entry*>(
-      Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
+      Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_))
+          ->area_start());
   emergency_stack_ = NULL;
 }
@@ -1560,7 +1561,7 @@ void PromotionQueue::Initialize() {
 void PromotionQueue::RelocateQueueHead() {
   DCHECK(emergency_stack_ == NULL);
-  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+  Page* p = Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
   struct Entry* head_start = rear_;
   struct Entry* head_end =
       Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));
@@ -1909,13 +1910,14 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
   // queue of unprocessed copied objects. Process them until the
   // queue is empty.
   while (new_space_front != new_space_.top()) {
-    if (!NewSpacePage::IsAtEnd(new_space_front)) {
+    if (!Page::IsAlignedToPageSize(new_space_front)) {
       HeapObject* object = HeapObject::FromAddress(new_space_front);
       new_space_front +=
           StaticScavengeVisitor::IterateBody(object->map(), object);
     } else {
-      new_space_front =
-          NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
+      new_space_front = Page::FromAllocationAreaAddress(new_space_front)
+                            ->next_page()
+                            ->area_start();
     }
   }
@@ -4629,7 +4631,7 @@ void Heap::ZapFromSpace() {
   NewSpacePageIterator it(new_space_.FromSpaceStart(),
                           new_space_.FromSpaceEnd());
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     for (Address cursor = page->area_start(), limit = page->area_end();
          cursor < limit; cursor += kPointerSize) {
       Memory::Address_at(cursor) = kFromSpaceZapValue;
...
@@ -322,7 +322,7 @@ class PromotionQueue {
   }
 
   Page* GetHeadPage() {
-    return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+    return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
   }
 
   void SetNewLimit(Address limit) {
@@ -330,7 +330,7 @@ class PromotionQueue {
     if (emergency_stack_) return;
 
     // If the limit is not on the same page, we can ignore it.
-    if (Page::FromAllocationTop(limit) != GetHeadPage()) return;
+    if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;
     limit_ = reinterpret_cast<struct Entry*>(limit);
...
@@ -348,7 +348,7 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
     NewSpace* space) {
   NewSpacePageIterator it(space);
   while (it.has_next()) {
-    NewSpacePage* p = it.next();
+    Page* p = it.next();
     SetNewSpacePageFlags(p, false);
   }
 }
@@ -361,7 +361,7 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
   DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
 
   LargePage* lop = heap_->lo_space()->first_page();
-  while (lop->is_valid()) {
+  while (LargePage::IsValid(lop)) {
     SetOldSpacePageFlags(lop, false, false);
     lop = lop->next_page();
   }
@@ -380,7 +380,7 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
 void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
   NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
   while (it.has_next()) {
-    NewSpacePage* p = it.next();
+    Page* p = it.next();
     SetNewSpacePageFlags(p, true);
   }
 }
@@ -393,7 +393,7 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
   ActivateIncrementalWriteBarrier(heap_->new_space());
 
   LargePage* lop = heap_->lo_space()->first_page();
-  while (lop->is_valid()) {
+  while (LargePage::IsValid(lop)) {
     SetOldSpacePageFlags(lop, true, is_compacting_);
     lop = lop->next_page();
   }
...
@@ -185,7 +185,7 @@ class IncrementalMarking {
     SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
   }
 
-  inline void SetNewSpacePageFlags(MemoryChunk* chunk) {
+  inline void SetNewSpacePageFlags(Page* chunk) {
     SetNewSpacePageFlags(chunk, IsMarking());
   }
...
@@ -134,10 +134,9 @@ static void VerifyMarking(NewSpace* space) {
   NewSpacePageIterator it(space->bottom(), end);
   // The bottom position is at the start of its page. Allows us to use
   // page->area_start() as start of range on all pages.
-  CHECK_EQ(space->bottom(),
-           NewSpacePage::FromAddress(space->bottom())->area_start());
+  CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     Address limit = it.has_next() ? page->area_end() : end;
     CHECK(limit == end || !page->Contains(end));
     VerifyMarking(space->heap(), page->area_start(), limit);
@@ -209,7 +208,7 @@ static void VerifyEvacuation(NewSpace* space) {
   VerifyEvacuationVisitor visitor;
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     Address current = page->area_start();
     Address limit = it.has_next() ? page->area_end() : space->top();
     CHECK(limit == space->top() || !page->Contains(space->top()));
@@ -375,7 +374,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
   NewSpacePageIterator it(space->bottom(), space->top());
   while (it.has_next()) {
-    NewSpacePage* p = it.next();
+    Page* p = it.next();
     CHECK(p->markbits()->IsClean());
     CHECK_EQ(0, p->LiveBytes());
   }
@@ -1803,9 +1802,9 @@ class MarkCompactCollector::EvacuateNewSpacePageVisitor final
  public:
   EvacuateNewSpacePageVisitor() : promoted_size_(0) {}
 
-  static void MoveToOldSpace(NewSpacePage* page, PagedSpace* owner) {
+  static void MoveToOldSpace(Page* page, PagedSpace* owner) {
     page->heap()->new_space()->ReplaceWithEmptyPage(page);
-    Page* new_page = Page::Convert(page, owner);
+    Page* new_page = Page::ConvertNewToOld(page, owner);
     new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
   }
@@ -1884,7 +1883,7 @@ void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
   NewSpace* space = heap()->new_space();
   NewSpacePageIterator it(space->bottom(), space->top());
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     DiscoverGreyObjectsOnPage(page);
     if (marking_deque()->IsFull()) return;
   }
@@ -3050,9 +3049,8 @@ class MarkCompactCollector::Evacuator : public Malloced {
   // evacuation.
   static int PageEvacuationThreshold() {
     if (FLAG_page_promotion)
-      return FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory /
-             100;
-    return NewSpacePage::kAllocatableMemory + kPointerSize;
+      return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
+    return Page::kAllocatableMemory + kPointerSize;
   }
 
   explicit Evacuator(MarkCompactCollector* collector)
@@ -3067,7 +3065,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
         duration_(0.0),
         bytes_compacted_(0) {}
 
-  inline bool EvacuatePage(MemoryChunk* chunk);
+  inline bool EvacuatePage(Page* chunk);
 
   // Merge back locally cached info sequentially. Note that this method needs
   // to be called from the main thread.
@@ -3101,7 +3099,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
   }
 
   template <IterationMode mode, class Visitor>
-  inline bool EvacuateSinglePage(MemoryChunk* p, Visitor* visitor);
+  inline bool EvacuateSinglePage(Page* p, Visitor* visitor);
 
   MarkCompactCollector* collector_;
@@ -3120,7 +3118,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
 };
 
 template <MarkCompactCollector::IterationMode mode, class Visitor>
-bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
+bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p,
                                                          Visitor* visitor) {
   bool success = false;
   DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
@@ -3154,28 +3152,27 @@ bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
   return success;
 }
 
-bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
+bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
   bool result = false;
-  DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
-            NewSpacePage::kSweepingDone);
-  switch (ComputeEvacuationMode(chunk)) {
+  DCHECK(page->SweepingDone());
+  switch (ComputeEvacuationMode(page)) {
     case kObjectsNewToOld:
-      result = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
+      result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_);
       DCHECK(result);
       USE(result);
       break;
     case kPageNewToOld:
-      result = EvacuateSinglePage<kKeepMarking>(chunk, &new_space_page_visitor);
+      result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor);
       DCHECK(result);
       USE(result);
       break;
    case kObjectsOldToOld:
-      result = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
+      result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_);
      if (!result) {
        // Aborted compaction page. We can record slots here to have them
        // processed in parallel later on.
-        EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
-        result = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
+        EvacuateRecordOnlyVisitor record_visitor(page->owner()->identity());
+        result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor);
        DCHECK(result);
        USE(result);
        // We need to return failure here to indicate that we want this page
@@ -3244,7 +3241,7 @@ class EvacuationJobTraits {
   static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
                                     MemoryChunk* chunk, PerPageData) {
-    return evacuator->EvacuatePage(chunk);
+    return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
   }
 
   static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
@@ -3288,8 +3285,9 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
     live_bytes += page->LiveBytes();
     job.AddPage(page, &abandoned_pages);
   }
 
-  for (NewSpacePage* page : newspace_evacuation_candidates_) {
+  const Address age_mark = heap()->new_space()->age_mark();
+  for (Page* page : newspace_evacuation_candidates_) {
     live_bytes += page->LiveBytes();
     if (!page->NeverEvacuate() &&
         (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
@@ -3674,7 +3672,7 @@ void UpdateToSpacePointersInParallel(Heap* heap) {
   Address space_end = heap->new_space()->top();
   NewSpacePageIterator it(space_start, space_end);
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     Address start =
         page->Contains(space_start) ? space_start : page->area_start();
     Address end = page->Contains(space_end) ? space_end : page->area_end();
...
@@ -862,7 +862,7 @@ class MarkCompactCollector {
   bool have_code_to_deoptimize_;
 
   List<Page*> evacuation_candidates_;
-  List<NewSpacePage*> newspace_evacuation_candidates_;
+  List<Page*> newspace_evacuation_candidates_;
 
   // True if we are collecting slots to perform evacuation from evacuation
   // candidates.
...
@@ -56,8 +56,8 @@ Page* PageIterator::next() {
 
 HeapObject* SemiSpaceIterator::Next() {
   while (current_ != limit_) {
-    if (NewSpacePage::IsAtEnd(current_)) {
-      NewSpacePage* page = NewSpacePage::FromLimit(current_);
+    if (Page::IsAlignedToPageSize(current_)) {
+      Page* page = Page::FromAllocationAreaAddress(current_);
       page = page->next_page();
       DCHECK(!page->is_anchor());
       current_ = page->area_start();
@@ -80,9 +80,9 @@ HeapObject* SemiSpaceIterator::next_object() { return Next(); }
 // NewSpacePageIterator
 
 NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
-    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
-      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
-      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}
+    : prev_page_(Page::FromAddress(space->ToSpaceStart())->prev_page()),
+      next_page_(Page::FromAddress(space->ToSpaceStart())),
+      last_page_(Page::FromAllocationAreaAddress(space->ToSpaceEnd())) {}
 
 NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
     : prev_page_(space->anchor()),
@@ -90,17 +90,16 @@ NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
       last_page_(prev_page_->prev_page()) {}
 
 NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
-    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
-      next_page_(NewSpacePage::FromAddress(start)),
-      last_page_(NewSpacePage::FromLimit(limit)) {
+    : prev_page_(Page::FromAddress(start)->prev_page()),
+      next_page_(Page::FromAddress(start)),
+      last_page_(Page::FromAllocationAreaAddress(limit)) {
   SemiSpace::AssertValidRange(start, limit);
 }
 
 bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }
 
-NewSpacePage* NewSpacePageIterator::next() {
+Page* NewSpacePageIterator::next() {
   DCHECK(has_next());
   prev_page_ = next_page_;
   next_page_ = next_page_->next_page();
@@ -244,20 +243,18 @@ bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
 bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
 
 size_t NewSpace::AllocatedSinceLastGC() {
-  const intptr_t age_mark_offset =
-      NewSpacePage::OffsetInPage(to_space_.age_mark());
-  const intptr_t top_offset =
-      NewSpacePage::OffsetInPage(allocation_info_.top());
+  const intptr_t age_mark_offset = Page::OffsetInPage(to_space_.age_mark());
+  const intptr_t top_offset = Page::OffsetInPage(allocation_info_.top());
   const intptr_t age_mark_delta =
-      age_mark_offset >= NewSpacePage::kObjectStartOffset
-          ? age_mark_offset - NewSpacePage::kObjectStartOffset
-          : NewSpacePage::kAllocatableMemory;
-  const intptr_t top_delta = top_offset >= NewSpacePage::kObjectStartOffset
-                                 ? top_offset - NewSpacePage::kObjectStartOffset
-                                 : NewSpacePage::kAllocatableMemory;
+      age_mark_offset >= Page::kObjectStartOffset
+          ? age_mark_offset - Page::kObjectStartOffset
+          : Page::kAllocatableMemory;
+  const intptr_t top_delta = top_offset >= Page::kObjectStartOffset
+                                 ? top_offset - Page::kObjectStartOffset
+                                 : Page::kAllocatableMemory;
   DCHECK((allocated_since_last_gc_ > 0) ||
-         (NewSpacePage::FromLimit(allocation_info_.top()) ==
-          NewSpacePage::FromLimit(to_space_.age_mark())));
+         (Page::FromAllocationAreaAddress(allocation_info_.top()) ==
+          Page::FromAllocationAreaAddress(to_space_.age_mark())));
   return static_cast<size_t>(allocated_since_last_gc_ + top_delta -
                              age_mark_delta);
 }
@@ -270,16 +267,15 @@ AllocationSpace AllocationResult::RetrySpace() {
   return static_cast<AllocationSpace>(Smi::cast(object_)->value());
 }
 
-NewSpacePage* NewSpacePage::Initialize(Heap* heap, MemoryChunk* chunk,
-                                       Executability executable,
-                                       SemiSpace* owner) {
+Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
+                       SemiSpace* owner) {
   DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
   bool in_to_space = (owner->id() != kFromSpace);
   chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                              : MemoryChunk::IN_FROM_SPACE);
   DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                        : MemoryChunk::IN_TO_SPACE));
-  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
+  Page* page = static_cast<Page*>(chunk);
   heap->incremental_marking()->SetNewSpacePageFlags(page);
   return page;
 }
@@ -309,7 +305,8 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
   return page;
 }
 
-Page* Page::Convert(NewSpacePage* old_page, PagedSpace* new_owner) {
+Page* Page::ConvertNewToOld(Page* old_page, PagedSpace* new_owner) {
+  DCHECK(old_page->InNewSpace());
   old_page->set_owner(new_owner);
   old_page->SetFlags(0, ~0);
   new_owner->AccountCommitted(old_page->size());
@@ -359,14 +356,14 @@ void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
 bool PagedSpace::Contains(Address addr) {
   Page* p = Page::FromAddress(addr);
-  if (!p->is_valid()) return false;
+  if (!Page::IsValid(p)) return false;
   return p->owner() == this;
 }
 
 bool PagedSpace::Contains(Object* o) {
   if (!o->IsHeapObject()) return false;
   Page* p = Page::FromAddress(HeapObject::cast(o)->address());
-  if (!p->is_valid()) return false;
+  if (!Page::IsValid(p)) return false;
   return p->owner() == this;
 }
@@ -472,16 +469,6 @@ MemoryChunk* MemoryChunkIterator::next() {
   return nullptr;
 }
 
-void Page::set_next_page(Page* page) {
-  DCHECK(page->owner() == owner());
-  set_next_chunk(page);
-}
-
-void Page::set_prev_page(Page* page) {
-  DCHECK(page->owner() == owner());
-  set_prev_chunk(page);
-}
-
 Page* FreeListCategory::page() {
   return Page::FromAddress(reinterpret_cast<Address>(this));
 }
...
@@ -6622,15 +6622,14 @@ UNINITIALIZED_TEST(PagePromotion) {
     CHECK_GT(handles.size(), 0u);
     // First object in handle should be on the first page.
     Handle<FixedArray> first_object = handles.front();
-    NewSpacePage* first_page =
-        NewSpacePage::FromAddress(first_object->address());
+    Page* first_page = Page::FromAddress(first_object->address());
     // The age mark should not be on the first page.
     CHECK(!first_page->ContainsLimit(heap->new_space()->age_mark()));
     // To perform a sanity check on live bytes we need to mark the heap.
     SimulateIncrementalMarking(heap, true);
     // Sanity check that the page meets the requirements for promotion.
     const int threshold_bytes =
-        FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory / 100;
+        FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
     CHECK_GE(first_page->LiveBytes(), threshold_bytes);
     // Actual checks: The page is in new space first, but is moved to old space
...
@@ -315,12 +315,12 @@ TEST(MemoryAllocator) {
   {
     int total_pages = 0;
     OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
-    Page* first_page = memory_allocator->AllocatePage<Page>(
+    Page* first_page = memory_allocator->AllocatePage(
         faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
         NOT_EXECUTABLE);
 
     first_page->InsertAfter(faked_space.anchor()->prev_page());
-    CHECK(first_page->is_valid());
+    CHECK(Page::IsValid(first_page));
     CHECK(first_page->next_page() == faked_space.anchor());
     total_pages++;
@@ -329,10 +329,10 @@ TEST(MemoryAllocator) {
     }
 
     // Again, we should get n or n - 1 pages.
-    Page* other = memory_allocator->AllocatePage<Page>(
+    Page* other = memory_allocator->AllocatePage(
         faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
         NOT_EXECUTABLE);
-    CHECK(other->is_valid());
+    CHECK(Page::IsValid(other));
     total_pages++;
     other->InsertAfter(first_page);
     int page_count = 0;
@@ -343,7 +343,7 @@ TEST(MemoryAllocator) {
     CHECK(total_pages == page_count);
 
     Page* second_page = first_page->next_page();
-    CHECK(second_page->is_valid());
+    CHECK(Page::IsValid(second_page));
 
     // OldSpace's destructor will tear down the space and free up all pages.
   }
...