Commit 4e8736da authored by mlippautz, Committed by Commit bot

[heap] Merge NewSpacePage into Page

BUG=chromium:581412
LOG=N

Review URL: https://codereview.chromium.org/1900423002

Cr-Commit-Position: refs/heads/master@{#35768}
parent fac7361c
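
The hunks below fold the former NewSpacePage type and its helpers into the unified Page class. As a quick orientation, the renames visible in this diff map roughly as follows (an illustrative summary, not an exhaustive list):

// Rough mapping of the renamed entry points, as visible in the hunks below:
//
//   NewSpacePage::FromAddress(addr)      -> Page::FromAddress(addr)
//   NewSpacePage::FromLimit(limit)       -> Page::FromAllocationAreaAddress(limit)
//   Page::FromAllocationTop(top)         -> Page::FromAllocationAreaAddress(top)
//   NewSpacePage::IsAtEnd(addr)          -> Page::IsAlignedToPageSize(addr)
//   NewSpacePage::OnSamePage(a, b)       -> Page::OnSamePage(a, b)
//   NewSpacePage::OffsetInPage(addr)     -> Page::OffsetInPage(addr)
//   NewSpacePage::kAllocatableMemory     -> Page::kAllocatableMemory
//   Page::Convert(new_space_page, owner) -> Page::ConvertNewToOld(page, owner)
//   page->is_valid()                     -> Page::IsValid(page)
//   lop->is_valid()                      -> LargePage::IsValid(lop)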
......@@ -395,7 +395,7 @@ bool Heap::OldGenerationAllocationLimitReached() {
bool Heap::ShouldBePromoted(Address old_address, int object_size) {
NewSpacePage* page = NewSpacePage::FromAddress(old_address);
Page* page = Page::FromAddress(old_address);
Address age_mark = new_space_.age_mark();
return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
(!page->ContainsLimit(age_mark) || old_address < age_mark);
......@@ -476,7 +476,7 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
Address object_address = object->address();
Address memento_address = object_address + object->Size();
Address last_memento_word_address = memento_address + kPointerSize;
if (!NewSpacePage::OnSamePage(object_address, last_memento_word_address)) {
if (!Page::OnSamePage(object_address, last_memento_word_address)) {
return nullptr;
}
HeapObject* candidate = HeapObject::FromAddress(memento_address);
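
The OnSamePage check above bails out when the memento word behind the object would spill onto the next page. The helper's body is not part of the visible hunks; a plausible sketch, assuming pages are aligned to a power-of-two Page::kPageSize, is:

// Hypothetical sketch only; the real helper lives in spaces.h, which is not
// shown in this diff. Two addresses are on the same page iff their
// page-aligned bases match.
static bool OnSamePage(Address address1, Address address2) {
  return (reinterpret_cast<uintptr_t>(address1) &
          ~static_cast<uintptr_t>(Page::kPageSize - 1)) ==
         (reinterpret_cast<uintptr_t>(address2) &
          ~static_cast<uintptr_t>(Page::kPageSize - 1));
}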
......@@ -504,7 +504,7 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
top = NewSpaceTop();
DCHECK(memento_address == top ||
memento_address + HeapObject::kHeaderSize <= top ||
!NewSpacePage::OnSamePage(memento_address, top - 1));
!Page::OnSamePage(memento_address, top - 1));
if ((memento_address != top) && memento_candidate->IsValid()) {
return memento_candidate;
}
......
......@@ -945,7 +945,7 @@ void Heap::EnsureFillerObjectAtTop() {
// may be uninitialized memory behind top. We fill the remainder of the page
// with a filler.
Address to_top = new_space_.top();
NewSpacePage* page = NewSpacePage::FromAddress(to_top - kPointerSize);
Page* page = Page::FromAddress(to_top - kPointerSize);
if (page->Contains(to_top)) {
int remaining_in_page = static_cast<int>(page->area_end() - to_top);
CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
......@@ -1552,7 +1552,8 @@ void PromotionQueue::Initialize() {
front_ = rear_ =
reinterpret_cast<struct Entry*>(heap_->new_space()->ToSpaceEnd());
limit_ = reinterpret_cast<struct Entry*>(
Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_))
->area_start());
emergency_stack_ = NULL;
}
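
FromAllocationAreaAddress, which replaces FromAllocationTop here and in the scavenger below, has to tolerate addresses that sit exactly on a page limit: numerically such an address coincides with the start of the next page, but it belongs to the allocation area that ends there. A minimal sketch of the intended semantics (the helper's body is not part of these hunks):

// Hypothetical sketch: resolve an allocation-area address, which may equal a
// page limit, to the page owning the area that ends at that address.
static Page* FromAllocationAreaAddress(Address address) {
  // Stepping back one pointer slot keeps a limit address on its own page
  // rather than resolving to the following page's start.
  return Page::FromAddress(address - kPointerSize);
}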
......@@ -1560,7 +1561,7 @@ void PromotionQueue::Initialize() {
void PromotionQueue::RelocateQueueHead() {
DCHECK(emergency_stack_ == NULL);
Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
Page* p = Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
struct Entry* head_start = rear_;
struct Entry* head_end =
Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));
......@@ -1909,13 +1910,14 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// queue of unprocessed copied objects. Process them until the
// queue is empty.
while (new_space_front != new_space_.top()) {
if (!NewSpacePage::IsAtEnd(new_space_front)) {
if (!Page::IsAlignedToPageSize(new_space_front)) {
HeapObject* object = HeapObject::FromAddress(new_space_front);
new_space_front +=
StaticScavengeVisitor::IterateBody(object->map(), object);
} else {
new_space_front =
NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
new_space_front = Page::FromAllocationAreaAddress(new_space_front)
->next_page()
->area_start();
}
}
......@@ -4629,7 +4631,7 @@ void Heap::ZapFromSpace() {
NewSpacePageIterator it(new_space_.FromSpaceStart(),
new_space_.FromSpaceEnd());
while (it.has_next()) {
NewSpacePage* page = it.next();
Page* page = it.next();
for (Address cursor = page->area_start(), limit = page->area_end();
cursor < limit; cursor += kPointerSize) {
Memory::Address_at(cursor) = kFromSpaceZapValue;
......
......@@ -322,7 +322,7 @@ class PromotionQueue {
}
Page* GetHeadPage() {
return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
}
void SetNewLimit(Address limit) {
......@@ -330,7 +330,7 @@ class PromotionQueue {
if (emergency_stack_) return;
// If the limit is not on the same page, we can ignore it.
if (Page::FromAllocationTop(limit) != GetHeadPage()) return;
if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;
limit_ = reinterpret_cast<struct Entry*>(limit);
......
......@@ -348,7 +348,7 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
NewSpace* space) {
NewSpacePageIterator it(space);
while (it.has_next()) {
NewSpacePage* p = it.next();
Page* p = it.next();
SetNewSpacePageFlags(p, false);
}
}
......@@ -361,7 +361,7 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
LargePage* lop = heap_->lo_space()->first_page();
while (lop->is_valid()) {
while (LargePage::IsValid(lop)) {
SetOldSpacePageFlags(lop, false, false);
lop = lop->next_page();
}
......@@ -380,7 +380,7 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
while (it.has_next()) {
NewSpacePage* p = it.next();
Page* p = it.next();
SetNewSpacePageFlags(p, true);
}
}
......@@ -393,7 +393,7 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
ActivateIncrementalWriteBarrier(heap_->new_space());
LargePage* lop = heap_->lo_space()->first_page();
while (lop->is_valid()) {
while (LargePage::IsValid(lop)) {
SetOldSpacePageFlags(lop, true, is_compacting_);
lop = lop->next_page();
}
......
......@@ -185,7 +185,7 @@ class IncrementalMarking {
SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
}
inline void SetNewSpacePageFlags(MemoryChunk* chunk) {
inline void SetNewSpacePageFlags(Page* chunk) {
SetNewSpacePageFlags(chunk, IsMarking());
}
......
......@@ -134,10 +134,9 @@ static void VerifyMarking(NewSpace* space) {
NewSpacePageIterator it(space->bottom(), end);
// The bottom position is at the start of its page. Allows us to use
// page->area_start() as start of range on all pages.
CHECK_EQ(space->bottom(),
NewSpacePage::FromAddress(space->bottom())->area_start());
CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
while (it.has_next()) {
NewSpacePage* page = it.next();
Page* page = it.next();
Address limit = it.has_next() ? page->area_end() : end;
CHECK(limit == end || !page->Contains(end));
VerifyMarking(space->heap(), page->area_start(), limit);
......@@ -209,7 +208,7 @@ static void VerifyEvacuation(NewSpace* space) {
VerifyEvacuationVisitor visitor;
while (it.has_next()) {
NewSpacePage* page = it.next();
Page* page = it.next();
Address current = page->area_start();
Address limit = it.has_next() ? page->area_end() : space->top();
CHECK(limit == space->top() || !page->Contains(space->top()));
......@@ -375,7 +374,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
NewSpacePageIterator it(space->bottom(), space->top());
while (it.has_next()) {
NewSpacePage* p = it.next();
Page* p = it.next();
CHECK(p->markbits()->IsClean());
CHECK_EQ(0, p->LiveBytes());
}
......@@ -1803,9 +1802,9 @@ class MarkCompactCollector::EvacuateNewSpacePageVisitor final
public:
EvacuateNewSpacePageVisitor() : promoted_size_(0) {}
static void MoveToOldSpace(NewSpacePage* page, PagedSpace* owner) {
static void MoveToOldSpace(Page* page, PagedSpace* owner) {
page->heap()->new_space()->ReplaceWithEmptyPage(page);
Page* new_page = Page::Convert(page, owner);
Page* new_page = Page::ConvertNewToOld(page, owner);
new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
}
......@@ -1884,7 +1883,7 @@ void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
NewSpace* space = heap()->new_space();
NewSpacePageIterator it(space->bottom(), space->top());
while (it.has_next()) {
NewSpacePage* page = it.next();
Page* page = it.next();
DiscoverGreyObjectsOnPage(page);
if (marking_deque()->IsFull()) return;
}
......@@ -3050,9 +3049,8 @@ class MarkCompactCollector::Evacuator : public Malloced {
// evacuation.
static int PageEvacuationThreshold() {
if (FLAG_page_promotion)
return FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory /
100;
return NewSpacePage::kAllocatableMemory + kPointerSize;
return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
return Page::kAllocatableMemory + kPointerSize;
}
explicit Evacuator(MarkCompactCollector* collector)
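
The threshold above is simply a percentage of a page's allocatable area, and with FLAG_page_promotion disabled it is pushed just past kAllocatableMemory so no page can ever reach it. A small worked illustration with invented numbers (neither the flag default nor the page size is taken from this diff):

// Illustrative values only, not the real constants:
const int kExampleAllocatableMemory = 500 * 1024;  // stand-in for Page::kAllocatableMemory
const int kExamplePromotionPercent = 70;           // stand-in for FLAG_page_promotion_threshold
// A new-space page is promoted wholesale only if its live bytes exceed:
const int threshold = kExamplePromotionPercent * kExampleAllocatableMemory / 100;
// => 358400 bytes (350 KB) of live data on a 500 KB allocation area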
......@@ -3067,7 +3065,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
duration_(0.0),
bytes_compacted_(0) {}
inline bool EvacuatePage(MemoryChunk* chunk);
inline bool EvacuatePage(Page* chunk);
// Merge back locally cached info sequentially. Note that this method needs
// to be called from the main thread.
......@@ -3101,7 +3099,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
}
template <IterationMode mode, class Visitor>
inline bool EvacuateSinglePage(MemoryChunk* p, Visitor* visitor);
inline bool EvacuateSinglePage(Page* p, Visitor* visitor);
MarkCompactCollector* collector_;
......@@ -3120,7 +3118,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
};
template <MarkCompactCollector::IterationMode mode, class Visitor>
bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p,
Visitor* visitor) {
bool success = false;
DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
......@@ -3154,28 +3152,27 @@ bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
return success;
}
bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
bool result = false;
DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
NewSpacePage::kSweepingDone);
switch (ComputeEvacuationMode(chunk)) {
DCHECK(page->SweepingDone());
switch (ComputeEvacuationMode(page)) {
case kObjectsNewToOld:
result = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_);
DCHECK(result);
USE(result);
break;
case kPageNewToOld:
result = EvacuateSinglePage<kKeepMarking>(chunk, &new_space_page_visitor);
result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor);
DCHECK(result);
USE(result);
break;
case kObjectsOldToOld:
result = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_);
if (!result) {
// Aborted compaction page. We can record slots here to have them
// processed in parallel later on.
EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
result = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
EvacuateRecordOnlyVisitor record_visitor(page->owner()->identity());
result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor);
DCHECK(result);
USE(result);
// We need to return failure here to indicate that we want this page
......@@ -3244,7 +3241,7 @@ class EvacuationJobTraits {
static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
MemoryChunk* chunk, PerPageData) {
return evacuator->EvacuatePage(chunk);
return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
}
static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
......@@ -3288,8 +3285,9 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
live_bytes += page->LiveBytes();
job.AddPage(page, &abandoned_pages);
}
const Address age_mark = heap()->new_space()->age_mark();
for (NewSpacePage* page : newspace_evacuation_candidates_) {
for (Page* page : newspace_evacuation_candidates_) {
live_bytes += page->LiveBytes();
if (!page->NeverEvacuate() &&
(page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
......@@ -3674,7 +3672,7 @@ void UpdateToSpacePointersInParallel(Heap* heap) {
Address space_end = heap->new_space()->top();
NewSpacePageIterator it(space_start, space_end);
while (it.has_next()) {
NewSpacePage* page = it.next();
Page* page = it.next();
Address start =
page->Contains(space_start) ? space_start : page->area_start();
Address end = page->Contains(space_end) ? space_end : page->area_end();
......
......@@ -862,7 +862,7 @@ class MarkCompactCollector {
bool have_code_to_deoptimize_;
List<Page*> evacuation_candidates_;
List<NewSpacePage*> newspace_evacuation_candidates_;
List<Page*> newspace_evacuation_candidates_;
// True if we are collecting slots to perform evacuation from evacuation
// candidates.
......
......@@ -56,8 +56,8 @@ Page* PageIterator::next() {
HeapObject* SemiSpaceIterator::Next() {
while (current_ != limit_) {
if (NewSpacePage::IsAtEnd(current_)) {
NewSpacePage* page = NewSpacePage::FromLimit(current_);
if (Page::IsAlignedToPageSize(current_)) {
Page* page = Page::FromAllocationAreaAddress(current_);
page = page->next_page();
DCHECK(!page->is_anchor());
current_ = page->area_start();
......@@ -80,9 +80,9 @@ HeapObject* SemiSpaceIterator::next_object() { return Next(); }
// NewSpacePageIterator
NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
: prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}
: prev_page_(Page::FromAddress(space->ToSpaceStart())->prev_page()),
next_page_(Page::FromAddress(space->ToSpaceStart())),
last_page_(Page::FromAllocationAreaAddress(space->ToSpaceEnd())) {}
NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
: prev_page_(space->anchor()),
......@@ -90,17 +90,16 @@ NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
last_page_(prev_page_->prev_page()) {}
NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
: prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
next_page_(NewSpacePage::FromAddress(start)),
last_page_(NewSpacePage::FromLimit(limit)) {
: prev_page_(Page::FromAddress(start)->prev_page()),
next_page_(Page::FromAddress(start)),
last_page_(Page::FromAllocationAreaAddress(limit)) {
SemiSpace::AssertValidRange(start, limit);
}
bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }
NewSpacePage* NewSpacePageIterator::next() {
Page* NewSpacePageIterator::next() {
DCHECK(has_next());
prev_page_ = next_page_;
next_page_ = next_page_->next_page();
......@@ -244,20 +243,18 @@ bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
size_t NewSpace::AllocatedSinceLastGC() {
const intptr_t age_mark_offset =
NewSpacePage::OffsetInPage(to_space_.age_mark());
const intptr_t top_offset =
NewSpacePage::OffsetInPage(allocation_info_.top());
const intptr_t age_mark_offset = Page::OffsetInPage(to_space_.age_mark());
const intptr_t top_offset = Page::OffsetInPage(allocation_info_.top());
const intptr_t age_mark_delta =
age_mark_offset >= NewSpacePage::kObjectStartOffset
? age_mark_offset - NewSpacePage::kObjectStartOffset
: NewSpacePage::kAllocatableMemory;
const intptr_t top_delta = top_offset >= NewSpacePage::kObjectStartOffset
? top_offset - NewSpacePage::kObjectStartOffset
: NewSpacePage::kAllocatableMemory;
age_mark_offset >= Page::kObjectStartOffset
? age_mark_offset - Page::kObjectStartOffset
: Page::kAllocatableMemory;
const intptr_t top_delta = top_offset >= Page::kObjectStartOffset
? top_offset - Page::kObjectStartOffset
: Page::kAllocatableMemory;
DCHECK((allocated_since_last_gc_ > 0) ||
(NewSpacePage::FromLimit(allocation_info_.top()) ==
NewSpacePage::FromLimit(to_space_.age_mark())));
(Page::FromAllocationAreaAddress(allocation_info_.top()) ==
Page::FromAllocationAreaAddress(to_space_.age_mark())));
return static_cast<size_t>(allocated_since_last_gc_ + top_delta -
age_mark_delta);
}
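
The arithmetic above charges allocation in terms of in-page offsets: each delta is the distance from the page's first object slot, except that an address sitting exactly on a page boundary (offset below kObjectStartOffset) is counted as a full page of allocatable memory. A worked example with invented offsets:

// Invented numbers, for illustration only:
//   Page::kObjectStartOffset    = 256
//   age_mark offset in its page = 256 + 10 * 1024  (10 KB past area_start)
//   top offset in its page      = 256 + 40 * 1024  (40 KB past area_start)
//   allocated_since_last_gc_    = 0 (age mark and top on the same page)
// Then:
//   age_mark_delta = 10 * 1024, top_delta = 40 * 1024
//   AllocatedSinceLastGC() == 0 + 40 * 1024 - 10 * 1024 == 30 KB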
......@@ -270,16 +267,15 @@ AllocationSpace AllocationResult::RetrySpace() {
return static_cast<AllocationSpace>(Smi::cast(object_)->value());
}
NewSpacePage* NewSpacePage::Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable,
SemiSpace* owner) {
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
SemiSpace* owner) {
DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
bool in_to_space = (owner->id() != kFromSpace);
chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
: MemoryChunk::IN_FROM_SPACE);
DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
: MemoryChunk::IN_TO_SPACE));
NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
Page* page = static_cast<Page*>(chunk);
heap->incremental_marking()->SetNewSpacePageFlags(page);
return page;
}
......@@ -309,7 +305,8 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
return page;
}
Page* Page::Convert(NewSpacePage* old_page, PagedSpace* new_owner) {
Page* Page::ConvertNewToOld(Page* old_page, PagedSpace* new_owner) {
DCHECK(old_page->InNewSpace());
old_page->set_owner(new_owner);
old_page->SetFlags(0, ~0);
new_owner->AccountCommitted(old_page->size());
......@@ -359,14 +356,14 @@ void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
bool PagedSpace::Contains(Address addr) {
Page* p = Page::FromAddress(addr);
if (!p->is_valid()) return false;
if (!Page::IsValid(p)) return false;
return p->owner() == this;
}
bool PagedSpace::Contains(Object* o) {
if (!o->IsHeapObject()) return false;
Page* p = Page::FromAddress(HeapObject::cast(o)->address());
if (!p->is_valid()) return false;
if (!Page::IsValid(p)) return false;
return p->owner() == this;
}
......@@ -472,16 +469,6 @@ MemoryChunk* MemoryChunkIterator::next() {
return nullptr;
}
void Page::set_next_page(Page* page) {
DCHECK(page->owner() == owner());
set_next_chunk(page);
}
void Page::set_prev_page(Page* page) {
DCHECK(page->owner() == owner());
set_prev_chunk(page);
}
Page* FreeListCategory::page() {
return Page::FromAddress(reinterpret_cast<Address>(this));
}
......
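
FreeListCategory::page above works because the category objects are embedded in the page header itself, so rounding the category's own address down to the page boundary recovers the owning page. A plausible sketch of FromAddress under the usual power-of-two page alignment (the definition is not shown in these hunks):

// Hypothetical sketch only: pages are assumed to start at addresses aligned
// to Page::kPageSize, so masking off the low bits yields the page header.
static Page* FromAddress(Address addr) {
  return reinterpret_cast<Page*>(
      reinterpret_cast<uintptr_t>(addr) &
      ~static_cast<uintptr_t>(Page::kPageSize - 1));
}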
......@@ -6622,15 +6622,14 @@ UNINITIALIZED_TEST(PagePromotion) {
CHECK_GT(handles.size(), 0u);
// First object in handle should be on the first page.
Handle<FixedArray> first_object = handles.front();
NewSpacePage* first_page =
NewSpacePage::FromAddress(first_object->address());
Page* first_page = Page::FromAddress(first_object->address());
// The age mark should not be on the first page.
CHECK(!first_page->ContainsLimit(heap->new_space()->age_mark()));
// To perform a sanity check on live bytes we need to mark the heap.
SimulateIncrementalMarking(heap, true);
// Sanity check that the page meets the requirements for promotion.
const int threshold_bytes =
FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory / 100;
FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
CHECK_GE(first_page->LiveBytes(), threshold_bytes);
// Actual checks: The page is in new space first, but is moved to old space
......
......@@ -315,12 +315,12 @@ TEST(MemoryAllocator) {
{
int total_pages = 0;
OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
Page* first_page = memory_allocator->AllocatePage<Page>(
Page* first_page = memory_allocator->AllocatePage(
faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
NOT_EXECUTABLE);
first_page->InsertAfter(faked_space.anchor()->prev_page());
CHECK(first_page->is_valid());
CHECK(Page::IsValid(first_page));
CHECK(first_page->next_page() == faked_space.anchor());
total_pages++;
......@@ -329,10 +329,10 @@ TEST(MemoryAllocator) {
}
// Again, we should get n or n - 1 pages.
Page* other = memory_allocator->AllocatePage<Page>(
Page* other = memory_allocator->AllocatePage(
faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
NOT_EXECUTABLE);
CHECK(other->is_valid());
CHECK(Page::IsValid(other));
total_pages++;
other->InsertAfter(first_page);
int page_count = 0;
......@@ -343,7 +343,7 @@ TEST(MemoryAllocator) {
CHECK(total_pages == page_count);
Page* second_page = first_page->next_page();
CHECK(second_page->is_valid());
CHECK(Page::IsValid(second_page));
// OldSpace's destructor will tear down the space and free up all pages.
}
......