Commit 42ece474 authored by mlippautz's avatar mlippautz Committed by Commit bot

[heap] Remove border page

A page now belongs either to the nursery *or* to the intermediate generation. The
border page that could contain objects of both spaces is removed in this change.

BUG=chromium:636331

Review-Url: https://codereview.chromium.org/2209583002
Cr-Commit-Position: refs/heads/master@{#39778}
parent 0fb486fe
......@@ -519,9 +519,6 @@ bool Heap::OldGenerationAllocationLimitReached() {
template <PromotionMode promotion_mode>
bool Heap::ShouldBePromoted(Address old_address, int object_size) {
Page* page = Page::FromAddress(old_address);
Address age_mark = new_space_->age_mark();
if (promotion_mode == PROMOTE_MARKED) {
MarkBit mark_bit = ObjectMarking::MarkBitFrom(old_address);
if (!Marking::IsWhite(mark_bit)) {
......@@ -529,8 +526,7 @@ bool Heap::ShouldBePromoted(Address old_address, int object_size) {
}
}
return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
(!page->ContainsLimit(age_mark) || old_address < age_mark);
return Page::FromAddress(old_address)->InIntermediateGeneration();
}
PromotionMode Heap::CurrentPromotionMode() {
......@@ -631,16 +627,8 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
// Bail out if the memento is in the intermediate generation, which can
// happen when mementos survived because a page got moved within new space.
Page* object_page = Page::FromAddress(object_address);
if (object_page->IsFlagSet(Page::NEW_SPACE_BELOW_AGE_MARK)) {
Address age_mark =
reinterpret_cast<SemiSpace*>(object_page->owner())->age_mark();
if (!object_page->Contains(age_mark)) {
return nullptr;
}
// Do an exact check in the case where the age mark is on the same page.
if (object_address < age_mark) {
return nullptr;
}
if (object_page->InIntermediateGeneration()) {
return nullptr;
}
AllocationMemento* memento_candidate = AllocationMemento::cast(candidate);
......
......@@ -1755,8 +1755,7 @@ void Heap::Scavenge() {
DCHECK(new_space_front == new_space_->top());
// Seal the intermediate generation at the current allocation top.
new_space_->set_age_mark(new_space_->top());
new_space_->SealIntermediateGeneration();
ArrayBufferTracker::FreeDeadInNewSpace(this);
......@@ -5107,7 +5106,6 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
}
initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
if (FLAG_semi_space_growth_factor < 2) {
FLAG_semi_space_growth_factor = 2;
}
......
......@@ -1364,7 +1364,6 @@ class Heap {
}
inline void UpdateNewSpaceAllocationCounter();
inline size_t NewSpaceAllocationCounter();
// This should be used only for testing.
......
......@@ -3188,15 +3188,13 @@ bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
if (FLAG_trace_evacuation) {
PrintIsolate(heap->isolate(),
"evacuation[%p]: page=%p new_space=%d "
"page_evacuation=%d executable=%d contains_age_mark=%d "
"live_bytes=%d time=%f\n",
"page_evacuation=%d executable=%d live_bytes=%d time=%f\n",
static_cast<void*>(this), static_cast<void*>(page),
page->InNewSpace(),
page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
page->Contains(heap->new_space()->age_mark()),
saved_live_bytes, evacuation_time);
page->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
evacuation_time);
}
return success;
}
......@@ -3306,13 +3304,11 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
job.AddPage(page, &abandoned_pages);
}
const Address age_mark = heap()->new_space()->age_mark();
for (Page* page : newspace_evacuation_candidates_) {
live_bytes += page->LiveBytes();
if (!page->NeverEvacuate() &&
(page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
!page->Contains(age_mark)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
(page->LiveBytes() > Evacuator::PageEvacuationThreshold())) {
if (page->InIntermediateGeneration()) {
EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
} else {
EvacuateNewSpacePageVisitor::MoveToToSpace(page);
......@@ -3558,7 +3554,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
EvacuateNewSpacePrologue();
EvacuatePagesInParallel();
heap()->new_space()->set_age_mark(heap()->new_space()->top());
heap()->new_space()->SealIntermediateGeneration();
}
UpdatePointersAfterEvacuation();
......
......@@ -165,6 +165,22 @@ bool NewSpace::FromSpaceContainsSlow(Address a) {
bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
// Returns the number of bytes allocated in to-space since the last GC,
// i.e. the bytes on pages above the sealed intermediate generation.
// Walks backwards from the page containing the allocation top towards the
// space start and stops at the first intermediate-generation page (or the
// anchor). NOTE(review): assumes top() always lies on a non-intermediate
// page after a GC reset — TODO confirm against ResetAllocationInfo callers.
size_t NewSpace::AllocatedSinceLastGC() {
  Page* top_page = Page::FromAllocationAreaAddress(top());
  size_t allocated = 0;
  // If top gets reset to be in the range of pages that are below the age
  // mark (i.e. inside the intermediate generation), this loop will not
  // trigger and we return 0 (invalid).
  for (Page* current_page = top_page;
       !current_page->InIntermediateGeneration() &&
       current_page != to_space_.anchor();
       current_page = current_page->prev_page()) {
    // The top page is only partially filled; all pages below it in the
    // iteration are fully allocatable pages.
    allocated += (top_page == current_page)
                     ? static_cast<size_t>(top() - current_page->area_start())
                     : Page::kAllocatableMemory;
  }
  return allocated;
}
// --------------------------------------------------------------------------
// AllocationResult
......
......@@ -1485,8 +1485,9 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
bool NewSpace::SetUp(int initial_semispace_capacity,
int maximum_semispace_capacity) {
DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
DCHECK_LE(initial_semispace_capacity, maximum_semispace_capacity);
DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
DCHECK_GE(initial_semispace_capacity, 2 * Page::kPageSize);
to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
......@@ -1587,8 +1588,16 @@ bool SemiSpace::EnsureCurrentCapacity() {
current_page = current_page->next_page();
if (actual_pages > expected_pages) {
Page* to_remove = current_page->prev_page();
// Make sure we don't overtake the actual top pointer.
CHECK_NE(to_remove, current_page_);
if (to_remove == current_page_) {
// Corner case: All pages have been moved within new space. We are
// removing the page that contains the top pointer and need to set
// it to the end of the intermediate generation.
NewSpace* new_space = heap()->new_space();
CHECK_EQ(new_space->top(), current_page_->area_start());
current_page_ = to_remove->prev_page();
CHECK(current_page_->InIntermediateGeneration());
new_space->SetAllocationInfo(page_high(), page_high());
}
to_remove->Unlink();
heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
to_remove);
......@@ -1914,9 +1923,6 @@ bool SemiSpace::Commit() {
}
Reset();
AccountCommitted(current_capacity_);
if (age_mark_ == nullptr) {
age_mark_ = first_page()->area_start();
}
committed_ = true;
return true;
}
......@@ -2028,7 +2034,7 @@ void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
if (id_ == kToSpace) {
page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
page->SetFlag(MemoryChunk::IN_TO_SPACE);
page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
page->ClearFlag(MemoryChunk::IN_INTERMEDIATE_GENERATION);
page->ResetLiveBytes();
} else {
page->SetFlag(MemoryChunk::IN_FROM_SPACE);
......@@ -2071,7 +2077,6 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
std::swap(from->current_capacity_, to->current_capacity_);
std::swap(from->maximum_capacity_, to->maximum_capacity_);
std::swap(from->minimum_capacity_, to->minimum_capacity_);
std::swap(from->age_mark_, to->age_mark_);
std::swap(from->committed_, to->committed_);
std::swap(from->anchor_, to->anchor_);
std::swap(from->current_page_, to->current_page_);
......@@ -2080,16 +2085,39 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
from->FixPagesFlags(0, 0);
}
void NewSpace::SealIntermediateGeneration() {
fragmentation_in_intermediate_generation_ = 0;
const Address mark = top();
if (mark == to_space_.space_start()) {
// Do not mark any pages as being part of the intermediate generation if no
// objects got moved.
return;
}
void SemiSpace::set_age_mark(Address mark) {
DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
age_mark_ = mark;
// Mark all pages up to the one containing mark.
for (Page* p : NewSpacePageRange(space_start(), mark)) {
p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
for (Page* p : NewSpacePageRange(to_space_.space_start(), mark)) {
p->SetFlag(MemoryChunk::IN_INTERMEDIATE_GENERATION);
}
}
Page* p = Page::FromAllocationAreaAddress(mark);
if (mark < p->area_end()) {
heap()->CreateFillerObjectAt(mark, static_cast<int>(p->area_end() - mark),
ClearRecordedSlots::kNo);
fragmentation_in_intermediate_generation_ =
static_cast<size_t>(p->area_end() - mark);
DCHECK_EQ(to_space_.current_page(), p);
if (to_space_.AdvancePage()) {
UpdateAllocationInfo();
} else {
allocation_info_.Reset(to_space_.page_high(), to_space_.page_high());
}
}
if (FLAG_trace_gc_verbose) {
PrintIsolate(heap()->isolate(),
"Sealing intermediate generation: bytes_lost=%zu\n",
fragmentation_in_intermediate_generation_);
}
}
#ifdef DEBUG
void SemiSpace::Print() {}
......
......@@ -234,10 +234,14 @@ class MemoryChunk {
IS_EXECUTABLE = 1u << 0,
POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
// A page in new space has exactly one of the next two flags set.
IN_FROM_SPACE = 1u << 3,
IN_TO_SPACE = 1u << 4,
NEW_SPACE_BELOW_AGE_MARK = 1u << 5,
// |IN_INTERMEDIATE_GENERATION|: Flag indicates whether this page contains
// objects that have already been copied once.
IN_INTERMEDIATE_GENERATION = 1u << 5,
EVACUATION_CANDIDATE = 1u << 6,
NEVER_EVACUATE = 1u << 7,
......@@ -559,6 +563,10 @@ class MemoryChunk {
bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
// Returns true if this chunk was sealed into the intermediate generation,
// i.e. it only contains new-space objects that already survived one copy.
bool InIntermediateGeneration() {
  return IsFlagSet(IN_INTERMEDIATE_GENERATION);
}
MemoryChunk* next_chunk() { return next_chunk_.Value(); }
MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
......@@ -2224,7 +2232,6 @@ class SemiSpace : public Space {
current_capacity_(0),
maximum_capacity_(0),
minimum_capacity_(0),
age_mark_(nullptr),
committed_(false),
id_(semispace),
anchor_(this),
......@@ -2293,10 +2300,6 @@ class SemiSpace : public Space {
void RemovePage(Page* page);
void PrependPage(Page* page);
// Age mark accessors.
Address age_mark() { return age_mark_; }
void set_age_mark(Address mark);
// Returns the current capacity of the semispace.
int current_capacity() { return current_capacity_; }
......@@ -2363,9 +2366,6 @@ class SemiSpace : public Space {
// The minimum capacity for the space. A space cannot shrink below this size.
int minimum_capacity_;
// Used to govern object promotion during mark-compact collection.
Address age_mark_;
bool committed_;
SemiSpaceId id_;
......@@ -2416,7 +2416,8 @@ class NewSpace : public Space {
reservation_(),
top_on_previous_step_(0),
allocated_histogram_(nullptr),
promoted_histogram_(nullptr) {}
promoted_histogram_(nullptr),
fragmentation_in_intermediate_generation_(0) {}
inline bool Contains(HeapObject* o);
inline bool ContainsSlow(Address a);
......@@ -2449,6 +2450,11 @@ class NewSpace : public Space {
static_cast<int>(top() - to_space_.page_low());
}
intptr_t SizeOfObjects() override {
return Size() -
static_cast<intptr_t>(fragmentation_in_intermediate_generation_);
}
// The same, but returning an int. We have to have the one that returns
// intptr_t because it is inherited, but if we know we are dealing with the
// new space, which can't get as big as the other spaces then this is useful:
......@@ -2485,42 +2491,7 @@ class NewSpace : public Space {
// Return the available bytes without growing.
intptr_t Available() override { return Capacity() - Size(); }
size_t AllocatedSinceLastGC() {
bool seen_age_mark = false;
Address age_mark = to_space_.age_mark();
Page* current_page = to_space_.first_page();
Page* age_mark_page = Page::FromAddress(age_mark);
Page* last_page = Page::FromAddress(top() - kPointerSize);
if (age_mark_page == last_page) {
if (top() - age_mark >= 0) {
return top() - age_mark;
}
// Top was reset at some point, invalidating this metric.
return 0;
}
while (current_page != last_page) {
if (current_page == age_mark_page) {
seen_age_mark = true;
break;
}
current_page = current_page->next_page();
}
if (!seen_age_mark) {
// Top was reset at some point, invalidating this metric.
return 0;
}
intptr_t allocated = age_mark_page->area_end() - age_mark;
DCHECK_EQ(current_page, age_mark_page);
current_page = age_mark_page->next_page();
while (current_page != last_page) {
allocated += Page::kAllocatableMemory;
current_page = current_page->next_page();
}
allocated += top() - current_page->area_start();
DCHECK_LE(0, allocated);
DCHECK_LE(allocated, Size());
return static_cast<size_t>(allocated);
}
inline size_t AllocatedSinceLastGC();
void MovePageFromSpaceToSpace(Page* page) {
DCHECK(page->InFromSpace());
......@@ -2559,10 +2530,8 @@ class NewSpace : public Space {
// Return the address of the first object in the active semispace.
Address bottom() { return to_space_.space_start(); }
// Get the age mark of the inactive semispace.
Address age_mark() { return from_space_.age_mark(); }
// Set the age mark in the active semispace.
void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
// Seal the intermediate generation of the active semispace.
void SealIntermediateGeneration();
// The allocation top and limit address.
Address* allocation_top_address() { return allocation_info_.top_address(); }
......@@ -2587,6 +2556,10 @@ class NewSpace : public Space {
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
void SetAllocationInfo(Address top, Address limit) {
allocation_info_.Reset(top, limit);
}
// When inline allocation stepping is active, either because of incremental
// marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
// inline allocation every once in a while. This is done by setting
......@@ -2695,6 +2668,8 @@ class NewSpace : public Space {
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
size_t fragmentation_in_intermediate_generation_;
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
// If we are doing inline allocation in steps, this method performs the 'step'
......
......@@ -7110,5 +7110,20 @@ TEST(RememberedSetRemoveRange) {
});
}
// Checks that after a full GC no new-space page is still marked as part of
// the intermediate generation: the scavenge during mark-compact empties new
// space, so sealing should mark no pages.
TEST(EmptyIntermediateGeneration) {
  CcTest::InitializeVM();
  Heap* heap = CcTest::heap();
  // Start from a clean heap state.
  heap::GcAndSweep(heap, OLD_SPACE);
  v8::HandleScope scope(CcTest::isolate());
  {
    // Temporary scope: fill new space, then drop all handles so the objects
    // are unreachable for the following GC.
    v8::HandleScope temp_scope(CcTest::isolate());
    heap::SimulateFullSpace(heap->new_space());
  }
  heap::GcAndSweep(heap, OLD_SPACE);
  // New space is empty now; no page may carry the intermediate-gen flag.
  for (Page* p : *heap->new_space()) {
    CHECK(!p->InIntermediateGeneration());
  }
}
} // namespace internal
} // namespace v8
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment