Commit 9af3142f authored by mlippautz, committed by Commit bot

Revert of [heap] Remove border page

Reason for revert:
No real improvement, as we still lack the ability to promote objects from
scavenges/young-generation GCs.

Let's keep this in mind for later.

Original issue's description:
> [heap] Remove border page
>
> A page now belongs to either the nursery *or* the intermediate gen. The
> page that contained objects of both spaces is removed in this change.
>
> BUG=chromium:636331
>
> Committed: https://crrev.com/42ece47446f0dbd3779d6e0e00dce97a1931a9f9
> Cr-Commit-Position: refs/heads/master@{#39778}

TBR=ulan@chromium.org,hpayer@chromium.org
BUG=chromium:636331

Review-Url: https://codereview.chromium.org/2383443002
Cr-Commit-Position: refs/heads/master@{#39854}
parent 1e937f66
@@ -482,6 +482,9 @@ bool Heap::InOldSpaceSlow(Address address) {
 template <PromotionMode promotion_mode>
 bool Heap::ShouldBePromoted(Address old_address, int object_size) {
+  Page* page = Page::FromAddress(old_address);
+  Address age_mark = new_space_->age_mark();
+
   if (promotion_mode == PROMOTE_MARKED) {
     MarkBit mark_bit = ObjectMarking::MarkBitFrom(old_address);
     if (!Marking::IsWhite(mark_bit)) {
@@ -489,7 +492,8 @@ bool Heap::ShouldBePromoted(Address old_address, int object_size) {
     }
   }
-  return Page::FromAddress(old_address)->InIntermediateGeneration();
+  return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
+         (!page->ContainsLimit(age_mark) || old_address < age_mark);
 }
 
 PromotionMode Heap::CurrentPromotionMode() {
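Note: the restored predicate replaces the single InIntermediateGeneration() page-flag check with an age-mark comparison. A minimal standalone sketch of the decision, with illustrative names only (none of these are V8 identifiers): an object is promoted when its page was sealed below the age mark, and an exact address comparison is needed only on the page that contains the mark itself.

#include <cstdint>

// "below_age_mark" models the NEW_SPACE_BELOW_AGE_MARK page flag;
// "page_contains_mark" models page->ContainsLimit(age_mark).
bool ShouldPromote(uintptr_t object_addr, bool below_age_mark,
                   bool page_contains_mark, uintptr_t age_mark) {
  // Pages wholly below the mark promote everything they hold; on the
  // boundary page only objects allocated before the mark qualify.
  return below_age_mark && (!page_contains_mark || object_addr < age_mark);
}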
@@ -590,8 +594,16 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
   // Bail out if the memento is below the age mark, which can happen when
   // mementos survived because a page got moved within new space.
   Page* object_page = Page::FromAddress(object_address);
-  if (object_page->InIntermediateGeneration()) {
-    return nullptr;
+  if (object_page->IsFlagSet(Page::NEW_SPACE_BELOW_AGE_MARK)) {
+    Address age_mark =
+        reinterpret_cast<SemiSpace*>(object_page->owner())->age_mark();
+    if (!object_page->Contains(age_mark)) {
+      return nullptr;
+    }
+    // Do an exact check in the case where the age mark is on the same page.
+    if (object_address < age_mark) {
+      return nullptr;
+    }
   }
   AllocationMemento* memento_candidate = AllocationMemento::cast(candidate);
...
@@ -1733,7 +1733,8 @@ void Heap::Scavenge() {
   DCHECK(new_space_front == new_space_->top());
 
-  new_space_->SealIntermediateGeneration();
+  // Set age mark.
+  new_space_->set_age_mark(new_space_->top());
 
   ArrayBufferTracker::FreeDeadInNewSpace(this);
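The line above restores the core invariant of the age-mark scheme: at the end of a scavenge the mark is set to the allocation top, so every survivor copied during this scavenge sits below the mark and becomes eligible for promotion in the next one. A toy model of that invariant (assumed names, not V8 code):

#include <cassert>
#include <cstdint>

struct ToSpaceModel {
  uintptr_t top;       // allocation pointer
  uintptr_t age_mark;  // set to top when a scavenge finishes
};

int main() {
  ToSpaceModel s{0x1000, 0x1000};
  s.top += 0x200;      // survivors copied into to-space by a scavenge
  s.age_mark = s.top;  // models new_space_->set_age_mark(new_space_->top())
  uintptr_t survivor = s.top - 0x10;
  assert(survivor < s.age_mark);       // old enough to promote next time
  s.top += 0x40;                       // fresh allocation after the scavenge
  assert(s.top - 0x10 >= s.age_mark);  // stays in the nursery
  return 0;
}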
@@ -5082,6 +5083,7 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
   }
 
   initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
+
   if (FLAG_semi_space_growth_factor < 2) {
     FLAG_semi_space_growth_factor = 2;
   }
...
@@ -1364,6 +1364,7 @@ class Heap {
   }
 
   inline void UpdateNewSpaceAllocationCounter();
+
   inline size_t NewSpaceAllocationCounter();
 
   // This should be used only for testing.
...
@@ -3147,13 +3147,15 @@ bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
   if (FLAG_trace_evacuation) {
     PrintIsolate(heap->isolate(),
                  "evacuation[%p]: page=%p new_space=%d "
-                 "page_evacuation=%d executable=%d live_bytes=%d time=%f\n",
+                 "page_evacuation=%d executable=%d contains_age_mark=%d "
+                 "live_bytes=%d time=%f\n",
                  static_cast<void*>(this), static_cast<void*>(page),
                  page->InNewSpace(),
                  page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
                      page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
-                 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
-                 evacuation_time);
+                 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
+                 page->Contains(heap->new_space()->age_mark()),
+                 saved_live_bytes, evacuation_time);
   }
   return success;
 }
@@ -3264,11 +3266,13 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
   }
 
   const bool reduce_memory = heap()->ShouldReduceMemory();
+  const Address age_mark = heap()->new_space()->age_mark();
   for (Page* page : newspace_evacuation_candidates_) {
     live_bytes += page->LiveBytes();
     if (!reduce_memory && !page->NeverEvacuate() &&
-        (page->LiveBytes() > Evacuator::PageEvacuationThreshold())) {
-      if (page->InIntermediateGeneration()) {
+        (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
+        !page->Contains(age_mark)) {
+      if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
         EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
       } else {
         EvacuateNewSpacePageVisitor::MoveToToSpace(page);
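The restored condition adds two guards to whole-page evacuation: the page holding the age mark is never moved wholesale, and the NEW_SPACE_BELOW_AGE_MARK flag picks the destination. A hedged sketch of the decision (the enum and parameter names are invented for illustration):

#include <cstddef>

enum class PageFate { kKeep, kMoveToOldSpace, kMoveWithinNewSpace };

PageFate ChooseFate(bool reduce_memory, bool never_evacuate,
                    size_t live_bytes, size_t threshold,
                    bool contains_age_mark, bool below_age_mark) {
  if (reduce_memory || never_evacuate || live_bytes <= threshold ||
      contains_age_mark) {
    return PageFate::kKeep;  // fall back to object-by-object evacuation
  }
  return below_age_mark ? PageFate::kMoveToOldSpace
                        : PageFate::kMoveWithinNewSpace;
}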
@@ -3514,7 +3518,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
     EvacuateNewSpacePrologue();
     EvacuatePagesInParallel();
-    heap()->new_space()->SealIntermediateGeneration();
+    heap()->new_space()->set_age_mark(heap()->new_space()->top());
   }
 
   UpdatePointersAfterEvacuation();
...
@@ -165,22 +165,6 @@ bool NewSpace::FromSpaceContainsSlow(Address a) {
 bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
 bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
 
-size_t NewSpace::AllocatedSinceLastGC() {
-  Page* top_page = Page::FromAllocationAreaAddress(top());
-  size_t allocated = 0;
-  // If top gets reset to be in the range of pages that are below the age
-  // mark, this loop will not trigger and we return 0 (invalid).
-  for (Page* current_page = top_page;
-       !current_page->InIntermediateGeneration() &&
-       current_page != to_space_.anchor();
-       current_page = current_page->prev_page()) {
-    allocated += (top_page == current_page)
-                     ? static_cast<size_t>(top() - current_page->area_start())
-                     : Page::kAllocatableMemory;
-  }
-  return allocated;
-}
-
 // --------------------------------------------------------------------------
 // AllocationResult
...
@@ -1489,9 +1489,8 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
 bool NewSpace::SetUp(int initial_semispace_capacity,
                      int maximum_semispace_capacity) {
-  DCHECK_LE(initial_semispace_capacity, maximum_semispace_capacity);
+  DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
   DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
-  DCHECK_GE(initial_semispace_capacity, 2 * Page::kPageSize);
 
   to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
   from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
@@ -1592,16 +1591,8 @@ bool SemiSpace::EnsureCurrentCapacity() {
       current_page = current_page->next_page();
       if (actual_pages > expected_pages) {
         Page* to_remove = current_page->prev_page();
-        if (to_remove == current_page_) {
-          // Corner case: All pages have been moved within new space. We are
-          // removing the page that contains the top pointer and need to set
-          // it to the end of the intermediate generation.
-          NewSpace* new_space = heap()->new_space();
-          CHECK_EQ(new_space->top(), current_page_->area_start());
-          current_page_ = to_remove->prev_page();
-          CHECK(current_page_->InIntermediateGeneration());
-          new_space->SetAllocationInfo(page_high(), page_high());
-        }
+        // Make sure we don't overtake the actual top pointer.
+        CHECK_NE(to_remove, current_page_);
         to_remove->Unlink();
         heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
             to_remove);
@@ -1931,6 +1922,9 @@ bool SemiSpace::Commit() {
   }
   Reset();
   AccountCommitted(current_capacity_);
+  if (age_mark_ == nullptr) {
+    age_mark_ = first_page()->area_start();
+  }
   committed_ = true;
   return true;
 }
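The guard above lazily initializes the mark on first commit: a fresh semispace has no survivors yet, so its "below age mark" region starts out empty at the beginning of the first page. The same default in a one-function model (illustrative types, not V8 code):

#include <cstdint>

struct SemiSpaceModel {
  uintptr_t first_page_start;
  uintptr_t age_mark;  // 0 stands in for the nullptr sentinel
};

void OnCommit(SemiSpaceModel& s) {
  if (s.age_mark == 0) s.age_mark = s.first_page_start;
}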
@@ -2042,7 +2036,7 @@ void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
     if (id_ == kToSpace) {
       page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
       page->SetFlag(MemoryChunk::IN_TO_SPACE);
-      page->ClearFlag(MemoryChunk::IN_INTERMEDIATE_GENERATION);
+      page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
       page->ResetLiveBytes();
     } else {
       page->SetFlag(MemoryChunk::IN_FROM_SPACE);
@@ -2085,6 +2079,7 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
   std::swap(from->current_capacity_, to->current_capacity_);
   std::swap(from->maximum_capacity_, to->maximum_capacity_);
   std::swap(from->minimum_capacity_, to->minimum_capacity_);
+  std::swap(from->age_mark_, to->age_mark_);
   std::swap(from->committed_, to->committed_);
   std::swap(from->anchor_, to->anchor_);
   std::swap(from->current_page_, to->current_page_);
@@ -2093,37 +2088,12 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
   from->FixPagesFlags(0, 0);
 }
 
-void NewSpace::SealIntermediateGeneration() {
-  fragmentation_in_intermediate_generation_ = 0;
-  const Address mark = top();
-  if (mark == to_space_.space_start()) {
-    // Do not mark any pages as being part of the intermediate generation if no
-    // objects got moved.
-    return;
-  }
-  for (Page* p : NewSpacePageRange(to_space_.space_start(), mark)) {
-    p->SetFlag(MemoryChunk::IN_INTERMEDIATE_GENERATION);
-  }
-  Page* p = Page::FromAllocationAreaAddress(mark);
-  if (mark < p->area_end()) {
-    heap()->CreateFillerObjectAt(mark, static_cast<int>(p->area_end() - mark),
-                                 ClearRecordedSlots::kNo);
-    fragmentation_in_intermediate_generation_ =
-        static_cast<size_t>(p->area_end() - mark);
-    DCHECK_EQ(to_space_.current_page(), p);
-    if (to_space_.AdvancePage()) {
-      UpdateAllocationInfo();
-    } else {
-      allocation_info_.Reset(to_space_.page_high(), to_space_.page_high());
-    }
-  }
-  if (FLAG_trace_gc_verbose) {
-    PrintIsolate(heap()->isolate(),
-                 "Sealing intermediate generation: bytes_lost=%zu\n",
-                 fragmentation_in_intermediate_generation_);
-  }
-}
+void SemiSpace::set_age_mark(Address mark) {
+  DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
+  age_mark_ = mark;
+  // Mark all pages up to the one containing mark.
+  for (Page* p : NewSpacePageRange(space_start(), mark)) {
+    p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+  }
+}
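The restored SemiSpace::set_age_mark does two things: it records the mark and eagerly tags every page from the start of the space up to and including the page holding the mark, so later promotion checks reduce to a single flag test. A self-contained sketch over a toy page list (assumed types, not V8 code):

#include <cstdint>
#include <vector>

struct PageModel {
  uintptr_t start;
  uintptr_t end;
  bool below_age_mark = false;  // models NEW_SPACE_BELOW_AGE_MARK
};

// Models iterating NewSpacePageRange(space_start(), mark): tag every page
// whose start lies before the mark, including the page containing it.
void SetAgeMark(std::vector<PageModel>& pages, uintptr_t mark) {
  for (PageModel& p : pages) {
    if (p.start >= mark) break;
    p.below_age_mark = true;
  }
}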
...
@@ -234,14 +234,10 @@ class MemoryChunk {
     IS_EXECUTABLE = 1u << 0,
     POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
     POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
 
     // A page in new space has one of the next to flags set.
     IN_FROM_SPACE = 1u << 3,
     IN_TO_SPACE = 1u << 4,
-
-    // |IN_INTERMEDIATE_GENERATION|: Flag indicates whether this page contains
-    // objects that have already been copied once.
-    IN_INTERMEDIATE_GENERATION = 1u << 5,
+    NEW_SPACE_BELOW_AGE_MARK = 1u << 5,
     EVACUATION_CANDIDATE = 1u << 6,
     NEVER_EVACUATE = 1u << 7,
@@ -563,10 +559,6 @@ class MemoryChunk {
   bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
 
-  bool InIntermediateGeneration() {
-    return IsFlagSet(IN_INTERMEDIATE_GENERATION);
-  }
-
   MemoryChunk* next_chunk() { return next_chunk_.Value(); }
   MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
@@ -2235,6 +2227,7 @@ class SemiSpace : public Space {
         current_capacity_(0),
         maximum_capacity_(0),
         minimum_capacity_(0),
+        age_mark_(nullptr),
         committed_(false),
         id_(semispace),
         anchor_(this),
@@ -2303,6 +2296,10 @@ class SemiSpace : public Space {
   void RemovePage(Page* page);
   void PrependPage(Page* page);
 
+  // Age mark accessors.
+  Address age_mark() { return age_mark_; }
+  void set_age_mark(Address mark);
+
   // Returns the current capacity of the semispace.
   int current_capacity() { return current_capacity_; }
@@ -2371,6 +2368,9 @@ class SemiSpace : public Space {
   // The minimum capacity for the space. A space cannot shrink below this size.
   int minimum_capacity_;
 
+  // Used to govern object promotion during mark-compact collection.
+  Address age_mark_;
+
   bool committed_;
   SemiSpaceId id_;
@@ -2421,8 +2421,7 @@ class NewSpace : public Space {
         reservation_(),
         top_on_previous_step_(0),
         allocated_histogram_(nullptr),
-        promoted_histogram_(nullptr),
-        fragmentation_in_intermediate_generation_(0) {}
+        promoted_histogram_(nullptr) {}
 
   inline bool Contains(HeapObject* o);
   inline bool ContainsSlow(Address a);
@@ -2455,10 +2454,7 @@ class NewSpace : public Space {
            static_cast<int>(top() - to_space_.page_low());
   }
 
-  intptr_t SizeOfObjects() override {
-    return Size() -
-           static_cast<intptr_t>(fragmentation_in_intermediate_generation_);
-  }
+  intptr_t SizeOfObjects() override { return Size(); }
 
   // Return the allocatable capacity of a semispace.
   intptr_t Capacity() {
@@ -2491,7 +2487,42 @@ class NewSpace : public Space {
   // Return the available bytes without growing.
   intptr_t Available() override { return Capacity() - Size(); }
 
-  inline size_t AllocatedSinceLastGC();
+  size_t AllocatedSinceLastGC() {
+    bool seen_age_mark = false;
+    Address age_mark = to_space_.age_mark();
+    Page* current_page = to_space_.first_page();
+    Page* age_mark_page = Page::FromAddress(age_mark);
+    Page* last_page = Page::FromAddress(top() - kPointerSize);
+    if (age_mark_page == last_page) {
+      if (top() - age_mark >= 0) {
+        return top() - age_mark;
+      }
+      // Top was reset at some point, invalidating this metric.
+      return 0;
+    }
+    while (current_page != last_page) {
+      if (current_page == age_mark_page) {
+        seen_age_mark = true;
+        break;
+      }
+      current_page = current_page->next_page();
+    }
+    if (!seen_age_mark) {
+      // Top was reset at some point, invalidating this metric.
+      return 0;
+    }
+    intptr_t allocated = age_mark_page->area_end() - age_mark;
+    DCHECK_EQ(current_page, age_mark_page);
+    current_page = age_mark_page->next_page();
+    while (current_page != last_page) {
+      allocated += Page::kAllocatableMemory;
+      current_page = current_page->next_page();
+    }
+    allocated += top() - current_page->area_start();
+    DCHECK_LE(0, allocated);
+    DCHECK_LE(allocated, Size());
+    return static_cast<size_t>(allocated);
+  }
 
   void MovePageFromSpaceToSpace(Page* page) {
     DCHECK(page->InFromSpace());
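In the general case, the restored AllocatedSinceLastGC() above boils down to three terms: the tail of the page holding the age mark, the full pages strictly between it and the top page, and the used head of the top page; it returns 0 when top was reset below the mark. The arithmetic in isolation (parameter names are illustrative, not V8 identifiers):

#include <cstddef>
#include <cstdint>

size_t AllocatedSinceAgeMark(uintptr_t age_mark, uintptr_t mark_page_end,
                             size_t full_pages_between,
                             size_t allocatable_per_page,
                             uintptr_t top_page_start, uintptr_t top) {
  return (mark_page_end - age_mark) +                // tail of the mark's page
         full_pages_between * allocatable_per_page + // whole pages in between
         (top - top_page_start);                     // used head of top's page
}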
@@ -2530,8 +2561,10 @@ class NewSpace : public Space {
   // Return the address of the first object in the active semispace.
   Address bottom() { return to_space_.space_start(); }
 
-  // Seal the intermediate generation of the active semispace.
-  void SealIntermediateGeneration();
+  // Get the age mark of the inactive semispace.
+  Address age_mark() { return from_space_.age_mark(); }
+  // Set the age mark in the active semispace.
+  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
 
   // The allocation top and limit address.
   Address* allocation_top_address() { return allocation_info_.top_address(); }
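The asymmetry above (the getter reads from-space, the setter writes to-space) is intentional: the std::swap(from->age_mark_, to->age_mark_) restored in SemiSpace::Swap flips the mark together with the spaces at the start of a scavenge, so the mark written at the end of the previous cycle is consulted on what has since become from-space. A toy demonstration:

#include <cassert>
#include <utility>

struct SemiModel { unsigned age_mark; };

int main() {
  SemiModel to_space{0}, from_space{0};
  to_space.age_mark = 0x200;  // set_age_mark() at the end of a scavenge
  std::swap(to_space.age_mark, from_space.age_mark);  // semispaces flip
  assert(from_space.age_mark == 0x200);  // age_mark() reads from-space
  return 0;
}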
@@ -2556,10 +2589,6 @@ class NewSpace : public Space {
   // Reset the allocation pointer to the beginning of the active semispace.
   void ResetAllocationInfo();
 
-  void SetAllocationInfo(Address top, Address limit) {
-    allocation_info_.Reset(top, limit);
-  }
-
   // When inline allocation stepping is active, either because of incremental
   // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
   // inline allocation every once in a while. This is done by setting
@@ -2670,8 +2699,6 @@ class NewSpace : public Space {
   HistogramInfo* allocated_histogram_;
   HistogramInfo* promoted_histogram_;
 
-  size_t fragmentation_in_intermediate_generation_;
-
   bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
 
   // If we are doing inline allocation in steps, this method performs the 'step'
...
@@ -7111,20 +7111,5 @@ TEST(RememberedSetRemoveRange) {
   });
 }
 
-TEST(EmptyIntermediateGeneration) {
-  CcTest::InitializeVM();
-  Heap* heap = CcTest::heap();
-  heap::GcAndSweep(heap, OLD_SPACE);
-  v8::HandleScope scope(CcTest::isolate());
-  {
-    v8::HandleScope temp_scope(CcTest::isolate());
-    heap::SimulateFullSpace(heap->new_space());
-  }
-  heap::GcAndSweep(heap, OLD_SPACE);
-  for (Page* p : *heap->new_space()) {
-    CHECK(!p->InIntermediateGeneration());
-  }
-}
-
 }  // namespace internal
 }  // namespace v8