Commit 8b333727 authored by machenbach's avatar machenbach Committed by Commit bot

Revert of 🏄 [heap] Add page evacuation mode for new->old (patchset #21...

Revert of 🏄 [heap] Add page evacuation mode for new->old (patchset #21 id:800001 of https://codereview.chromium.org/1863983002/ )

Reason for revert:
[Sheriff] Breaks:
https://build.chromium.org/p/client.v8.ports/builders/V8%20Linux%20-%20arm64%20-%20sim%20-%20nosnap%20-%20debug/builds/102

Original issue's description:
> [heap] Add page evacuation mode for new->old
>
> In a full mark-compact GC, instead of copying memory to old space for
> pages that have more than X% live bytes, we just move the whole page over to old
> space.
>
> X=70 (default value)
>
> BUG=chromium:581412
> LOG=N
>
> Committed: https://crrev.com/0d7e23a6edd3822970983030a77a5b80cd337911
> Cr-Commit-Position: refs/heads/master@{#35610}

TBR=hpayer@chromium.org,ulan@chromium.org,mlippautz@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:581412

Review URL: https://codereview.chromium.org/1896883003

Cr-Commit-Position: refs/heads/master@{#35619}
parent 623ad7de
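
The gist of the reverted CL, per the description above: during a full mark-compact GC, a new-space page whose live bytes exceed a threshold percentage of its allocatable area is moved to old space wholesale instead of having its objects copied out one by one. Below is a minimal, self-contained sketch of that threshold decision only; the constants and helper names here are illustrative stand-ins, not V8's real NewSpacePage::kAllocatableMemory or flag machinery (the actual removed code appears in the mark-compact.cc hunks further down).

#include <cstdint>
#include <cstdio>

// Illustrative stand-ins; in the reverted CL these come from
// NewSpacePage::kAllocatableMemory and --page_promotion_threshold (default 70).
constexpr intptr_t kAllocatableMemory = 512 * 1024;
constexpr int kPagePromotionThreshold = 70;  // percent

// Mirrors the removed Evacuator::PageEvacuationThreshold(): when promotion is
// disabled, return a value no page can exceed, so no page ever qualifies.
intptr_t PageEvacuationThreshold(bool page_promotion) {
  if (page_promotion)
    return kPagePromotionThreshold * kAllocatableMemory / 100;
  return kAllocatableMemory + static_cast<intptr_t>(sizeof(void*));
}

// A page qualifies for "move the whole page" evacuation when its live bytes
// exceed the threshold (the real check additionally requires the page to be
// below the new-space age mark and not contain it).
bool QualifiesForPagePromotion(intptr_t live_bytes, bool page_promotion) {
  return live_bytes > PageEvacuationThreshold(page_promotion);
}

int main() {
  // With the default 70% threshold, an 80%-live page is promoted as a whole,
  // while a 50%-live page is still evacuated object by object.
  std::printf("80%% live: %d\n",
              QualifiesForPagePromotion(kAllocatableMemory * 8 / 10, true));  // 1
  std::printf("50%% live: %d\n",
              QualifiesForPagePromotion(kAllocatableMemory / 2, true));       // 0
  std::printf("promotion off: %d\n",
              QualifiesForPagePromotion(kAllocatableMemory, false));          // 0
}
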
@@ -253,9 +253,6 @@ DEFINE_BOOL(compiled_keyed_generic_loads, false,
             "use optimizing compiler to generate keyed generic load stubs")
 DEFINE_BOOL(allocation_site_pretenuring, true,
             "pretenure with allocation sites")
-DEFINE_BOOL(page_promotion, true, "promote pages based on utilization")
-DEFINE_INT(page_promotion_threshold, 70,
-           "min percentage of live bytes on a page to enable fast evacuation")
 DEFINE_BOOL(trace_pretenuring, false,
             "trace pretenuring decisions of HAllocate instructions")
 DEFINE_BOOL(trace_pretenuring_statistics, false,
...
@@ -6217,6 +6217,7 @@ void DescriptorLookupCache::Clear() {
   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
 }
 
+
 void Heap::ExternalStringTable::CleanUp() {
   int last = 0;
   for (int i = 0; i < new_space_strings_.length(); ++i) {
@@ -6251,6 +6252,7 @@ void Heap::ExternalStringTable::CleanUp() {
 #endif
 }
 
+
 void Heap::ExternalStringTable::TearDown() {
   for (int i = 0; i < new_space_strings_.length(); ++i) {
     heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
...
@@ -1666,7 +1666,7 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
         semispace_copied_size_(0),
         local_pretenuring_feedback_(local_pretenuring_feedback) {}
 
-  inline bool Visit(HeapObject* object) override {
+  bool Visit(HeapObject* object) override {
     heap_->UpdateAllocationSite<Heap::kCached>(object,
                                                local_pretenuring_feedback_);
     int size = object->Size();
@@ -1798,33 +1798,6 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
   HashMap* local_pretenuring_feedback_;
 };
 
-class MarkCompactCollector::EvacuateNewSpacePageVisitor final
-    : public MarkCompactCollector::HeapObjectVisitor {
- public:
-  EvacuateNewSpacePageVisitor() : promoted_size_(0) {}
-
-  static void MoveToOldSpace(NewSpacePage* page, PagedSpace* owner) {
-    page->heap()->new_space()->ReplaceWithEmptyPage(page);
-    Page* new_page = Page::Convert(page, owner);
-    new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
-  }
-
-  inline bool Visit(HeapObject* object) {
-    if (V8_UNLIKELY(object->IsJSArrayBuffer())) {
-      object->GetHeap()->array_buffer_tracker()->Promote(
-          JSArrayBuffer::cast(object));
-    }
-    RecordMigratedSlotVisitor visitor;
-    object->IterateBodyFast(&visitor);
-    promoted_size_ += object->Size();
-    return true;
-  }
-
-  intptr_t promoted_size() { return promoted_size_; }
-
- private:
-  intptr_t promoted_size_;
-};
-
 class MarkCompactCollector::EvacuateOldSpaceVisitor final
     : public MarkCompactCollector::EvacuateVisitorBase {
@@ -1833,7 +1806,7 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor final
                           CompactionSpaceCollection* compaction_spaces)
       : EvacuateVisitorBase(heap, compaction_spaces) {}
 
-  inline bool Visit(HeapObject* object) override {
+  bool Visit(HeapObject* object) override {
     CompactionSpace* target_space = compaction_spaces_->Get(
         Page::FromAddress(object->address())->owner()->identity());
     HeapObject* target_object = nullptr;
@@ -3048,17 +3021,9 @@ void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
   newspace_evacuation_candidates_.Rewind(0);
 }
 
 class MarkCompactCollector::Evacuator : public Malloced {
  public:
-  // NewSpacePages with more live bytes than this threshold qualify for fast
-  // evacuation.
-  static int PageEvacuationThreshold() {
-    if (FLAG_page_promotion)
-      return FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory /
-             100;
-    return NewSpacePage::kAllocatableMemory + kPointerSize;
-  }
-
   explicit Evacuator(MarkCompactCollector* collector)
       : collector_(collector),
        compaction_spaces_(collector->heap()),
@@ -3066,7 +3031,6 @@ class MarkCompactCollector::Evacuator : public Malloced {
                                       kInitialLocalPretenuringFeedbackCapacity),
         new_space_visitor_(collector->heap(), &compaction_spaces_,
                            &local_pretenuring_feedback_),
-        new_space_page_visitor(),
         old_space_visitor_(collector->heap(), &compaction_spaces_),
         duration_(0.0),
         bytes_compacted_(0) {}
@@ -3080,32 +3044,17 @@ class MarkCompactCollector::Evacuator : public Malloced {
   CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
 
  private:
-  enum EvacuationMode {
-    kObjectsNewToOld,
-    kPageNewToOld,
-    kObjectsOldToOld,
-  };
-
   static const int kInitialLocalPretenuringFeedbackCapacity = 256;
 
-  inline Heap* heap() { return collector_->heap(); }
-
-  inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
-    // Note: The order of checks is important in this function.
-    if (chunk->InNewSpace()) return kObjectsNewToOld;
-    if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
-      return kPageNewToOld;
-    DCHECK(chunk->IsEvacuationCandidate());
-    return kObjectsOldToOld;
-  }
+  Heap* heap() { return collector_->heap(); }
 
   void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
     duration_ += duration;
     bytes_compacted_ += bytes_compacted;
   }
 
-  template <IterationMode mode, class Visitor>
-  inline bool EvacuateSinglePage(MemoryChunk* p, Visitor* visitor);
+  template <IterationMode mode>
+  inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor);
 
   MarkCompactCollector* collector_;
@@ -3115,7 +3064,6 @@ class MarkCompactCollector::Evacuator : public Malloced {
 
   // Visitors for the corresponding spaces.
   EvacuateNewSpaceVisitor new_space_visitor_;
-  EvacuateNewSpacePageVisitor new_space_page_visitor;
   EvacuateOldSpaceVisitor old_space_visitor_;
 
   // Book keeping info.
@@ -3123,18 +3071,17 @@ class MarkCompactCollector::Evacuator : public Malloced {
   intptr_t bytes_compacted_;
 };
 
-template <MarkCompactCollector::IterationMode mode, class Visitor>
-bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
-                                                          Visitor* visitor) {
+template <MarkCompactCollector::IterationMode mode>
+bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
+    MemoryChunk* p, HeapObjectVisitor* visitor) {
   bool success = false;
-  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
-         p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
+  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace());
   int saved_live_bytes = p->LiveBytes();
   double evacuation_time;
   {
     AlwaysAllocateScope always_allocate(heap()->isolate());
     TimedScope timed_scope(&evacuation_time);
-    success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode);
+    success = collector_->VisitLiveObjects(p, visitor, mode);
   }
   if (FLAG_trace_evacuation) {
     const char age_mark_tag =
@@ -3146,9 +3093,8 @@ bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
             : '#';
     PrintIsolate(heap()->isolate(),
                  "evacuation[%p]: page=%p new_space=%d age_mark_tag=%c "
-                 "page_evacuation=%d executable=%d live_bytes=%d time=%f\n",
+                 "executable=%d live_bytes=%d time=%f\n",
                  this, p, p->InNewSpace(), age_mark_tag,
-                 p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION),
                  p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
                  evacuation_time);
   }
@@ -3159,38 +3105,30 @@ bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
 }
 
 bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
-  bool result = false;
-  DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
-            NewSpacePage::kSweepingDone);
-  switch (ComputeEvacuationMode(chunk)) {
-    case kObjectsNewToOld:
-      result = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
-      DCHECK(result);
-      USE(result);
-      break;
-    case kPageNewToOld:
-      result = EvacuateSinglePage<kKeepMarking>(chunk, &new_space_page_visitor);
-      DCHECK(result);
-      USE(result);
-      break;
-    case kObjectsOldToOld:
-      result = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
-      if (!result) {
-        // Aborted compaction page. We can record slots here to have them
-        // processed in parallel later on.
-        EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
-        result = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
-        DCHECK(result);
-        USE(result);
-        // We need to return failure here to indicate that we want this page
-        // added to the sweeper.
-        return false;
-      }
-      break;
-    default:
-      UNREACHABLE();
+  bool success = false;
+  if (chunk->InNewSpace()) {
+    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
+              NewSpacePage::kSweepingDone);
+    success = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
+    DCHECK(success);
+    USE(success);
+  } else {
+    DCHECK(chunk->IsEvacuationCandidate());
+    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone);
+    success = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
+    if (!success) {
+      // Aborted compaction page. We can record slots here to have them
+      // processed in parallel later on.
+      EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
      success = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
+      DCHECK(success);
+      USE(success);
+      // We need to return failure here to indicate that we want this page added
+      // to the sweeper.
+      return false;
+    }
   }
-  return result;
+  return success;
 }
 
 void MarkCompactCollector::Evacuator::Finalize() {
@@ -3198,14 +3136,12 @@ void MarkCompactCollector::Evacuator::Finalize() {
   heap()->code_space()->MergeCompactionSpace(
       compaction_spaces_.Get(CODE_SPACE));
   heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
-  heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
-                                       new_space_page_visitor.promoted_size());
+  heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
   heap()->IncrementSemiSpaceCopiedObjectSize(
       new_space_visitor_.semispace_copied_size());
   heap()->IncrementYoungSurvivorsCounter(
       new_space_visitor_.promoted_size() +
-      new_space_visitor_.semispace_copied_size() +
-      new_space_page_visitor.promoted_size());
+      new_space_visitor_.semispace_copied_size());
   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
 }
 
@@ -3255,14 +3191,6 @@ class EvacuationJobTraits {
                                      bool success, PerPageData data) {
     if (chunk->InNewSpace()) {
       DCHECK(success);
-    } else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
-      DCHECK(success);
-      Page* p = static_cast<Page*>(chunk);
-      p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
-      p->ForAllFreeListCategories(
-          [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
-      heap->mark_compact_collector()->sweeper().AddLatePage(
-          p->owner()->identity(), p);
     } else {
       Page* p = static_cast<Page*>(chunk);
       if (success) {
@@ -3292,15 +3220,8 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
     live_bytes += page->LiveBytes();
     job.AddPage(page, &abandoned_pages);
   }
-  const Address age_mark = heap()->new_space()->age_mark();
   for (NewSpacePage* page : newspace_evacuation_candidates_) {
     live_bytes += page->LiveBytes();
-    if (!page->NeverEvacuate() &&
-        (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
-        page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
-        !page->Contains(age_mark)) {
-      EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
-    }
     job.AddPage(page, &abandoned_pages);
   }
   DCHECK_GE(job.NumberOfPages(), 1);
@@ -3460,8 +3381,9 @@ static void VerifyAllBlackObjects(MemoryChunk* page) {
 }
 #endif  // VERIFY_HEAP
 
-template <class Visitor>
-bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
+bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
+                                            HeapObjectVisitor* visitor,
                                             IterationMode mode) {
 #ifdef VERIFY_HEAP
   VerifyAllBlackObjects(page);
@@ -3622,8 +3544,12 @@ class PointerUpdateJobTraits {
   static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
     MapWord map_word = object->map_word();
-    // There could still be stale pointers in large object space, map space,
-    // and old space for pages that have been promoted.
+    // Since we only filter invalid slots in old space, the store buffer can
+    // still contain stale pointers in large object and in map spaces. Ignore
+    // these pointers here.
+    DCHECK(map_word.IsForwardingAddress() ||
+           !object->GetHeap()->old_space()->Contains(
+               reinterpret_cast<Address>(address)));
     if (map_word.IsForwardingAddress()) {
       // Update the corresponding slot.
       *address = map_word.ToForwardingAddress();
...
@@ -628,13 +628,14 @@ class MarkCompactCollector {
   void RegisterExternallyReferencedObject(Object** object);
 
  private:
-  class EvacuateNewSpacePageVisitor;
   class EvacuateNewSpaceVisitor;
   class EvacuateOldSpaceVisitor;
   class EvacuateRecordOnlyVisitor;
   class EvacuateVisitorBase;
   class HeapObjectVisitor;
 
+  typedef std::vector<Page*> SweepingList;
+
   explicit MarkCompactCollector(Heap* heap);
 
   bool WillBeDeoptimized(Code* code);
@@ -827,8 +828,7 @@ class MarkCompactCollector {
   // Iterates through all live objects on a page using marking information.
   // Returns whether all objects have successfully been visited.
-  template <class Visitor>
-  bool VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
+  bool VisitLiveObjects(MemoryChunk* page, HeapObjectVisitor* visitor,
                         IterationMode mode);
 
   void VisitLiveObjectsBody(Page* page, ObjectVisitor* visitor);
...
@@ -287,7 +287,6 @@ NewSpacePage* NewSpacePage::Initialize(Heap* heap, MemoryChunk* chunk,
 // --------------------------------------------------------------------------
 // PagedSpace
 
-template <Page::InitializationMode mode>
 Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                        PagedSpace* owner) {
   Page* page = reinterpret_cast<Page*>(chunk);
@@ -300,25 +299,11 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
   // Make sure that categories are initialized before freeing the area.
   page->InitializeFreeListCategories();
-  // In the case we do not free the memory, we effectively account for the whole
-  // page as allocated memory that cannot be used for further allocations.
-  if (mode == kFreeMemory) {
-    owner->Free(page->area_start(), page->area_size());
-  }
+  owner->Free(page->area_start(), page->area_size());
   return page;
 }
 
-Page* Page::Convert(NewSpacePage* old_page, PagedSpace* new_owner) {
-  old_page->set_owner(new_owner);
-  old_page->SetFlags(0, ~0);
-  new_owner->AccountCommitted(old_page->size());
-  Page* new_page = Page::Initialize<kDoNotFreeMemory>(
-      old_page->heap(), old_page, NOT_EXECUTABLE, new_owner);
-  new_page->InsertAfter(new_owner->anchor()->prev_page());
-  return new_page;
-}
-
 void Page::InitializeFreeListCategories() {
   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
     categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
...
@@ -1161,7 +1161,7 @@ bool PagedSpace::Expand() {
   Page* p =
       heap()->memory_allocator()->AllocatePage<Page>(size, this, executable());
-  if (p == nullptr) return false;
+  if (p == NULL) return false;
 
   AccountCommitted(static_cast<intptr_t>(p->size()));
@@ -1817,19 +1817,6 @@ void SemiSpace::Reset() {
   current_page_ = anchor_.next_page();
 }
 
-void SemiSpace::ReplaceWithEmptyPage(NewSpacePage* old_page) {
-  NewSpacePage* new_page =
-      heap()->memory_allocator()->AllocatePage<NewSpacePage>(
-          NewSpacePage::kAllocatableMemory, this, executable());
-  Bitmap::Clear(new_page);
-  new_page->SetFlags(old_page->GetFlags(), NewSpacePage::kCopyAllFlags);
-  new_page->set_next_page(old_page->next_page());
-  new_page->set_prev_page(old_page->prev_page());
-  old_page->next_page()->set_prev_page(new_page);
-  old_page->prev_page()->set_next_page(new_page);
-  heap()->CreateFillerObjectAt(new_page->area_start(), new_page->area_size(),
-                               ClearRecordedSlots::kNo);
-}
-
 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
   // We won't be swapping semispaces without data in them.
...
@@ -419,10 +419,6 @@ class MemoryChunk {
     // to grey transition is performed in the value.
     HAS_PROGRESS_BAR,
 
-    // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
-    // from new to old space during evacuation.
-    PAGE_NEW_OLD_PROMOTION,
-
     // A black page has all mark bits set to 1 (black). A black page currently
     // cannot be iterated because it is not swept. Moreover live bytes are also
     // not updated.
@@ -828,8 +824,6 @@ class MemoryChunk {
 // Page* p = Page::FromAllocationTop(top);
 class Page : public MemoryChunk {
  public:
-  static inline Page* Convert(NewSpacePage* old_page, PagedSpace* new_owner);
-
   // Returns the page containing a given address. The address ranges
   // from [page_addr .. page_addr + kPageSize[
   // This only works if the object is in fact in a page. See also MemoryChunk::
@@ -944,9 +938,6 @@ class Page : public MemoryChunk {
   inline void ClearEvacuationCandidate();
 
  private:
-  enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
-
-  template <InitializationMode mode = kFreeMemory>
   static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                  Executability executable, PagedSpace* owner);
@@ -1050,6 +1041,11 @@ class Space : public Malloced {
     }
   }
 
+#ifdef DEBUG
+  virtual void Print() = 0;
+#endif
+
+ protected:
   void AccountCommitted(intptr_t bytes) {
     DCHECK_GE(bytes, 0);
     committed_ += bytes;
@@ -1064,11 +1060,6 @@ class Space : public Malloced {
     DCHECK_GE(committed_, 0);
   }
 
-#ifdef DEBUG
-  virtual void Print() = 0;
-#endif
-
- protected:
   v8::base::SmartPointer<List<AllocationObserver*>> allocation_observers_;
   bool allocation_observers_paused_;
@@ -2364,8 +2355,6 @@ class NewSpacePage : public MemoryChunk {
       (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
       (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
 
-  static const intptr_t kCopyAllFlags = ~0;
-
   // Create a NewSpacePage object that is only used as anchor
   // for the doubly-linked list of real pages.
   explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
@@ -2451,8 +2440,6 @@ class SemiSpace : public Space {
   // Resets the space to using the first page.
   void Reset();
 
-  void ReplaceWithEmptyPage(NewSpacePage* page);
-
   // Age mark accessors.
   Address age_mark() { return age_mark_; }
   void set_age_mark(Address mark);
@@ -2673,12 +2660,6 @@ class NewSpace : public Space {
 
   inline size_t AllocatedSinceLastGC();
 
-  void ReplaceWithEmptyPage(NewSpacePage* page) {
-    // This method is called after flipping the semispace.
-    DCHECK(page->InFromSpace());
-    from_space_.ReplaceWithEmptyPage(page);
-  }
-
   // Return the maximum capacity of a semispace.
   int MaximumCapacity() {
     DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
...
@@ -3524,8 +3524,6 @@ TEST(ReleaseOverReservedPages) {
   // Concurrent sweeping adds non determinism, depending on when memory is
   // available for further reuse.
   i::FLAG_concurrent_sweeping = false;
-  // Fast evacuation of pages may result in a different page count in old space.
-  i::FLAG_page_promotion = false;
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   Factory* factory = isolate->factory();
@@ -6575,51 +6573,5 @@ HEAP_TEST(Regress589413) {
   heap->CollectGarbage(OLD_SPACE);
 }
 
-UNINITIALIZED_TEST(PagePromotion) {
-  FLAG_page_promotion = true;
-  FLAG_page_promotion_threshold = 50;  // %
-  i::FLAG_min_semi_space_size = 8 * (Page::kPageSize / MB);
-  // We cannot optimize for size as we require a new space with more than one
-  // page.
-  i::FLAG_optimize_for_size = false;
-  // Set max_semi_space_size because it could've been initialized by an
-  // implication of optimize_for_size.
-  i::FLAG_max_semi_space_size = i::FLAG_min_semi_space_size;
-
-  v8::Isolate::CreateParams create_params;
-  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
-  v8::Isolate* isolate = v8::Isolate::New(create_params);
-  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  {
-    v8::Isolate::Scope isolate_scope(isolate);
-    v8::HandleScope handle_scope(isolate);
-    v8::Context::New(isolate)->Enter();
-    Heap* heap = i_isolate->heap();
-
-    std::vector<Handle<FixedArray>> handles;
-    SimulateFullSpace(heap->new_space(), &handles);
-    heap->CollectGarbage(NEW_SPACE);
-    CHECK_GT(handles.size(), 0u);
-    // First object in handle should be on the first page.
-    Handle<FixedArray> first_object = handles.front();
-    NewSpacePage* first_page =
-        NewSpacePage::FromAddress(first_object->address());
-    // The age mark should not be on the first page.
-    CHECK(!first_page->ContainsLimit(heap->new_space()->age_mark()));
-    // To perform a sanity check on live bytes we need to mark the heap.
-    SimulateIncrementalMarking(heap, true);
-    // Sanity check that the page meets the requirements for promotion.
-    const int threshold_bytes =
-        FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory / 100;
-    CHECK_GE(first_page->LiveBytes(), threshold_bytes);
-    // Actual checks: The page is in new space first, but is moved to old space
-    // during a full GC.
-    CHECK(heap->new_space()->ContainsSlow(first_page->address()));
-    CHECK(!heap->old_space()->ContainsSlow(first_page->address()));
-    heap->CollectGarbage(OLD_SPACE);
-    CHECK(!heap->new_space()->ContainsSlow(first_page->address()));
-    CHECK(heap->old_space()->ContainsSlow(first_page->address()));
-  }
-}
-
 }  // namespace internal
 }  // namespace v8
@@ -63,48 +63,37 @@ static inline std::vector<Handle<FixedArray>> CreatePadding(
 
 // Helper function that simulates a full new-space in the heap.
-static inline bool FillUpOnePage(
-    v8::internal::NewSpace* space,
-    std::vector<Handle<FixedArray>>* out_handles = nullptr) {
+static inline bool FillUpOnePage(v8::internal::NewSpace* space) {
   space->DisableInlineAllocationSteps();
   int space_remaining = static_cast<int>(*space->allocation_limit_address() -
                                          *space->allocation_top_address());
   if (space_remaining == 0) return false;
-  std::vector<Handle<FixedArray>> handles =
-      CreatePadding(space->heap(), space_remaining, i::NOT_TENURED);
-  if (out_handles != nullptr)
-    out_handles->insert(out_handles->end(), handles.begin(), handles.end());
+  CreatePadding(space->heap(), space_remaining, i::NOT_TENURED);
   return true;
 }
 
 // Helper function that simulates a fill new-space in the heap.
-static inline void AllocateAllButNBytes(
-    v8::internal::NewSpace* space, int extra_bytes,
-    std::vector<Handle<FixedArray>>* out_handles = nullptr) {
+static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
+                                        int extra_bytes) {
   space->DisableInlineAllocationSteps();
   int space_remaining = static_cast<int>(*space->allocation_limit_address() -
                                          *space->allocation_top_address());
   CHECK(space_remaining >= extra_bytes);
   int new_linear_size = space_remaining - extra_bytes;
   if (new_linear_size == 0) return;
-  std::vector<Handle<FixedArray>> handles =
-      CreatePadding(space->heap(), new_linear_size, i::NOT_TENURED);
-  if (out_handles != nullptr)
-    out_handles->insert(out_handles->end(), handles.begin(), handles.end());
+  CreatePadding(space->heap(), new_linear_size, i::NOT_TENURED);
 }
 
-static inline void FillCurrentPage(
-    v8::internal::NewSpace* space,
-    std::vector<Handle<FixedArray>>* out_handles = nullptr) {
-  AllocateAllButNBytes(space, 0, out_handles);
+static inline void FillCurrentPage(v8::internal::NewSpace* space) {
+  AllocateAllButNBytes(space, 0);
 }
 
-static inline void SimulateFullSpace(
-    v8::internal::NewSpace* space,
-    std::vector<Handle<FixedArray>>* out_handles = nullptr) {
-  FillCurrentPage(space, out_handles);
-  while (FillUpOnePage(space, out_handles) || space->AddFreshPage()) {
+static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
+  FillCurrentPage(space);
+  while (FillUpOnePage(space)) {
   }
 }
...