Commit 0d7e23a6 authored by mlippautz, committed by Commit bot

[heap] Add page evacuation mode for new->old

In a full mark-compact GC, instead of copying the live objects of pages
that have more than X% live bytes to old space one by one, we move the
whole page over to old space.

X=70 (default value)
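
A minimal, self-contained sketch of the resulting heuristic, assuming an
illustrative constant for the page's allocatable area (the flag names match
the ones introduced below; the surrounding evacuator code is not shown here):

// Sketch only; models the promotion decision, not the actual collector code.
#include <cstdint>

constexpr intptr_t kPageAllocatableMemory = 500 * 1024;  // illustrative value
bool FLAG_page_promotion = true;           // --page_promotion
int FLAG_page_promotion_threshold = 70;    // --page_promotion_threshold, in %

// A page whose live bytes meet the threshold is moved wholesale into old
// space; otherwise its live objects are evacuated individually.
bool ShouldMovePageToOldSpace(intptr_t live_bytes) {
  const intptr_t threshold_bytes =
      kPageAllocatableMemory * FLAG_page_promotion_threshold / 100;
  return FLAG_page_promotion && live_bytes >= threshold_bytes;
}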

BUG=chromium:581412
LOG=N

Review URL: https://codereview.chromium.org/1863983002

Cr-Commit-Position: refs/heads/master@{#35610}
parent d784c2d1
......@@ -253,6 +253,9 @@ DEFINE_BOOL(compiled_keyed_generic_loads, false,
"use optimizing compiler to generate keyed generic load stubs")
DEFINE_BOOL(allocation_site_pretenuring, true,
"pretenure with allocation sites")
DEFINE_BOOL(page_promotion, true, "promote pages based on utilization")
DEFINE_INT(page_promotion_threshold, 70,
"min percentage of live bytes on a page to enable fast evacuation")
DEFINE_BOOL(trace_pretenuring, false,
"trace pretenuring decisions of HAllocate instructions")
DEFINE_BOOL(trace_pretenuring_statistics, false,
......
......@@ -6217,7 +6217,6 @@ void DescriptorLookupCache::Clear() {
for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
}
void Heap::ExternalStringTable::CleanUp() {
int last = 0;
for (int i = 0; i < new_space_strings_.length(); ++i) {
......@@ -6252,7 +6251,6 @@ void Heap::ExternalStringTable::CleanUp() {
#endif
}
void Heap::ExternalStringTable::TearDown() {
for (int i = 0; i < new_space_strings_.length(); ++i) {
heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
......
This diff is collapsed.
......@@ -628,14 +628,13 @@ class MarkCompactCollector {
void RegisterExternallyReferencedObject(Object** object);
private:
class EvacuateNewSpacePageVisitor;
class EvacuateNewSpaceVisitor;
class EvacuateOldSpaceVisitor;
class EvacuateRecordOnlyVisitor;
class EvacuateVisitorBase;
class HeapObjectVisitor;
typedef std::vector<Page*> SweepingList;
explicit MarkCompactCollector(Heap* heap);
bool WillBeDeoptimized(Code* code);
......@@ -828,7 +827,8 @@ class MarkCompactCollector {
// Iterates through all live objects on a page using marking information.
// Returns whether all objects have successfully been visited.
bool VisitLiveObjects(MemoryChunk* page, HeapObjectVisitor* visitor,
template <class Visitor>
bool VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
IterationMode mode);
void VisitLiveObjectsBody(Page* page, ObjectVisitor* visitor);
......
......@@ -287,6 +287,7 @@ NewSpacePage* NewSpacePage::Initialize(Heap* heap, MemoryChunk* chunk,
// --------------------------------------------------------------------------
// PagedSpace
template <Page::InitializationMode mode>
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
......@@ -299,11 +300,25 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
// Make sure that categories are initialized before freeing the area.
page->InitializeFreeListCategories();
// If we do not free the memory, we effectively account for the whole page
// as allocated memory that cannot be used for further allocations.
if (mode == kFreeMemory) {
owner->Free(page->area_start(), page->area_size());
}
return page;
}
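// Takes a new-space page that survived a full GC with high utilization and
// re-registers it as an old-space page: ownership is transferred, all flags
// are cleared, the already-committed memory is accounted to the new owner,
// and the page is reinitialized without freeing its live contents
// (kDoNotFreeMemory). The page ends up at the tail of the owner's page list.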
Page* Page::Convert(NewSpacePage* old_page, PagedSpace* new_owner) {
old_page->set_owner(new_owner);
old_page->SetFlags(0, ~0);
new_owner->AccountCommitted(old_page->size());
Page* new_page = Page::Initialize<kDoNotFreeMemory>(
old_page->heap(), old_page, NOT_EXECUTABLE, new_owner);
new_page->InsertAfter(new_owner->anchor()->prev_page());
return new_page;
}
void Page::InitializeFreeListCategories() {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
......
......@@ -1161,7 +1161,7 @@ bool PagedSpace::Expand() {
Page* p =
heap()->memory_allocator()->AllocatePage<Page>(size, this, executable());
if (p == NULL) return false;
if (p == nullptr) return false;
AccountCommitted(static_cast<intptr_t>(p->size()));
......@@ -1817,6 +1817,19 @@ void SemiSpace::Reset() {
current_page_ = anchor_.next_page();
}
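// Unlinks |old_page| from this semispace and splices a freshly allocated,
// empty page into its position, carrying over the old page's flags. The new
// page's area is covered with a filler object so the space stays iterable.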
void SemiSpace::ReplaceWithEmptyPage(NewSpacePage* old_page) {
NewSpacePage* new_page =
heap()->memory_allocator()->AllocatePage<NewSpacePage>(
NewSpacePage::kAllocatableMemory, this, executable());
Bitmap::Clear(new_page);
new_page->SetFlags(old_page->GetFlags(), NewSpacePage::kCopyAllFlags);
new_page->set_next_page(old_page->next_page());
new_page->set_prev_page(old_page->prev_page());
old_page->next_page()->set_prev_page(new_page);
old_page->prev_page()->set_next_page(new_page);
heap()->CreateFillerObjectAt(new_page->area_start(), new_page->area_size(),
ClearRecordedSlots::kNo);
}
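
The mark-compact changes that drive these two primitives live in the
collapsed diff above (presumably mark-compact.cc); as a rough sketch of how
they fit together during evacuation, with the function name here being an
assumption rather than the actual collector code:

// Hypothetical wiring inside the evacuator for a page that crossed the
// promotion threshold; see Page::Convert and ReplaceWithEmptyPage above.
void MoveToOldSpace(NewSpacePage* page, PagedSpace* old_space) {
  // Give the semispace an empty stand-in while the old page still carries
  // its original flags and list links.
  page->heap()->new_space()->ReplaceWithEmptyPage(page);
  // Re-register the page itself in old space without copying any objects.
  Page* new_page = Page::Convert(page, old_space);
  // Tag it so later phases can recognize the promoted page.
  new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
}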
void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
// We won't be swapping semispaces without data in them.
......
......@@ -419,6 +419,10 @@ class MemoryChunk {
// to grey transition is performed in the value.
HAS_PROGRESS_BAR,
// |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
// from new to old space during evacuation.
PAGE_NEW_OLD_PROMOTION,
// A black page has all mark bits set to 1 (black). A black page currently
// cannot be iterated because it is not swept. Moreover live bytes are also
// not updated.
......@@ -824,6 +828,8 @@ class MemoryChunk {
// Page* p = Page::FromAllocationTop(top);
class Page : public MemoryChunk {
public:
static inline Page* Convert(NewSpacePage* old_page, PagedSpace* new_owner);
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[
// This only works if the object is in fact in a page. See also MemoryChunk::
......@@ -938,6 +944,9 @@ class Page : public MemoryChunk {
inline void ClearEvacuationCandidate();
private:
enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
template <InitializationMode mode = kFreeMemory>
static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable, PagedSpace* owner);
......@@ -1041,11 +1050,6 @@ class Space : public Malloced {
}
}
#ifdef DEBUG
virtual void Print() = 0;
#endif
protected:
void AccountCommitted(intptr_t bytes) {
DCHECK_GE(bytes, 0);
committed_ += bytes;
......@@ -1060,6 +1064,11 @@ class Space : public Malloced {
DCHECK_GE(committed_, 0);
}
#ifdef DEBUG
virtual void Print() = 0;
#endif
protected:
v8::base::SmartPointer<List<AllocationObserver*>> allocation_observers_;
bool allocation_observers_paused_;
......@@ -2355,6 +2364,8 @@ class NewSpacePage : public MemoryChunk {
(1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
(1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
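// Mask that makes SetFlags() copy every flag bit from another page.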
static const intptr_t kCopyAllFlags = ~0;
// Create a NewSpacePage object that is only used as anchor
// for the doubly-linked list of real pages.
explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
......@@ -2440,6 +2451,8 @@ class SemiSpace : public Space {
// Resets the space to using the first page.
void Reset();
void ReplaceWithEmptyPage(NewSpacePage* page);
// Age mark accessors.
Address age_mark() { return age_mark_; }
void set_age_mark(Address mark);
......@@ -2660,6 +2673,12 @@ class NewSpace : public Space {
inline size_t AllocatedSinceLastGC();
void ReplaceWithEmptyPage(NewSpacePage* page) {
// This method is called after flipping the semispace.
DCHECK(page->InFromSpace());
from_space_.ReplaceWithEmptyPage(page);
}
// Return the maximum capacity of a semispace.
int MaximumCapacity() {
DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
......
......@@ -3524,6 +3524,8 @@ TEST(ReleaseOverReservedPages) {
// Concurrent sweeping adds non determinism, depending on when memory is
// available for further reuse.
i::FLAG_concurrent_sweeping = false;
// Fast evacuation of pages may result in a different page count in old space.
i::FLAG_page_promotion = false;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
......@@ -6573,5 +6575,51 @@ HEAP_TEST(Regress589413) {
heap->CollectGarbage(OLD_SPACE);
}
UNINITIALIZED_TEST(PagePromotion) {
FLAG_page_promotion = true;
FLAG_page_promotion_threshold = 50; // %
i::FLAG_min_semi_space_size = 8 * (Page::kPageSize / MB);
// We cannot optimize for size as we require a new space with more than one
// page.
i::FLAG_optimize_for_size = false;
// Set max_semi_space_size because it could have been initialized by an
// implication of optimize_for_size.
i::FLAG_max_semi_space_size = i::FLAG_min_semi_space_size;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Context::New(isolate)->Enter();
Heap* heap = i_isolate->heap();
std::vector<Handle<FixedArray>> handles;
SimulateFullSpace(heap->new_space(), &handles);
heap->CollectGarbage(NEW_SPACE);
CHECK_GT(handles.size(), 0u);
// The first object in |handles| should be on the first page.
Handle<FixedArray> first_object = handles.front();
NewSpacePage* first_page =
NewSpacePage::FromAddress(first_object->address());
// The age mark should not be on the first page.
CHECK(!first_page->ContainsLimit(heap->new_space()->age_mark()));
// To perform a sanity check on live bytes we need to mark the heap.
SimulateIncrementalMarking(heap, true);
// Sanity check that the page meets the requirements for promotion.
const int threshold_bytes =
FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory / 100;
CHECK_GE(first_page->LiveBytes(), threshold_bytes);
// Actual checks: The page is in new space first, but is moved to old space
// during a full GC.
CHECK(heap->new_space()->ContainsSlow(first_page->address()));
CHECK(!heap->old_space()->ContainsSlow(first_page->address()));
heap->CollectGarbage(OLD_SPACE);
CHECK(!heap->new_space()->ContainsSlow(first_page->address()));
CHECK(heap->old_space()->ContainsSlow(first_page->address()));
}
}
} // namespace internal
} // namespace v8
......@@ -63,37 +63,48 @@ static inline std::vector<Handle<FixedArray>> CreatePadding(
// Helper function that simulates a full new-space in the heap.
static inline bool FillUpOnePage(v8::internal::NewSpace* space) {
static inline bool FillUpOnePage(
v8::internal::NewSpace* space,
std::vector<Handle<FixedArray>>* out_handles = nullptr) {
space->DisableInlineAllocationSteps();
int space_remaining = static_cast<int>(*space->allocation_limit_address() -
*space->allocation_top_address());
if (space_remaining == 0) return false;
std::vector<Handle<FixedArray>> handles =
CreatePadding(space->heap(), space_remaining, i::NOT_TENURED);
if (out_handles != nullptr)
out_handles->insert(out_handles->end(), handles.begin(), handles.end());
return true;
}
// Helper function that fills new space up to all but |extra_bytes| bytes.
static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
int extra_bytes) {
static inline void AllocateAllButNBytes(
v8::internal::NewSpace* space, int extra_bytes,
std::vector<Handle<FixedArray>>* out_handles = nullptr) {
space->DisableInlineAllocationSteps();
int space_remaining = static_cast<int>(*space->allocation_limit_address() -
*space->allocation_top_address());
CHECK(space_remaining >= extra_bytes);
int new_linear_size = space_remaining - extra_bytes;
if (new_linear_size == 0) return;
std::vector<Handle<FixedArray>> handles =
CreatePadding(space->heap(), new_linear_size, i::NOT_TENURED);
if (out_handles != nullptr)
out_handles->insert(out_handles->end(), handles.begin(), handles.end());
}
static inline void FillCurrentPage(v8::internal::NewSpace* space) {
AllocateAllButNBytes(space, 0);
static inline void FillCurrentPage(
v8::internal::NewSpace* space,
std::vector<Handle<FixedArray>>* out_handles = nullptr) {
AllocateAllButNBytes(space, 0, out_handles);
}
static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
FillCurrentPage(space);
while (FillUpOnePage(space)) {
static inline void SimulateFullSpace(
v8::internal::NewSpace* space,
std::vector<Handle<FixedArray>>* out_handles = nullptr) {
FillCurrentPage(space, out_handles);
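// Keep filling: FillUpOnePage() pads the current page with FixedArrays and
// AddFreshPage() advances to a new page, until the semispace is exhausted.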
while (FillUpOnePage(space, out_handles) || space->AddFreshPage()) {
}
}
......